mirror of
https://github.com/dani-garcia/vaultwarden.git
synced 2024-06-16 23:32:23 +00:00
Compare commits
143 Commits
Author | SHA1 | Date | |
---|---|---|---|
|
377969ea67 | ||
|
f05398a6b3 | ||
|
9555ac7bb8 | ||
|
f01ef40a8e | ||
|
8e7b27cc36 | ||
|
d230ee087c | ||
|
f8f14727b9 | ||
|
753a9e0bae | ||
|
f5fb69b64f | ||
|
3261534438 | ||
|
46762d9fde | ||
|
6cadb2627a | ||
|
0fe93edea6 | ||
|
e9aa5a545e | ||
|
9dcc738f85 | ||
|
84a7c7da5d | ||
|
ca9234ed86 | ||
|
27dc67fadd | ||
|
2ad33ec97f | ||
|
e1a8df96db | ||
|
e42a37c6c1 | ||
|
129b835ac7 | ||
|
2d98aa3045 | ||
|
93636eb3c3 | ||
|
1e42755187 | ||
|
ce8efcc48f | ||
|
79ce5b49bc | ||
|
7c3cad197c | ||
|
000c606029 | ||
|
29144b2ce0 | ||
|
ea04b6f151 | ||
|
3427217686 | ||
|
a1fbd6d729 | ||
|
2cbfe6fa5b | ||
|
d86c4f2c23 | ||
|
6d73f30b4f | ||
|
d0c22b9fc9 | ||
|
d6b97090fa | ||
|
94b077cb2d | ||
|
bb2412d033 | ||
|
b9bdc9b8e2 | ||
|
897bdf8343 | ||
|
569add453d | ||
|
77cd5b5954 | ||
|
4438da39f9 | ||
|
0b2383ab56 | ||
|
ad1d65bdf8 | ||
|
3b283c289e | ||
|
4b9384cb2b | ||
|
0f39d96518 | ||
|
edf7484a70 | ||
|
8b66e34415 | ||
|
1d00e34bbb | ||
|
1b801406d6 | ||
|
5e46a43306 | ||
|
5c77431c2d | ||
|
2775c6ce8a | ||
|
890e668071 | ||
|
596c167312 | ||
|
ae3a153bdb | ||
|
2c36993792 | ||
|
d672ad3f76 | ||
|
a641b48884 | ||
|
98b2178c7d | ||
|
76a3f0f531 | ||
|
c5665e7b77 | ||
|
cbdcf8ef9f | ||
|
3337594d60 | ||
|
2daa8be1f1 | ||
|
eccb3ab947 | ||
|
3246251f29 | ||
|
8ab200224e | ||
|
34e00e1478 | ||
|
0fdda3bc2f | ||
|
48836501bf | ||
|
f863ffb89a | ||
|
03c6ed2e07 | ||
|
efc6eb0073 | ||
|
cec1e87679 | ||
|
512b3b9b7c | ||
|
93da5091e6 | ||
|
915496c103 | ||
|
ecb31c85d6 | ||
|
d722328f05 | ||
|
cb4b683dcd | ||
|
6eaf131922 | ||
|
8933ac2ee7 | ||
|
6822e445bb | ||
|
18fbc1ccf6 | ||
|
4861f6decc | ||
|
b435ee49ad | ||
|
193f86e43e | ||
|
66a7baa67c | ||
|
18d66474e0 | ||
|
ff8db4fd78 | ||
|
b2f9af718e | ||
|
198fd2fc1d | ||
|
ec8a9c82df | ||
|
ef5e0bd4e5 | ||
|
30b408eaa9 | ||
|
e205e3b7db | ||
|
ca1a9e26d8 | ||
|
f3a1385aee | ||
|
008a2cf298 | ||
|
f0c9a7fbc3 | ||
|
9162b13123 | ||
|
480bf9b0c1 | ||
|
f96c5e8a1e | ||
|
3d4be24902 | ||
|
bf41d74501 | ||
|
01e33a4919 | ||
|
bc26bfa589 | ||
|
ccc51e7580 | ||
|
99a59bc4f3 | ||
|
a77482575a | ||
|
bbd630f1ee | ||
|
d18b793c71 | ||
|
d3a1d875d5 | ||
|
d6e0ace192 | ||
|
60cbfa59bf | ||
|
5ab7010c37 | ||
|
ad2cfd8b97 | ||
|
32543c46da | ||
|
66bff73ebf | ||
|
83d5432cbf | ||
|
f579a4154c | ||
|
f5a19c5f8b | ||
|
aa9bc1f785 | ||
|
f162e85e44 | ||
|
33ef70c192 | ||
|
3d2df6ce11 | ||
|
6cdcb3b297 | ||
|
d1af468700 | ||
|
ae1c53f4e5 | ||
|
bc57c4b193 | ||
|
61ae4c9cf5 | ||
|
8d7b3db33d | ||
|
e9ec3741ae | ||
|
dacd50f3f1 | ||
|
9412112639 | ||
|
aaeae16983 | ||
|
d892880dd2 | ||
|
4395e8e888 |
526
.env.template
526
.env.template
|
@ -10,22 +10,63 @@
|
||||||
## variable ENV_FILE can be set to the location of this file prior to starting
|
## variable ENV_FILE can be set to the location of this file prior to starting
|
||||||
## Vaultwarden.
|
## Vaultwarden.
|
||||||
|
|
||||||
|
####################
|
||||||
|
### Data folders ###
|
||||||
|
####################
|
||||||
|
|
||||||
## Main data folder
|
## Main data folder
|
||||||
# DATA_FOLDER=data
|
# DATA_FOLDER=data
|
||||||
|
|
||||||
|
## Individual folders, these override %DATA_FOLDER%
|
||||||
|
# RSA_KEY_FILENAME=data/rsa_key
|
||||||
|
# ICON_CACHE_FOLDER=data/icon_cache
|
||||||
|
# ATTACHMENTS_FOLDER=data/attachments
|
||||||
|
# SENDS_FOLDER=data/sends
|
||||||
|
# TMP_FOLDER=data/tmp
|
||||||
|
|
||||||
|
## Templates data folder, by default uses embedded templates
|
||||||
|
## Check source code to see the format
|
||||||
|
# TEMPLATES_FOLDER=data/templates
|
||||||
|
## Automatically reload the templates for every request, slow, use only for development
|
||||||
|
# RELOAD_TEMPLATES=false
|
||||||
|
|
||||||
|
## Web vault settings
|
||||||
|
# WEB_VAULT_FOLDER=web-vault/
|
||||||
|
# WEB_VAULT_ENABLED=true
|
||||||
|
|
||||||
|
#########################
|
||||||
|
### Database settings ###
|
||||||
|
#########################
|
||||||
|
|
||||||
## Database URL
|
## Database URL
|
||||||
## When using SQLite, this is the path to the DB file, default to %DATA_FOLDER%/db.sqlite3
|
## When using SQLite, this is the path to the DB file, default to %DATA_FOLDER%/db.sqlite3
|
||||||
# DATABASE_URL=data/db.sqlite3
|
# DATABASE_URL=data/db.sqlite3
|
||||||
## When using MySQL, specify an appropriate connection URI.
|
## When using MySQL, specify an appropriate connection URI.
|
||||||
## Details: https://docs.diesel.rs/diesel/mysql/struct.MysqlConnection.html
|
## Details: https://docs.diesel.rs/2.1.x/diesel/mysql/struct.MysqlConnection.html
|
||||||
# DATABASE_URL=mysql://user:password@host[:port]/database_name
|
# DATABASE_URL=mysql://user:password@host[:port]/database_name
|
||||||
## When using PostgreSQL, specify an appropriate connection URI (recommended)
|
## When using PostgreSQL, specify an appropriate connection URI (recommended)
|
||||||
## or keyword/value connection string.
|
## or keyword/value connection string.
|
||||||
## Details:
|
## Details:
|
||||||
## - https://docs.diesel.rs/diesel/pg/struct.PgConnection.html
|
## - https://docs.diesel.rs/2.1.x/diesel/pg/struct.PgConnection.html
|
||||||
## - https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-CONNSTRING
|
## - https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-CONNSTRING
|
||||||
# DATABASE_URL=postgresql://user:password@host[:port]/database_name
|
# DATABASE_URL=postgresql://user:password@host[:port]/database_name
|
||||||
|
|
||||||
|
## Enable WAL for the DB
|
||||||
|
## Set to false to avoid enabling WAL during startup.
|
||||||
|
## Note that if the DB already has WAL enabled, you will also need to disable WAL in the DB,
|
||||||
|
## this setting only prevents Vaultwarden from automatically enabling it on start.
|
||||||
|
## Please read project wiki page about this setting first before changing the value as it can
|
||||||
|
## cause performance degradation or might render the service unable to start.
|
||||||
|
# ENABLE_DB_WAL=true
|
||||||
|
|
||||||
|
## Database connection retries
|
||||||
|
## Number of times to retry the database connection during startup, with 1 second delay between each retry, set to 0 to retry indefinitely
|
||||||
|
# DB_CONNECTION_RETRIES=15
|
||||||
|
|
||||||
|
## Database timeout
|
||||||
|
## Timeout when acquiring database connection
|
||||||
|
# DATABASE_TIMEOUT=30
|
||||||
|
|
||||||
## Database max connections
|
## Database max connections
|
||||||
## Define the size of the connection pool used for connecting to the database.
|
## Define the size of the connection pool used for connecting to the database.
|
||||||
# DATABASE_MAX_CONNS=10
|
# DATABASE_MAX_CONNS=10
|
||||||
|
@ -39,70 +80,31 @@
|
||||||
## - PostgreSQL: ""
|
## - PostgreSQL: ""
|
||||||
# DATABASE_CONN_INIT=""
|
# DATABASE_CONN_INIT=""
|
||||||
|
|
||||||
## Individual folders, these override %DATA_FOLDER%
|
#################
|
||||||
# RSA_KEY_FILENAME=data/rsa_key
|
### WebSocket ###
|
||||||
# ICON_CACHE_FOLDER=data/icon_cache
|
#################
|
||||||
# ATTACHMENTS_FOLDER=data/attachments
|
|
||||||
# SENDS_FOLDER=data/sends
|
|
||||||
# TMP_FOLDER=data/tmp
|
|
||||||
|
|
||||||
## Templates data folder, by default uses embedded templates
|
## Enable websocket notifications
|
||||||
## Check source code to see the format
|
# ENABLE_WEBSOCKET=true
|
||||||
# TEMPLATES_FOLDER=/path/to/templates
|
|
||||||
## Automatically reload the templates for every request, slow, use only for development
|
|
||||||
# RELOAD_TEMPLATES=false
|
|
||||||
|
|
||||||
## Client IP Header, used to identify the IP of the client, defaults to "X-Real-IP"
|
##########################
|
||||||
## Set to the string "none" (without quotes), to disable any headers and just use the remote IP
|
### Push notifications ###
|
||||||
# IP_HEADER=X-Real-IP
|
##########################
|
||||||
|
|
||||||
## Cache time-to-live for successfully obtained icons, in seconds (0 is "forever")
|
|
||||||
# ICON_CACHE_TTL=2592000
|
|
||||||
## Cache time-to-live for icons which weren't available, in seconds (0 is "forever")
|
|
||||||
# ICON_CACHE_NEGTTL=259200
|
|
||||||
|
|
||||||
## Web vault settings
|
|
||||||
# WEB_VAULT_FOLDER=web-vault/
|
|
||||||
# WEB_VAULT_ENABLED=true
|
|
||||||
|
|
||||||
## Enables websocket notifications
|
|
||||||
# WEBSOCKET_ENABLED=false
|
|
||||||
|
|
||||||
## Controls the WebSocket server address and port
|
|
||||||
# WEBSOCKET_ADDRESS=0.0.0.0
|
|
||||||
# WEBSOCKET_PORT=3012
|
|
||||||
|
|
||||||
## Enables push notifications (requires key and id from https://bitwarden.com/host)
|
## Enables push notifications (requires key and id from https://bitwarden.com/host)
|
||||||
# PUSH_ENABLED=true
|
## If you choose "European Union" Data Region, uncomment PUSH_RELAY_URI and PUSH_IDENTITY_URI then replace .com by .eu
|
||||||
|
## Details about mobile client push notification:
|
||||||
|
## - https://github.com/dani-garcia/vaultwarden/wiki/Enabling-Mobile-Client-push-notification
|
||||||
|
# PUSH_ENABLED=false
|
||||||
# PUSH_INSTALLATION_ID=CHANGEME
|
# PUSH_INSTALLATION_ID=CHANGEME
|
||||||
# PUSH_INSTALLATION_KEY=CHANGEME
|
# PUSH_INSTALLATION_KEY=CHANGEME
|
||||||
## Don't change this unless you know what you're doing.
|
## Don't change this unless you know what you're doing.
|
||||||
# PUSH_RELAY_BASE_URI=https://push.bitwarden.com
|
# PUSH_RELAY_URI=https://push.bitwarden.com
|
||||||
|
# PUSH_IDENTITY_URI=https://identity.bitwarden.com
|
||||||
|
|
||||||
## Controls whether users are allowed to create Bitwarden Sends.
|
#####################
|
||||||
## This setting applies globally to all users.
|
### Schedule jobs ###
|
||||||
## To control this on a per-org basis instead, use the "Disable Send" org policy.
|
#####################
|
||||||
# SENDS_ALLOWED=true
|
|
||||||
|
|
||||||
## Controls whether users can enable emergency access to their accounts.
|
|
||||||
## This setting applies globally to all users.
|
|
||||||
# EMERGENCY_ACCESS_ALLOWED=true
|
|
||||||
|
|
||||||
## Controls whether event logging is enabled for organizations
|
|
||||||
## This setting applies to organizations.
|
|
||||||
## Disabled by default. Also check the EVENT_CLEANUP_SCHEDULE and EVENTS_DAYS_RETAIN settings.
|
|
||||||
# ORG_EVENTS_ENABLED=false
|
|
||||||
|
|
||||||
## Number of days to retain events stored in the database.
|
|
||||||
## If unset (the default), events are kept indefinitely and the scheduled job is disabled!
|
|
||||||
# EVENTS_DAYS_RETAIN=
|
|
||||||
|
|
||||||
## BETA FEATURE: Groups
|
|
||||||
## Controls whether group support is enabled for organizations
|
|
||||||
## This setting applies to organizations.
|
|
||||||
## Disabled by default because this is a beta feature, it contains known issues!
|
|
||||||
## KNOW WHAT YOU ARE DOING!
|
|
||||||
# ORG_GROUPS_ENABLED=false
|
|
||||||
|
|
||||||
## Job scheduler settings
|
## Job scheduler settings
|
||||||
##
|
##
|
||||||
|
@ -143,60 +145,69 @@
|
||||||
## Cron schedule of the job that cleans old events from the event table.
|
## Cron schedule of the job that cleans old events from the event table.
|
||||||
## Defaults to daily. Set blank to disable this job. Also without EVENTS_DAYS_RETAIN set, this job will not start.
|
## Defaults to daily. Set blank to disable this job. Also without EVENTS_DAYS_RETAIN set, this job will not start.
|
||||||
# EVENT_CLEANUP_SCHEDULE="0 10 0 * * *"
|
# EVENT_CLEANUP_SCHEDULE="0 10 0 * * *"
|
||||||
|
## Number of days to retain events stored in the database.
|
||||||
## Enable extended logging, which shows timestamps and targets in the logs
|
## If unset (the default), events are kept indefinitely and the scheduled job is disabled!
|
||||||
# EXTENDED_LOGGING=true
|
# EVENTS_DAYS_RETAIN=
|
||||||
|
|
||||||
## Timestamp format used in extended logging.
|
|
||||||
## Format specifiers: https://docs.rs/chrono/latest/chrono/format/strftime
|
|
||||||
# LOG_TIMESTAMP_FORMAT="%Y-%m-%d %H:%M:%S.%3f"
|
|
||||||
|
|
||||||
## Logging to file
|
|
||||||
# LOG_FILE=/path/to/log
|
|
||||||
|
|
||||||
## Logging to Syslog
|
|
||||||
## This requires extended logging
|
|
||||||
# USE_SYSLOG=false
|
|
||||||
|
|
||||||
## Log level
|
|
||||||
## Change the verbosity of the log output
|
|
||||||
## Valid values are "trace", "debug", "info", "warn", "error" and "off"
|
|
||||||
## Setting it to "trace" or "debug" would also show logs for mounted
|
|
||||||
## routes and static file, websocket and alive requests
|
|
||||||
# LOG_LEVEL=Info
|
|
||||||
|
|
||||||
## Enable WAL for the DB
|
|
||||||
## Set to false to avoid enabling WAL during startup.
|
|
||||||
## Note that if the DB already has WAL enabled, you will also need to disable WAL in the DB,
|
|
||||||
## this setting only prevents Vaultwarden from automatically enabling it on start.
|
|
||||||
## Please read project wiki page about this setting first before changing the value as it can
|
|
||||||
## cause performance degradation or might render the service unable to start.
|
|
||||||
# ENABLE_DB_WAL=true
|
|
||||||
|
|
||||||
## Database connection retries
|
|
||||||
## Number of times to retry the database connection during startup, with 1 second delay between each retry, set to 0 to retry indefinitely
|
|
||||||
# DB_CONNECTION_RETRIES=15
|
|
||||||
|
|
||||||
## Icon service
|
|
||||||
## The predefined icon services are: internal, bitwarden, duckduckgo, google.
|
|
||||||
## To specify a custom icon service, set a URL template with exactly one instance of `{}`,
|
|
||||||
## which is replaced with the domain. For example: `https://icon.example.com/domain/{}`.
|
|
||||||
##
|
##
|
||||||
## `internal` refers to Vaultwarden's built-in icon fetching implementation.
|
## Cron schedule of the job that cleans old auth requests from the auth requests table.
|
||||||
## If an external service is set, an icon request to Vaultwarden will return an HTTP
|
## Defaults to every minute. Set blank to disable this job.
|
||||||
## redirect to the corresponding icon at the external service. An external service may
|
# AUTH_REQUEST_PURGE_SCHEDULE="30 * * * * *"
|
||||||
## be useful if your Vaultwarden instance has no external network connectivity, or if
|
|
||||||
## you are concerned that someone may probe your instance to try to detect whether icons
|
|
||||||
## for certain sites have been cached.
|
|
||||||
# ICON_SERVICE=internal
|
|
||||||
|
|
||||||
## Icon redirect code
|
########################
|
||||||
## The HTTP status code to use for redirects to an external icon service.
|
### General settings ###
|
||||||
## The supported codes are 301 (legacy permanent), 302 (legacy temporary), 307 (temporary), and 308 (permanent).
|
########################
|
||||||
## Temporary redirects are useful while testing different icon services, but once a service
|
|
||||||
## has been decided on, consider using permanent redirects for cacheability. The legacy codes
|
## Domain settings
|
||||||
## are currently better supported by the Bitwarden clients.
|
## The domain must match the address from where you access the server
|
||||||
# ICON_REDIRECT_CODE=302
|
## It's recommended to configure this value, otherwise certain functionality might not work,
|
||||||
|
## like attachment downloads, email links and U2F.
|
||||||
|
## For U2F to work, the server must use HTTPS, you can use Let's Encrypt for free certs
|
||||||
|
## To use HTTPS, the recommended way is to put Vaultwarden behind a reverse proxy
|
||||||
|
## Details:
|
||||||
|
## - https://github.com/dani-garcia/vaultwarden/wiki/Enabling-HTTPS
|
||||||
|
## - https://github.com/dani-garcia/vaultwarden/wiki/Proxy-examples
|
||||||
|
## For development
|
||||||
|
# DOMAIN=http://localhost
|
||||||
|
## For public server
|
||||||
|
# DOMAIN=https://vw.domain.tld
|
||||||
|
## For public server (URL with port number)
|
||||||
|
# DOMAIN=https://vw.domain.tld:8443
|
||||||
|
## For public server (URL with path)
|
||||||
|
# DOMAIN=https://domain.tld/vw
|
||||||
|
|
||||||
|
## Controls whether users are allowed to create Bitwarden Sends.
|
||||||
|
## This setting applies globally to all users.
|
||||||
|
## To control this on a per-org basis instead, use the "Disable Send" org policy.
|
||||||
|
# SENDS_ALLOWED=true
|
||||||
|
|
||||||
|
## HIBP Api Key
|
||||||
|
## HaveIBeenPwned API Key, request it here: https://haveibeenpwned.com/API/Key
|
||||||
|
# HIBP_API_KEY=
|
||||||
|
|
||||||
|
## Per-organization attachment storage limit (KB)
|
||||||
|
## Max kilobytes of attachment storage allowed per organization.
|
||||||
|
## When this limit is reached, organization members will not be allowed to upload further attachments for ciphers owned by that organization.
|
||||||
|
# ORG_ATTACHMENT_LIMIT=
|
||||||
|
## Per-user attachment storage limit (KB)
|
||||||
|
## Max kilobytes of attachment storage allowed per user.
|
||||||
|
## When this limit is reached, the user will not be allowed to upload further attachments.
|
||||||
|
# USER_ATTACHMENT_LIMIT=
|
||||||
|
## Per-user send storage limit (KB)
|
||||||
|
## Max kilobytes of send storage allowed per user.
|
||||||
|
## When this limit is reached, the user will not be allowed to upload further sends.
|
||||||
|
# USER_SEND_LIMIT=
|
||||||
|
|
||||||
|
## Number of days to wait before auto-deleting a trashed item.
|
||||||
|
## If unset (the default), trashed items are not auto-deleted.
|
||||||
|
## This setting applies globally, so make sure to inform all users of any changes to this setting.
|
||||||
|
# TRASH_AUTO_DELETE_DAYS=
|
||||||
|
|
||||||
|
## Number of minutes to wait before a 2FA-enabled login is considered incomplete,
|
||||||
|
## resulting in an email notification. An incomplete 2FA login is one where the correct
|
||||||
|
## master password was provided but the required 2FA step was not completed, which
|
||||||
|
## potentially indicates a master password compromise. Set to 0 to disable this check.
|
||||||
|
## This setting applies globally to all users.
|
||||||
|
# INCOMPLETE_2FA_TIME_LIMIT=3
|
||||||
|
|
||||||
## Disable icon downloading
|
## Disable icon downloading
|
||||||
## Set to true to disable icon downloading in the internal icon service.
|
## Set to true to disable icon downloading in the internal icon service.
|
||||||
|
@ -205,38 +216,6 @@
|
||||||
## will be deleted eventually, but won't be downloaded again.
|
## will be deleted eventually, but won't be downloaded again.
|
||||||
# DISABLE_ICON_DOWNLOAD=false
|
# DISABLE_ICON_DOWNLOAD=false
|
||||||
|
|
||||||
## Icon download timeout
|
|
||||||
## Configure the timeout value when downloading the favicons.
|
|
||||||
## The default is 10 seconds, but this could be too low on slower network connections
|
|
||||||
# ICON_DOWNLOAD_TIMEOUT=10
|
|
||||||
|
|
||||||
## Icon blacklist Regex
|
|
||||||
## Any domains or IPs that match this regex won't be fetched by the icon service.
|
|
||||||
## Useful to hide other servers in the local network. Check the WIKI for more details
|
|
||||||
## NOTE: Always enclose this regex withing single quotes!
|
|
||||||
# ICON_BLACKLIST_REGEX='^(192\.168\.0\.[0-9]+|192\.168\.1\.[0-9]+)$'
|
|
||||||
|
|
||||||
## Any IP which is not defined as a global IP will be blacklisted.
|
|
||||||
## Useful to secure your internal environment: See https://en.wikipedia.org/wiki/Reserved_IP_addresses for a list of IPs which it will block
|
|
||||||
# ICON_BLACKLIST_NON_GLOBAL_IPS=true
|
|
||||||
|
|
||||||
## Disable 2FA remember
|
|
||||||
## Enabling this would force the users to use a second factor to login every time.
|
|
||||||
## Note that the checkbox would still be present, but ignored.
|
|
||||||
# DISABLE_2FA_REMEMBER=false
|
|
||||||
|
|
||||||
## Maximum attempts before an email token is reset and a new email will need to be sent.
|
|
||||||
# EMAIL_ATTEMPTS_LIMIT=3
|
|
||||||
|
|
||||||
## Token expiration time
|
|
||||||
## Maximum time in seconds a token is valid. The time the user has to open email client and copy token.
|
|
||||||
# EMAIL_EXPIRATION_TIME=600
|
|
||||||
|
|
||||||
## Email token size
|
|
||||||
## Number of digits in an email 2FA token (min: 6, max: 255).
|
|
||||||
## Note that the Bitwarden clients are hardcoded to mention 6 digit codes regardless of this setting!
|
|
||||||
# EMAIL_TOKEN_SIZE=6
|
|
||||||
|
|
||||||
## Controls if new users can register
|
## Controls if new users can register
|
||||||
# SIGNUPS_ALLOWED=true
|
# SIGNUPS_ALLOWED=true
|
||||||
|
|
||||||
|
@ -258,6 +237,11 @@
|
||||||
## even if SIGNUPS_ALLOWED is set to false
|
## even if SIGNUPS_ALLOWED is set to false
|
||||||
# SIGNUPS_DOMAINS_WHITELIST=example.com,example.net,example.org
|
# SIGNUPS_DOMAINS_WHITELIST=example.com,example.net,example.org
|
||||||
|
|
||||||
|
## Controls whether event logging is enabled for organizations
|
||||||
|
## This setting applies to organizations.
|
||||||
|
## Disabled by default. Also check the EVENT_CLEANUP_SCHEDULE and EVENTS_DAYS_RETAIN settings.
|
||||||
|
# ORG_EVENTS_ENABLED=false
|
||||||
|
|
||||||
## Controls which users can create new orgs.
|
## Controls which users can create new orgs.
|
||||||
## Blank or 'all' means all users can create orgs (this is the default):
|
## Blank or 'all' means all users can create orgs (this is the default):
|
||||||
# ORG_CREATION_USERS=
|
# ORG_CREATION_USERS=
|
||||||
|
@ -266,6 +250,122 @@
|
||||||
## A comma-separated list means only those users can create orgs:
|
## A comma-separated list means only those users can create orgs:
|
||||||
# ORG_CREATION_USERS=admin1@example.com,admin2@example.com
|
# ORG_CREATION_USERS=admin1@example.com,admin2@example.com
|
||||||
|
|
||||||
|
## Allows org admins to invite users, even when signups are disabled
|
||||||
|
# INVITATIONS_ALLOWED=true
|
||||||
|
## Name shown in the invitation emails that don't come from a specific organization
|
||||||
|
# INVITATION_ORG_NAME=Vaultwarden
|
||||||
|
|
||||||
|
## The number of hours after which an organization invite token, emergency access invite token,
|
||||||
|
## email verification token and deletion request token will expire (must be at least 1)
|
||||||
|
# INVITATION_EXPIRATION_HOURS=120
|
||||||
|
|
||||||
|
## Controls whether users can enable emergency access to their accounts.
|
||||||
|
## This setting applies globally to all users.
|
||||||
|
# EMERGENCY_ACCESS_ALLOWED=true
|
||||||
|
|
||||||
|
## Controls whether users can change their email.
|
||||||
|
## This setting applies globally to all users
|
||||||
|
# EMAIL_CHANGE_ALLOWED=true
|
||||||
|
|
||||||
|
## Number of server-side passwords hashing iterations for the password hash.
|
||||||
|
## The default for new users. If changed, it will be updated during login for existing users.
|
||||||
|
# PASSWORD_ITERATIONS=600000
|
||||||
|
|
||||||
|
## Controls whether users can set password hints. This setting applies globally to all users.
|
||||||
|
# PASSWORD_HINTS_ALLOWED=true
|
||||||
|
|
||||||
|
## Controls whether a password hint should be shown directly in the web page if
|
||||||
|
## SMTP service is not configured. Not recommended for publicly-accessible instances
|
||||||
|
## as this provides unauthenticated access to potentially sensitive data.
|
||||||
|
# SHOW_PASSWORD_HINT=false
|
||||||
|
|
||||||
|
#########################
|
||||||
|
### Advanced settings ###
|
||||||
|
#########################
|
||||||
|
|
||||||
|
## Client IP Header, used to identify the IP of the client, defaults to "X-Real-IP"
|
||||||
|
## Set to the string "none" (without quotes), to disable any headers and just use the remote IP
|
||||||
|
# IP_HEADER=X-Real-IP
|
||||||
|
|
||||||
|
## Icon service
|
||||||
|
## The predefined icon services are: internal, bitwarden, duckduckgo, google.
|
||||||
|
## To specify a custom icon service, set a URL template with exactly one instance of `{}`,
|
||||||
|
## which is replaced with the domain. For example: `https://icon.example.com/domain/{}`.
|
||||||
|
##
|
||||||
|
## `internal` refers to Vaultwarden's built-in icon fetching implementation.
|
||||||
|
## If an external service is set, an icon request to Vaultwarden will return an HTTP
|
||||||
|
## redirect to the corresponding icon at the external service. An external service may
|
||||||
|
## be useful if your Vaultwarden instance has no external network connectivity, or if
|
||||||
|
## you are concerned that someone may probe your instance to try to detect whether icons
|
||||||
|
## for certain sites have been cached.
|
||||||
|
# ICON_SERVICE=internal
|
||||||
|
|
||||||
|
## Icon redirect code
|
||||||
|
## The HTTP status code to use for redirects to an external icon service.
|
||||||
|
## The supported codes are 301 (legacy permanent), 302 (legacy temporary), 307 (temporary), and 308 (permanent).
|
||||||
|
## Temporary redirects are useful while testing different icon services, but once a service
|
||||||
|
## has been decided on, consider using permanent redirects for cacheability. The legacy codes
|
||||||
|
## are currently better supported by the Bitwarden clients.
|
||||||
|
# ICON_REDIRECT_CODE=302
|
||||||
|
|
||||||
|
## Cache time-to-live for successfully obtained icons, in seconds (0 is "forever")
|
||||||
|
## Default: 2592000 (30 days)
|
||||||
|
# ICON_CACHE_TTL=2592000
|
||||||
|
## Cache time-to-live for icons which weren't available, in seconds (0 is "forever")
|
||||||
|
## Default: 259200 (3 days)
|
||||||
|
# ICON_CACHE_NEGTTL=259200
|
||||||
|
|
||||||
|
## Icon download timeout
|
||||||
|
## Configure the timeout value when downloading the favicons.
|
||||||
|
## The default is 10 seconds, but this could be too low on slower network connections
|
||||||
|
# ICON_DOWNLOAD_TIMEOUT=10
|
||||||
|
|
||||||
|
## Icon blacklist Regex
|
||||||
|
## Any domains or IPs that match this regex won't be fetched by the icon service.
|
||||||
|
## Useful to hide other servers in the local network. Check the WIKI for more details
|
||||||
|
## NOTE: Always enclose this regex within single quotes!
|
||||||
|
# ICON_BLACKLIST_REGEX='^(192\.168\.0\.[0-9]+|192\.168\.1\.[0-9]+)$'
|
||||||
|
|
||||||
|
## Any IP which is not defined as a global IP will be blacklisted.
|
||||||
|
## Useful to secure your internal environment: See https://en.wikipedia.org/wiki/Reserved_IP_addresses for a list of IPs which it will block
|
||||||
|
# ICON_BLACKLIST_NON_GLOBAL_IPS=true
|
||||||
|
|
||||||
|
## Client Settings
|
||||||
|
## Enable experimental feature flags for clients.
|
||||||
|
## This is a comma-separated list of flags, e.g. "flag1,flag2,flag3".
|
||||||
|
##
|
||||||
|
## The following flags are available:
|
||||||
|
## - "autofill-overlay": Add an overlay menu to form fields for quick access to credentials.
|
||||||
|
## - "autofill-v2": Use the new autofill implementation.
|
||||||
|
## - "browser-fileless-import": Directly import credentials from other providers without a file.
|
||||||
|
## - "fido2-vault-credentials": Enable the use of FIDO2 security keys as second factor.
|
||||||
|
# EXPERIMENTAL_CLIENT_FEATURE_FLAGS=fido2-vault-credentials
|
||||||
|
|
||||||
|
## Require new device emails. When a user logs in an email is required to be sent.
|
||||||
|
## If sending the email fails the login attempt will fail!!
|
||||||
|
# REQUIRE_DEVICE_EMAIL=false
|
||||||
|
|
||||||
|
## Enable extended logging, which shows timestamps and targets in the logs
|
||||||
|
# EXTENDED_LOGGING=true
|
||||||
|
|
||||||
|
## Timestamp format used in extended logging.
|
||||||
|
## Format specifiers: https://docs.rs/chrono/latest/chrono/format/strftime
|
||||||
|
# LOG_TIMESTAMP_FORMAT="%Y-%m-%d %H:%M:%S.%3f"
|
||||||
|
|
||||||
|
## Logging to Syslog
|
||||||
|
## This requires extended logging
|
||||||
|
# USE_SYSLOG=false
|
||||||
|
|
||||||
|
## Logging to file
|
||||||
|
# LOG_FILE=/path/to/log
|
||||||
|
|
||||||
|
## Log level
|
||||||
|
## Change the verbosity of the log output
|
||||||
|
## Valid values are "trace", "debug", "info", "warn", "error" and "off"
|
||||||
|
## Setting it to "trace" or "debug" would also show logs for mounted
|
||||||
|
## routes and static file, websocket and alive requests
|
||||||
|
# LOG_LEVEL=info
|
||||||
|
|
||||||
## Token for the admin interface, preferably an Argon2 PCH string
|
## Token for the admin interface, preferably an Argon2 PCH string
|
||||||
## Vaultwarden has a built-in generator by calling `vaultwarden hash`
|
## Vaultwarden has a built-in generator by calling `vaultwarden hash`
|
||||||
## For details see: https://github.com/dani-garcia/vaultwarden/wiki/Enabling-admin-page#secure-the-admin_token
|
## For details see: https://github.com/dani-garcia/vaultwarden/wiki/Enabling-admin-page#secure-the-admin_token
|
||||||
|
@ -281,54 +381,13 @@
|
||||||
## meant to be used with the use of a separate auth layer in front
|
## meant to be used with the use of a separate auth layer in front
|
||||||
# DISABLE_ADMIN_TOKEN=false
|
# DISABLE_ADMIN_TOKEN=false
|
||||||
|
|
||||||
## Allows org admins to invite users, even when signups are disabled
|
## Number of seconds, on average, between admin login requests from the same IP address before rate limiting kicks in.
|
||||||
# INVITATIONS_ALLOWED=true
|
# ADMIN_RATELIMIT_SECONDS=300
|
||||||
## Name shown in the invitation emails that don't come from a specific organization
|
## Allow a burst of requests of up to this size, while maintaining the average indicated by `ADMIN_RATELIMIT_SECONDS`.
|
||||||
# INVITATION_ORG_NAME=Vaultwarden
|
# ADMIN_RATELIMIT_MAX_BURST=3
|
||||||
|
|
||||||
## The number of hours after which an organization invite token, emergency access invite token,
|
## Set the lifetime of admin sessions to this value (in minutes).
|
||||||
## email verification token and deletion request token will expire (must be at least 1)
|
# ADMIN_SESSION_LIFETIME=20
|
||||||
# INVITATION_EXPIRATION_HOURS=120
|
|
||||||
|
|
||||||
## Per-organization attachment storage limit (KB)
|
|
||||||
## Max kilobytes of attachment storage allowed per organization.
|
|
||||||
## When this limit is reached, organization members will not be allowed to upload further attachments for ciphers owned by that organization.
|
|
||||||
# ORG_ATTACHMENT_LIMIT=
|
|
||||||
## Per-user attachment storage limit (KB)
|
|
||||||
## Max kilobytes of attachment storage allowed per user.
|
|
||||||
## When this limit is reached, the user will not be allowed to upload further attachments.
|
|
||||||
# USER_ATTACHMENT_LIMIT=
|
|
||||||
|
|
||||||
## Number of days to wait before auto-deleting a trashed item.
|
|
||||||
## If unset (the default), trashed items are not auto-deleted.
|
|
||||||
## This setting applies globally, so make sure to inform all users of any changes to this setting.
|
|
||||||
# TRASH_AUTO_DELETE_DAYS=
|
|
||||||
|
|
||||||
## Number of minutes to wait before a 2FA-enabled login is considered incomplete,
|
|
||||||
## resulting in an email notification. An incomplete 2FA login is one where the correct
|
|
||||||
## master password was provided but the required 2FA step was not completed, which
|
|
||||||
## potentially indicates a master password compromise. Set to 0 to disable this check.
|
|
||||||
## This setting applies globally to all users.
|
|
||||||
# INCOMPLETE_2FA_TIME_LIMIT=3
|
|
||||||
|
|
||||||
## Number of server-side passwords hashing iterations for the password hash.
|
|
||||||
## The default for new users. If changed, it will be updated during login for existing users.
|
|
||||||
# PASSWORD_ITERATIONS=350000
|
|
||||||
|
|
||||||
## Controls whether users can set password hints. This setting applies globally to all users.
|
|
||||||
# PASSWORD_HINTS_ALLOWED=true
|
|
||||||
|
|
||||||
## Controls whether a password hint should be shown directly in the web page if
|
|
||||||
## SMTP service is not configured. Not recommended for publicly-accessible instances
|
|
||||||
## as this provides unauthenticated access to potentially sensitive data.
|
|
||||||
# SHOW_PASSWORD_HINT=false
|
|
||||||
|
|
||||||
## Domain settings
|
|
||||||
## The domain must match the address from where you access the server
|
|
||||||
## It's recommended to configure this value, otherwise certain functionality might not work,
|
|
||||||
## like attachment downloads, email links and U2F.
|
|
||||||
## For U2F to work, the server must use HTTPS, you can use Let's Encrypt for free certs
|
|
||||||
# DOMAIN=https://vw.domain.tld:8443
|
|
||||||
|
|
||||||
## Allowed iframe ancestors (Know the risks!)
|
## Allowed iframe ancestors (Know the risks!)
|
||||||
## https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Security-Policy/frame-ancestors
|
## https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Security-Policy/frame-ancestors
|
||||||
|
@ -343,13 +402,16 @@
|
||||||
## Note that this applies to both the login and the 2FA, so it's recommended to allow a burst size of at least 2.
|
## Note that this applies to both the login and the 2FA, so it's recommended to allow a burst size of at least 2.
|
||||||
# LOGIN_RATELIMIT_MAX_BURST=10
|
# LOGIN_RATELIMIT_MAX_BURST=10
|
||||||
|
|
||||||
## Number of seconds, on average, between admin login requests from the same IP address before rate limiting kicks in.
|
## BETA FEATURE: Groups
|
||||||
# ADMIN_RATELIMIT_SECONDS=300
|
## Controls whether group support is enabled for organizations
|
||||||
## Allow a burst of requests of up to this size, while maintaining the average indicated by `ADMIN_RATELIMIT_SECONDS`.
|
## This setting applies to organizations.
|
||||||
# ADMIN_RATELIMIT_MAX_BURST=3
|
## Disabled by default because this is a beta feature, it contains known issues!
|
||||||
|
## KNOW WHAT YOU ARE DOING!
|
||||||
|
# ORG_GROUPS_ENABLED=false
|
||||||
|
|
||||||
## Set the lifetime of admin sessions to this value (in minutes).
|
########################
|
||||||
# ADMIN_SESSION_LIFETIME=20
|
### MFA/2FA settings ###
|
||||||
|
########################
|
||||||
|
|
||||||
## Yubico (Yubikey) Settings
|
## Yubico (Yubikey) Settings
|
||||||
## Set your Client ID and Secret Key for Yubikey OTP
|
## Set your Client ID and Secret Key for Yubikey OTP
|
||||||
|
@ -370,6 +432,30 @@
|
||||||
## After that, you should be able to follow the rest of the guide linked above,
|
## After that, you should be able to follow the rest of the guide linked above,
|
||||||
## ignoring the fields that ask for the values that you already configured beforehand.
|
## ignoring the fields that ask for the values that you already configured beforehand.
|
||||||
|
|
||||||
|
## Email 2FA settings
|
||||||
|
## Email token size
|
||||||
|
## Number of digits in an email 2FA token (min: 6, max: 255).
|
||||||
|
## Note that the Bitwarden clients are hardcoded to mention 6 digit codes regardless of this setting!
|
||||||
|
# EMAIL_TOKEN_SIZE=6
|
||||||
|
##
|
||||||
|
## Token expiration time
|
||||||
|
## Maximum time in seconds a token is valid. The time the user has to open email client and copy token.
|
||||||
|
# EMAIL_EXPIRATION_TIME=600
|
||||||
|
##
|
||||||
|
## Maximum attempts before an email token is reset and a new email will need to be sent.
|
||||||
|
# EMAIL_ATTEMPTS_LIMIT=3
|
||||||
|
##
|
||||||
|
## Setup email 2FA regardless of any organization policy
|
||||||
|
# EMAIL_2FA_ENFORCE_ON_VERIFIED_INVITE=false
|
||||||
|
## Automatically setup email 2FA as fallback provider when needed
|
||||||
|
# EMAIL_2FA_AUTO_FALLBACK=false
|
||||||
|
|
||||||
|
## Other MFA/2FA settings
|
||||||
|
## Disable 2FA remember
|
||||||
|
## Enabling this would force the users to use a second factor to login every time.
|
||||||
|
## Note that the checkbox would still be present, but ignored.
|
||||||
|
# DISABLE_2FA_REMEMBER=false
|
||||||
|
##
|
||||||
## Authenticator Settings
|
## Authenticator Settings
|
||||||
## Disable authenticator time drifted codes to be valid.
|
## Disable authenticator time drifted codes to be valid.
|
||||||
## TOTP codes of the previous and next 30 seconds will be invalid
|
## TOTP codes of the previous and next 30 seconds will be invalid
|
||||||
|
@ -382,12 +468,9 @@
|
||||||
## In any case, if a code has been used it can not be used again, also codes which predates it will be invalid.
|
## In any case, if a code has been used it can not be used again, also codes which predates it will be invalid.
|
||||||
# AUTHENTICATOR_DISABLE_TIME_DRIFT=false
|
# AUTHENTICATOR_DISABLE_TIME_DRIFT=false
|
||||||
|
|
||||||
## Rocket specific settings
|
###########################
|
||||||
## See https://rocket.rs/v0.4/guide/configuration/ for more details.
|
### SMTP Email settings ###
|
||||||
# ROCKET_ADDRESS=0.0.0.0
|
###########################
|
||||||
# ROCKET_PORT=80 # Defaults to 80 in the Docker images, or 8000 otherwise.
|
|
||||||
# ROCKET_WORKERS=10
|
|
||||||
# ROCKET_TLS={certs="/path/to/certs.pem",key="/path/to/key.pem"}
|
|
||||||
|
|
||||||
## Mail specific settings, set SMTP_FROM and either SMTP_HOST or USE_SENDMAIL to enable the mail service.
|
## Mail specific settings, set SMTP_FROM and either SMTP_HOST or USE_SENDMAIL to enable the mail service.
|
||||||
## To make sure the email links are pointing to the correct host, set the DOMAIN variable.
|
## To make sure the email links are pointing to the correct host, set the DOMAIN variable.
|
||||||
|
@ -395,12 +478,19 @@
|
||||||
# SMTP_HOST=smtp.domain.tld
|
# SMTP_HOST=smtp.domain.tld
|
||||||
# SMTP_FROM=vaultwarden@domain.tld
|
# SMTP_FROM=vaultwarden@domain.tld
|
||||||
# SMTP_FROM_NAME=Vaultwarden
|
# SMTP_FROM_NAME=Vaultwarden
|
||||||
# SMTP_SECURITY=starttls # ("starttls", "force_tls", "off") Enable a secure connection. Default is "starttls" (Explicit - ports 587 or 25), "force_tls" (Implicit - port 465) or "off", no encryption (port 25)
|
|
||||||
# SMTP_PORT=587 # Ports 587 (submission) and 25 (smtp) are standard without encryption and with encryption via STARTTLS (Explicit TLS). Port 465 (submissions) is used for encrypted submission (Implicit TLS).
|
|
||||||
# SMTP_USERNAME=username
|
# SMTP_USERNAME=username
|
||||||
# SMTP_PASSWORD=password
|
# SMTP_PASSWORD=password
|
||||||
# SMTP_TIMEOUT=15
|
# SMTP_TIMEOUT=15
|
||||||
|
|
||||||
|
## Choose the type of secure connection for SMTP. The default is "starttls".
|
||||||
|
## The available options are:
|
||||||
|
## - "starttls": The default port is 587.
|
||||||
|
## - "force_tls": The default port is 465.
|
||||||
|
## - "off": The default port is 25.
|
||||||
|
## Ports 587 (submission) and 25 (smtp) are standard without encryption and with encryption via STARTTLS (Explicit TLS). Port 465 (submissions) is used for encrypted submission (Implicit TLS).
|
||||||
|
# SMTP_SECURITY=starttls
|
||||||
|
# SMTP_PORT=587
|
||||||
|
|
||||||
# Whether to send mail via the `sendmail` command
|
# Whether to send mail via the `sendmail` command
|
||||||
# USE_SENDMAIL=false
|
# USE_SENDMAIL=false
|
||||||
# Which sendmail command to use. The one found in the $PATH is used if not specified.
|
# Which sendmail command to use. The one found in the $PATH is used if not specified.
|
||||||
|
@ -409,7 +499,7 @@
|
||||||
## Defaults for SSL is "Plain" and "Login" and nothing for Non-SSL connections.
|
## Defaults for SSL is "Plain" and "Login" and nothing for Non-SSL connections.
|
||||||
## Possible values: ["Plain", "Login", "Xoauth2"].
|
## Possible values: ["Plain", "Login", "Xoauth2"].
|
||||||
## Multiple options need to be separated by a comma ','.
|
## Multiple options need to be separated by a comma ','.
|
||||||
# SMTP_AUTH_MECHANISM="Plain"
|
# SMTP_AUTH_MECHANISM=
|
||||||
|
|
||||||
## Server name sent during the SMTP HELO
|
## Server name sent during the SMTP HELO
|
||||||
## By default this value should be is on the machine's hostname,
|
## By default this value should be is on the machine's hostname,
|
||||||
|
@ -417,30 +507,34 @@
|
||||||
# HELO_NAME=
|
# HELO_NAME=
|
||||||
|
|
||||||
## Embed images as email attachments
|
## Embed images as email attachments
|
||||||
# SMTP_EMBED_IMAGES=false
|
# SMTP_EMBED_IMAGES=true
|
||||||
|
|
||||||
## SMTP debugging
|
## SMTP debugging
|
||||||
## When set to true this will output very detailed SMTP messages.
|
## When set to true this will output very detailed SMTP messages.
|
||||||
## WARNING: This could contain sensitive information like passwords and usernames! Only enable this during troubleshooting!
|
## WARNING: This could contain sensitive information like passwords and usernames! Only enable this during troubleshooting!
|
||||||
# SMTP_DEBUG=false
|
# SMTP_DEBUG=false
|
||||||
|
|
||||||
## Accept Invalid Hostnames
|
|
||||||
## DANGEROUS: This option introduces significant vulnerabilities to man-in-the-middle attacks!
|
|
||||||
## Only use this as a last resort if you are not able to use a valid certificate.
|
|
||||||
# SMTP_ACCEPT_INVALID_HOSTNAMES=false
|
|
||||||
|
|
||||||
## Accept Invalid Certificates
|
## Accept Invalid Certificates
|
||||||
## DANGEROUS: This option introduces significant vulnerabilities to man-in-the-middle attacks!
|
## DANGEROUS: This option introduces significant vulnerabilities to man-in-the-middle attacks!
|
||||||
## Only use this as a last resort if you are not able to use a valid certificate.
|
## Only use this as a last resort if you are not able to use a valid certificate.
|
||||||
## If the Certificate is valid but the hostname doesn't match, please use SMTP_ACCEPT_INVALID_HOSTNAMES instead.
|
## If the Certificate is valid but the hostname doesn't match, please use SMTP_ACCEPT_INVALID_HOSTNAMES instead.
|
||||||
# SMTP_ACCEPT_INVALID_CERTS=false
|
# SMTP_ACCEPT_INVALID_CERTS=false
|
||||||
|
|
||||||
## Require new device emails. When a user logs in an email is required to be sent.
|
## Accept Invalid Hostnames
|
||||||
## If sending the email fails the login attempt will fail!!
|
## DANGEROUS: This option introduces significant vulnerabilities to man-in-the-middle attacks!
|
||||||
# REQUIRE_DEVICE_EMAIL=false
|
## Only use this as a last resort if you are not able to use a valid certificate.
|
||||||
|
# SMTP_ACCEPT_INVALID_HOSTNAMES=false
|
||||||
|
|
||||||
|
#######################
|
||||||
|
### Rocket settings ###
|
||||||
|
#######################
|
||||||
|
|
||||||
|
## Rocket specific settings
|
||||||
|
## See https://rocket.rs/v0.5/guide/configuration/ for more details.
|
||||||
|
# ROCKET_ADDRESS=0.0.0.0
|
||||||
|
## The default port is 8000, unless running in a Docker container, in which case it is 80.
|
||||||
|
# ROCKET_PORT=8000
|
||||||
|
# ROCKET_TLS={certs="/path/to/certs.pem",key="/path/to/key.pem"}
|
||||||
|
|
||||||
## HIBP Api Key
|
|
||||||
## HaveIBeenPwned API Key, request it here: https://haveibeenpwned.com/API/Key
|
|
||||||
# HIBP_API_KEY=
|
|
||||||
|
|
||||||
# vim: syntax=ini
|
# vim: syntax=ini
|
||||||
|
|
3
.github/CODEOWNERS
vendored
Normal file
3
.github/CODEOWNERS
vendored
Normal file
|
@ -0,0 +1,3 @@
|
||||||
|
/.github @dani-garcia @BlackDex
|
||||||
|
/.github/CODEOWNERS @dani-garcia @BlackDex
|
||||||
|
/.github/workflows/** @dani-garcia @BlackDex
|
75
.github/workflows/build.yml
vendored
75
.github/workflows/build.yml
vendored
|
@ -8,9 +8,11 @@ on:
|
||||||
- "migrations/**"
|
- "migrations/**"
|
||||||
- "Cargo.*"
|
- "Cargo.*"
|
||||||
- "build.rs"
|
- "build.rs"
|
||||||
- "rust-toolchain"
|
- "rust-toolchain.toml"
|
||||||
- "rustfmt.toml"
|
- "rustfmt.toml"
|
||||||
- "diesel.toml"
|
- "diesel.toml"
|
||||||
|
- "docker/Dockerfile.j2"
|
||||||
|
- "docker/DockerSettings.yaml"
|
||||||
pull_request:
|
pull_request:
|
||||||
paths:
|
paths:
|
||||||
- ".github/workflows/build.yml"
|
- ".github/workflows/build.yml"
|
||||||
|
@ -18,19 +20,20 @@ on:
|
||||||
- "migrations/**"
|
- "migrations/**"
|
||||||
- "Cargo.*"
|
- "Cargo.*"
|
||||||
- "build.rs"
|
- "build.rs"
|
||||||
- "rust-toolchain"
|
- "rust-toolchain.toml"
|
||||||
- "rustfmt.toml"
|
- "rustfmt.toml"
|
||||||
- "diesel.toml"
|
- "diesel.toml"
|
||||||
|
- "docker/Dockerfile.j2"
|
||||||
|
- "docker/DockerSettings.yaml"
|
||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
build:
|
build:
|
||||||
runs-on: ubuntu-20.04
|
runs-on: ubuntu-22.04
|
||||||
timeout-minutes: 120
|
timeout-minutes: 120
|
||||||
# Make warnings errors, this is to prevent warnings slipping through.
|
# Make warnings errors, this is to prevent warnings slipping through.
|
||||||
# This is done globally to prevent rebuilds when the RUSTFLAGS env variable changes.
|
# This is done globally to prevent rebuilds when the RUSTFLAGS env variable changes.
|
||||||
env:
|
env:
|
||||||
RUSTFLAGS: "-D warnings"
|
RUSTFLAGS: "-D warnings"
|
||||||
CARGO_REGISTRIES_CRATES_IO_PROTOCOL: sparse
|
|
||||||
strategy:
|
strategy:
|
||||||
fail-fast: false
|
fail-fast: false
|
||||||
matrix:
|
matrix:
|
||||||
|
@ -43,13 +46,13 @@ jobs:
|
||||||
steps:
|
steps:
|
||||||
# Checkout the repo
|
# Checkout the repo
|
||||||
- name: "Checkout"
|
- name: "Checkout"
|
||||||
uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3
|
uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b #v4.1.4
|
||||||
# End Checkout the repo
|
# End Checkout the repo
|
||||||
|
|
||||||
|
|
||||||
# Install dependencies
|
# Install dependencies
|
||||||
- name: "Install dependencies Ubuntu"
|
- name: "Install dependencies Ubuntu"
|
||||||
run: sudo apt-get update && sudo apt-get install -y --no-install-recommends openssl sqlite build-essential libmariadb-dev-compat libpq-dev libssl-dev pkg-config
|
run: sudo apt-get update && sudo apt-get install -y --no-install-recommends openssl build-essential libmariadb-dev-compat libpq-dev libssl-dev pkg-config
|
||||||
# End Install dependencies
|
# End Install dependencies
|
||||||
|
|
||||||
|
|
||||||
|
@ -59,7 +62,7 @@ jobs:
|
||||||
shell: bash
|
shell: bash
|
||||||
run: |
|
run: |
|
||||||
if [[ "${{ matrix.channel }}" == 'rust-toolchain' ]]; then
|
if [[ "${{ matrix.channel }}" == 'rust-toolchain' ]]; then
|
||||||
RUST_TOOLCHAIN="$(cat rust-toolchain)"
|
RUST_TOOLCHAIN="$(grep -oP 'channel.*"(\K.*?)(?=")' rust-toolchain.toml)"
|
||||||
elif [[ "${{ matrix.channel }}" == 'msrv' ]]; then
|
elif [[ "${{ matrix.channel }}" == 'msrv' ]]; then
|
||||||
RUST_TOOLCHAIN="$(grep -oP 'rust-version.*"(\K.*?)(?=")' Cargo.toml)"
|
RUST_TOOLCHAIN="$(grep -oP 'rust-version.*"(\K.*?)(?=")' Cargo.toml)"
|
||||||
else
|
else
|
||||||
|
@ -71,7 +74,7 @@ jobs:
|
||||||
|
|
||||||
# Only install the clippy and rustfmt components on the default rust-toolchain
|
# Only install the clippy and rustfmt components on the default rust-toolchain
|
||||||
- name: "Install rust-toolchain version"
|
- name: "Install rust-toolchain version"
|
||||||
uses: dtolnay/rust-toolchain@b44cb146d03e8d870c57ab64b80f04586349ca5d # master @ 2023-03-28 - 06:32 GMT+2
|
uses: dtolnay/rust-toolchain@bb45937a053e097f8591208d8e74c90db1873d07 # master @ Apr 14, 2024, 9:02 PM GMT+2
|
||||||
if: ${{ matrix.channel == 'rust-toolchain' }}
|
if: ${{ matrix.channel == 'rust-toolchain' }}
|
||||||
with:
|
with:
|
||||||
toolchain: "${{steps.toolchain.outputs.RUST_TOOLCHAIN}}"
|
toolchain: "${{steps.toolchain.outputs.RUST_TOOLCHAIN}}"
|
||||||
|
@ -81,17 +84,19 @@ jobs:
|
||||||
|
|
||||||
# Install the any other channel to be used for which we do not execute clippy and rustfmt
|
# Install the any other channel to be used for which we do not execute clippy and rustfmt
|
||||||
- name: "Install MSRV version"
|
- name: "Install MSRV version"
|
||||||
uses: dtolnay/rust-toolchain@b44cb146d03e8d870c57ab64b80f04586349ca5d # master @ 2023-03-28 - 06:32 GMT+2
|
uses: dtolnay/rust-toolchain@bb45937a053e097f8591208d8e74c90db1873d07 # master @ Apr 14, 2024, 9:02 PM GMT+2
|
||||||
if: ${{ matrix.channel != 'rust-toolchain' }}
|
if: ${{ matrix.channel != 'rust-toolchain' }}
|
||||||
with:
|
with:
|
||||||
toolchain: "${{steps.toolchain.outputs.RUST_TOOLCHAIN}}"
|
toolchain: "${{steps.toolchain.outputs.RUST_TOOLCHAIN}}"
|
||||||
# End Install the MSRV channel to be used
|
# End Install the MSRV channel to be used
|
||||||
|
|
||||||
|
# Set the current matrix toolchain version as default
|
||||||
# Enable Rust Caching
|
- name: "Set toolchain ${{steps.toolchain.outputs.RUST_TOOLCHAIN}} as default"
|
||||||
- uses: Swatinem/rust-cache@2656b87321093db1cb55fbd73183d195214fdfd1 # v2.5.0
|
run: |
|
||||||
# End Enable Rust Caching
|
# Remove the rust-toolchain.toml
|
||||||
|
rm rust-toolchain.toml
|
||||||
|
# Set the default
|
||||||
|
rustup default ${{steps.toolchain.outputs.RUST_TOOLCHAIN}}
|
||||||
|
|
||||||
# Show environment
|
# Show environment
|
||||||
- name: "Show environment"
|
- name: "Show environment"
|
||||||
|
@ -100,47 +105,55 @@ jobs:
|
||||||
cargo -vV
|
cargo -vV
|
||||||
# End Show environment
|
# End Show environment
|
||||||
|
|
||||||
|
# Enable Rust Caching
|
||||||
|
- uses: Swatinem/rust-cache@23bce251a8cd2ffc3c1075eaa2367cf899916d84 # v2.7.3
|
||||||
|
with:
|
||||||
|
# Use a custom prefix-key to force a fresh start. This is sometimes needed with bigger changes.
|
||||||
|
# Like changing the build host from Ubuntu 20.04 to 22.04 for example.
|
||||||
|
# Only update when really needed! Use a <year>.<month>[.<inc>] format.
|
||||||
|
prefix-key: "v2023.07-rust"
|
||||||
|
# End Enable Rust Caching
|
||||||
|
|
||||||
# Run cargo tests (In release mode to speed up future builds)
|
# Run cargo tests
|
||||||
# First test all features together, afterwards test them separately.
|
# First test all features together, afterwards test them separately.
|
||||||
- name: "test features: sqlite,mysql,postgresql,enable_mimalloc"
|
- name: "test features: sqlite,mysql,postgresql,enable_mimalloc"
|
||||||
id: test_sqlite_mysql_postgresql_mimalloc
|
id: test_sqlite_mysql_postgresql_mimalloc
|
||||||
if: $${{ always() }}
|
if: $${{ always() }}
|
||||||
run: |
|
run: |
|
||||||
cargo test --release --features sqlite,mysql,postgresql,enable_mimalloc
|
cargo test --features sqlite,mysql,postgresql,enable_mimalloc
|
||||||
|
|
||||||
- name: "test features: sqlite,mysql,postgresql"
|
- name: "test features: sqlite,mysql,postgresql"
|
||||||
id: test_sqlite_mysql_postgresql
|
id: test_sqlite_mysql_postgresql
|
||||||
if: $${{ always() }}
|
if: $${{ always() }}
|
||||||
run: |
|
run: |
|
||||||
cargo test --release --features sqlite,mysql,postgresql
|
cargo test --features sqlite,mysql,postgresql
|
||||||
|
|
||||||
- name: "test features: sqlite"
|
- name: "test features: sqlite"
|
||||||
id: test_sqlite
|
id: test_sqlite
|
||||||
if: $${{ always() }}
|
if: $${{ always() }}
|
||||||
run: |
|
run: |
|
||||||
cargo test --release --features sqlite
|
cargo test --features sqlite
|
||||||
|
|
||||||
- name: "test features: mysql"
|
- name: "test features: mysql"
|
||||||
id: test_mysql
|
id: test_mysql
|
||||||
if: $${{ always() }}
|
if: $${{ always() }}
|
||||||
run: |
|
run: |
|
||||||
cargo test --release --features mysql
|
cargo test --features mysql
|
||||||
|
|
||||||
- name: "test features: postgresql"
|
- name: "test features: postgresql"
|
||||||
id: test_postgresql
|
id: test_postgresql
|
||||||
if: $${{ always() }}
|
if: $${{ always() }}
|
||||||
run: |
|
run: |
|
||||||
cargo test --release --features postgresql
|
cargo test --features postgresql
|
||||||
# End Run cargo tests
|
# End Run cargo tests
|
||||||
|
|
||||||
|
|
||||||
# Run cargo clippy, and fail on warnings (In release mode to speed up future builds)
|
# Run cargo clippy, and fail on warnings
|
||||||
- name: "clippy features: sqlite,mysql,postgresql,enable_mimalloc"
|
- name: "clippy features: sqlite,mysql,postgresql,enable_mimalloc"
|
||||||
id: clippy
|
id: clippy
|
||||||
if: ${{ always() && matrix.channel == 'rust-toolchain' }}
|
if: ${{ always() && matrix.channel == 'rust-toolchain' }}
|
||||||
run: |
|
run: |
|
||||||
cargo clippy --release --features sqlite,mysql,postgresql,enable_mimalloc -- -D warnings
|
cargo clippy --features sqlite,mysql,postgresql,enable_mimalloc -- -D warnings
|
||||||
# End Run cargo clippy
|
# End Run cargo clippy
|
||||||
|
|
||||||
|
|
||||||
|
@ -182,21 +195,3 @@ jobs:
|
||||||
run: |
|
run: |
|
||||||
echo "### :tada: Checks Passed!" >> $GITHUB_STEP_SUMMARY
|
echo "### :tada: Checks Passed!" >> $GITHUB_STEP_SUMMARY
|
||||||
echo "" >> $GITHUB_STEP_SUMMARY
|
echo "" >> $GITHUB_STEP_SUMMARY
|
||||||
|
|
||||||
|
|
||||||
# Build the binary to upload to the artifacts
|
|
||||||
- name: "build features: sqlite,mysql,postgresql"
|
|
||||||
if: ${{ matrix.channel == 'rust-toolchain' }}
|
|
||||||
run: |
|
|
||||||
cargo build --release --features sqlite,mysql,postgresql
|
|
||||||
# End Build the binary
|
|
||||||
|
|
||||||
|
|
||||||
# Upload artifact to Github Actions
|
|
||||||
- name: "Upload artifact"
|
|
||||||
uses: actions/upload-artifact@0b7f8abb1508181956e8e162db84b466c27e18ce # v3.1.2
|
|
||||||
if: ${{ matrix.channel == 'rust-toolchain' }}
|
|
||||||
with:
|
|
||||||
name: vaultwarden
|
|
||||||
path: target/release/vaultwarden
|
|
||||||
# End Upload artifact to Github Actions
|
|
||||||
|
|
7
.github/workflows/hadolint.yml
vendored
7
.github/workflows/hadolint.yml
vendored
|
@ -8,15 +8,14 @@ on: [
|
||||||
jobs:
|
jobs:
|
||||||
hadolint:
|
hadolint:
|
||||||
name: Validate Dockerfile syntax
|
name: Validate Dockerfile syntax
|
||||||
runs-on: ubuntu-20.04
|
runs-on: ubuntu-22.04
|
||||||
timeout-minutes: 30
|
timeout-minutes: 30
|
||||||
steps:
|
steps:
|
||||||
# Checkout the repo
|
# Checkout the repo
|
||||||
- name: Checkout
|
- name: Checkout
|
||||||
uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3
|
uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4
|
||||||
# End Checkout the repo
|
# End Checkout the repo
|
||||||
|
|
||||||
|
|
||||||
# Download hadolint - https://github.com/hadolint/hadolint/releases
|
# Download hadolint - https://github.com/hadolint/hadolint/releases
|
||||||
- name: Download hadolint
|
- name: Download hadolint
|
||||||
shell: bash
|
shell: bash
|
||||||
|
@ -30,5 +29,5 @@ jobs:
|
||||||
# Test Dockerfiles
|
# Test Dockerfiles
|
||||||
- name: Run hadolint
|
- name: Run hadolint
|
||||||
shell: bash
|
shell: bash
|
||||||
run: git ls-files --exclude='docker/*/Dockerfile*' --ignored --cached | xargs hadolint
|
run: hadolint docker/Dockerfile.{debian,alpine}
|
||||||
# End Test Dockerfiles
|
# End Test Dockerfiles
|
||||||
|
|
289
.github/workflows/release.yml
vendored
289
.github/workflows/release.yml
vendored
|
@ -2,21 +2,10 @@ name: Release
|
||||||
|
|
||||||
on:
|
on:
|
||||||
push:
|
push:
|
||||||
paths:
|
branches:
|
||||||
- ".github/workflows/release.yml"
|
|
||||||
- "src/**"
|
|
||||||
- "migrations/**"
|
|
||||||
- "hooks/**"
|
|
||||||
- "docker/**"
|
|
||||||
- "Cargo.*"
|
|
||||||
- "build.rs"
|
|
||||||
- "diesel.toml"
|
|
||||||
- "rust-toolchain"
|
|
||||||
|
|
||||||
branches: # Only on paths above
|
|
||||||
- main
|
- main
|
||||||
|
|
||||||
tags: # Always, regardless of paths above
|
tags:
|
||||||
- '*'
|
- '*'
|
||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
|
@ -24,34 +13,31 @@ jobs:
|
||||||
# Some checks to determine if we need to continue with building a new docker.
|
# Some checks to determine if we need to continue with building a new docker.
|
||||||
# We will skip this check if we are creating a tag, because that has the same hash as a previous run already.
|
# We will skip this check if we are creating a tag, because that has the same hash as a previous run already.
|
||||||
skip_check:
|
skip_check:
|
||||||
runs-on: ubuntu-20.04
|
runs-on: ubuntu-22.04
|
||||||
if: ${{ github.repository == 'dani-garcia/vaultwarden' }}
|
if: ${{ github.repository == 'dani-garcia/vaultwarden' }}
|
||||||
outputs:
|
outputs:
|
||||||
should_skip: ${{ steps.skip_check.outputs.should_skip }}
|
should_skip: ${{ steps.skip_check.outputs.should_skip }}
|
||||||
steps:
|
steps:
|
||||||
- name: Skip Duplicates Actions
|
- name: Skip Duplicates Actions
|
||||||
id: skip_check
|
id: skip_check
|
||||||
uses: fkirc/skip-duplicate-actions@12aca0a884f6137d619d6a8a09fcc3406ced5281 # v5.3.0
|
uses: fkirc/skip-duplicate-actions@f75f66ce1886f00957d99748a42c724f4330bdcf # v5.3.1
|
||||||
with:
|
with:
|
||||||
cancel_others: 'true'
|
cancel_others: 'true'
|
||||||
# Only run this when not creating a tag
|
# Only run this when not creating a tag
|
||||||
if: ${{ startsWith(github.ref, 'refs/heads/') }}
|
if: ${{ github.ref_type == 'branch' }}
|
||||||
|
|
||||||
docker-build:
|
docker-build:
|
||||||
runs-on: ubuntu-20.04
|
runs-on: ubuntu-22.04
|
||||||
timeout-minutes: 120
|
timeout-minutes: 120
|
||||||
needs: skip_check
|
needs: skip_check
|
||||||
# Start a local docker registry to be used to generate multi-arch images.
|
if: ${{ needs.skip_check.outputs.should_skip != 'true' && github.repository == 'dani-garcia/vaultwarden' }}
|
||||||
|
# Start a local docker registry to extract the final Alpine static build binaries
|
||||||
services:
|
services:
|
||||||
registry:
|
registry:
|
||||||
image: registry:2
|
image: registry:2
|
||||||
ports:
|
ports:
|
||||||
- 5000:5000
|
- 5000:5000
|
||||||
env:
|
env:
|
||||||
# Use BuildKit (https://docs.docker.com/build/buildkit/) for better
|
|
||||||
# build performance and the ability to copy extended file attributes
|
|
||||||
# (e.g., for executable capabilities) across build phases.
|
|
||||||
DOCKER_BUILDKIT: 1
|
|
||||||
SOURCE_COMMIT: ${{ github.sha }}
|
SOURCE_COMMIT: ${{ github.sha }}
|
||||||
SOURCE_REPOSITORY_URL: "https://github.com/${{ github.repository }}"
|
SOURCE_REPOSITORY_URL: "https://github.com/${{ github.repository }}"
|
||||||
# The *_REPO variables need to be configured as repository variables
|
# The *_REPO variables need to be configured as repository variables
|
||||||
|
@ -65,7 +51,6 @@ jobs:
|
||||||
# QUAY_REPO needs to be 'quay.io/<user>/<repo>'
|
# QUAY_REPO needs to be 'quay.io/<user>/<repo>'
|
||||||
# Check for Quay.io credentials in secrets
|
# Check for Quay.io credentials in secrets
|
||||||
HAVE_QUAY_LOGIN: ${{ vars.QUAY_REPO != '' && secrets.QUAY_USERNAME != '' && secrets.QUAY_TOKEN != '' }}
|
HAVE_QUAY_LOGIN: ${{ vars.QUAY_REPO != '' && secrets.QUAY_USERNAME != '' && secrets.QUAY_TOKEN != '' }}
|
||||||
if: ${{ needs.skip_check.outputs.should_skip != 'true' && github.repository == 'dani-garcia/vaultwarden' }}
|
|
||||||
strategy:
|
strategy:
|
||||||
matrix:
|
matrix:
|
||||||
base_image: ["debian","alpine"]
|
base_image: ["debian","alpine"]
|
||||||
|
@ -73,163 +58,201 @@ jobs:
|
||||||
steps:
|
steps:
|
||||||
# Checkout the repo
|
# Checkout the repo
|
||||||
- name: Checkout
|
- name: Checkout
|
||||||
uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3
|
uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4
|
||||||
with:
|
with:
|
||||||
fetch-depth: 0
|
fetch-depth: 0
|
||||||
|
|
||||||
# Determine Docker Tag
|
- name: Initialize QEMU binfmt support
|
||||||
- name: Init Variables
|
uses: docker/setup-qemu-action@68827325e0b33c7199eb31dd4e31fbe9023e06e3 # v3.0.0
|
||||||
id: vars
|
with:
|
||||||
|
platforms: "arm64,arm"
|
||||||
|
|
||||||
|
# Start Docker Buildx
|
||||||
|
- name: Setup Docker Buildx
|
||||||
|
uses: docker/setup-buildx-action@d70bba72b1f3fd22344832f00baa16ece964efeb # v3.3.0
|
||||||
|
# https://github.com/moby/buildkit/issues/3969
|
||||||
|
# Also set max parallelism to 2, the default of 4 breaks GitHub Actions
|
||||||
|
with:
|
||||||
|
buildkitd-config-inline: |
|
||||||
|
[worker.oci]
|
||||||
|
max-parallelism = 2
|
||||||
|
driver-opts: |
|
||||||
|
network=host
|
||||||
|
|
||||||
|
# Determine Base Tags and Source Version
|
||||||
|
- name: Determine Base Tags and Source Version
|
||||||
shell: bash
|
shell: bash
|
||||||
run: |
|
run: |
|
||||||
# Check which main tag we are going to build determined by github.ref
|
# Check which main tag we are going to build determined by github.ref_type
|
||||||
if [[ "${{ github.ref }}" == refs/tags/* ]]; then
|
if [[ "${{ github.ref_type }}" == "tag" ]]; then
|
||||||
echo "DOCKER_TAG=${GITHUB_REF#refs/*/}" | tee -a "${GITHUB_OUTPUT}"
|
echo "BASE_TAGS=latest,${GITHUB_REF#refs/*/}" | tee -a "${GITHUB_ENV}"
|
||||||
elif [[ "${{ github.ref }}" == refs/heads/* ]]; then
|
elif [[ "${{ github.ref_type }}" == "branch" ]]; then
|
||||||
echo "DOCKER_TAG=testing" | tee -a "${GITHUB_OUTPUT}"
|
echo "BASE_TAGS=testing" | tee -a "${GITHUB_ENV}"
|
||||||
fi
|
fi
|
||||||
# End Determine Docker Tag
|
|
||||||
|
# Get the Source Version for this release
|
||||||
|
GIT_EXACT_TAG="$(git describe --tags --abbrev=0 --exact-match 2>/dev/null || true)"
|
||||||
|
if [[ -n "${GIT_EXACT_TAG}" ]]; then
|
||||||
|
echo "SOURCE_VERSION=${GIT_EXACT_TAG}" | tee -a "${GITHUB_ENV}"
|
||||||
|
else
|
||||||
|
GIT_LAST_TAG="$(git describe --tags --abbrev=0)"
|
||||||
|
echo "SOURCE_VERSION=${GIT_LAST_TAG}-${SOURCE_COMMIT:0:8}" | tee -a "${GITHUB_ENV}"
|
||||||
|
fi
|
||||||
|
# End Determine Base Tags
|
||||||
|
|
||||||
# Login to Docker Hub
|
# Login to Docker Hub
|
||||||
- name: Login to Docker Hub
|
- name: Login to Docker Hub
|
||||||
uses: docker/login-action@465a07811f14bebb1938fbed4728c6a1ff8901fc # v2.2.0
|
uses: docker/login-action@e92390c5fb421da1463c202d546fed0ec5c39f20 # v3.1.0
|
||||||
with:
|
with:
|
||||||
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||||
password: ${{ secrets.DOCKERHUB_TOKEN }}
|
password: ${{ secrets.DOCKERHUB_TOKEN }}
|
||||||
if: ${{ env.HAVE_DOCKERHUB_LOGIN == 'true' }}
|
if: ${{ env.HAVE_DOCKERHUB_LOGIN == 'true' }}
|
||||||
|
|
||||||
|
- name: Add registry for DockerHub
|
||||||
|
if: ${{ env.HAVE_DOCKERHUB_LOGIN == 'true' }}
|
||||||
|
shell: bash
|
||||||
|
run: |
|
||||||
|
echo "CONTAINER_REGISTRIES=${{ vars.DOCKERHUB_REPO }}" | tee -a "${GITHUB_ENV}"
|
||||||
|
|
||||||
# Login to GitHub Container Registry
|
# Login to GitHub Container Registry
|
||||||
- name: Login to GitHub Container Registry
|
- name: Login to GitHub Container Registry
|
||||||
uses: docker/login-action@465a07811f14bebb1938fbed4728c6a1ff8901fc # v2.2.0
|
uses: docker/login-action@e92390c5fb421da1463c202d546fed0ec5c39f20 # v3.1.0
|
||||||
with:
|
with:
|
||||||
registry: ghcr.io
|
registry: ghcr.io
|
||||||
username: ${{ github.repository_owner }}
|
username: ${{ github.repository_owner }}
|
||||||
password: ${{ secrets.GITHUB_TOKEN }}
|
password: ${{ secrets.GITHUB_TOKEN }}
|
||||||
if: ${{ env.HAVE_GHCR_LOGIN == 'true' }}
|
if: ${{ env.HAVE_GHCR_LOGIN == 'true' }}
|
||||||
|
|
||||||
|
- name: Add registry for ghcr.io
|
||||||
|
if: ${{ env.HAVE_GHCR_LOGIN == 'true' }}
|
||||||
|
shell: bash
|
||||||
|
run: |
|
||||||
|
echo "CONTAINER_REGISTRIES=${CONTAINER_REGISTRIES:+${CONTAINER_REGISTRIES},}${{ vars.GHCR_REPO }}" | tee -a "${GITHUB_ENV}"
|
||||||
|
|
||||||
|
- name: Add registry for ghcr.io
|
||||||
|
if: ${{ env.HAVE_GHCR_LOGIN == 'true' }}
|
||||||
|
shell: bash
|
||||||
|
run: |
|
||||||
|
echo "CONTAINER_REGISTRIES=${CONTAINER_REGISTRIES:+${CONTAINER_REGISTRIES},}${{ vars.GHCR_REPO }}" | tee -a "${GITHUB_ENV}"
|
||||||
|
|
||||||
# Login to Quay.io
|
# Login to Quay.io
|
||||||
- name: Login to Quay.io
|
- name: Login to Quay.io
|
||||||
uses: docker/login-action@465a07811f14bebb1938fbed4728c6a1ff8901fc # v2.2.0
|
uses: docker/login-action@e92390c5fb421da1463c202d546fed0ec5c39f20 # v3.1.0
|
||||||
with:
|
with:
|
||||||
registry: quay.io
|
registry: quay.io
|
||||||
username: ${{ secrets.QUAY_USERNAME }}
|
username: ${{ secrets.QUAY_USERNAME }}
|
||||||
password: ${{ secrets.QUAY_TOKEN }}
|
password: ${{ secrets.QUAY_TOKEN }}
|
||||||
if: ${{ env.HAVE_QUAY_LOGIN == 'true' }}
|
if: ${{ env.HAVE_QUAY_LOGIN == 'true' }}
|
||||||
|
|
||||||
# Debian
|
- name: Add registry for Quay.io
|
||||||
|
if: ${{ env.HAVE_QUAY_LOGIN == 'true' }}
|
||||||
# Docker Hub
|
|
||||||
- name: Build Debian based images (docker.io)
|
|
||||||
shell: bash
|
shell: bash
|
||||||
env:
|
|
||||||
DOCKER_REPO: "${{ vars.DOCKERHUB_REPO }}"
|
|
||||||
DOCKER_TAG: "${{steps.vars.outputs.DOCKER_TAG}}"
|
|
||||||
run: |
|
run: |
|
||||||
./hooks/build
|
echo "CONTAINER_REGISTRIES=${CONTAINER_REGISTRIES:+${CONTAINER_REGISTRIES},}${{ vars.QUAY_REPO }}" | tee -a "${GITHUB_ENV}"
|
||||||
if: ${{ matrix.base_image == 'debian' && env.HAVE_DOCKERHUB_LOGIN == 'true' }}
|
|
||||||
|
|
||||||
- name: Push Debian based images (docker.io)
|
- name: Configure build cache from/to
|
||||||
shell: bash
|
shell: bash
|
||||||
env:
|
|
||||||
DOCKER_REPO: "${{ vars.DOCKERHUB_REPO }}"
|
|
||||||
DOCKER_TAG: "${{steps.vars.outputs.DOCKER_TAG}}"
|
|
||||||
run: |
|
run: |
|
||||||
./hooks/push
|
#
|
||||||
if: ${{ matrix.base_image == 'debian' && env.HAVE_DOCKERHUB_LOGIN == 'true' }}
|
# Check if there is a GitHub Container Registry Login and use it for caching
|
||||||
|
if [[ -n "${HAVE_GHCR_LOGIN}" ]]; then
|
||||||
|
echo "BAKE_CACHE_FROM=type=registry,ref=${{ vars.GHCR_REPO }}-buildcache:${{ matrix.base_image }}" | tee -a "${GITHUB_ENV}"
|
||||||
|
echo "BAKE_CACHE_TO=type=registry,ref=${{ vars.GHCR_REPO }}-buildcache:${{ matrix.base_image }},mode=max" | tee -a "${GITHUB_ENV}"
|
||||||
|
else
|
||||||
|
echo "BAKE_CACHE_FROM="
|
||||||
|
echo "BAKE_CACHE_TO="
|
||||||
|
fi
|
||||||
|
#
|
||||||
|
|
||||||
# GitHub Container Registry
|
- name: Add localhost registry
|
||||||
- name: Build Debian based images (ghcr.io)
|
if: ${{ matrix.base_image == 'alpine' }}
|
||||||
shell: bash
|
shell: bash
|
||||||
env:
|
|
||||||
DOCKER_REPO: "${{ vars.GHCR_REPO }}"
|
|
||||||
DOCKER_TAG: "${{steps.vars.outputs.DOCKER_TAG}}"
|
|
||||||
run: |
|
run: |
|
||||||
./hooks/build
|
echo "CONTAINER_REGISTRIES=${CONTAINER_REGISTRIES:+${CONTAINER_REGISTRIES},}localhost:5000/vaultwarden/server" | tee -a "${GITHUB_ENV}"
|
||||||
if: ${{ matrix.base_image == 'debian' && env.HAVE_GHCR_LOGIN == 'true' }}
|
|
||||||
|
|
||||||
- name: Push Debian based images (ghcr.io)
|
- name: Bake ${{ matrix.base_image }} containers
|
||||||
|
uses: docker/bake-action@73b0efa7a0e8ac276e0a8d5c580698a942ff10b5 # v4.4.0
|
||||||
|
env:
|
||||||
|
BASE_TAGS: "${{ env.BASE_TAGS }}"
|
||||||
|
SOURCE_COMMIT: "${{ env.SOURCE_COMMIT }}"
|
||||||
|
SOURCE_VERSION: "${{ env.SOURCE_VERSION }}"
|
||||||
|
SOURCE_REPOSITORY_URL: "${{ env.SOURCE_REPOSITORY_URL }}"
|
||||||
|
CONTAINER_REGISTRIES: "${{ env.CONTAINER_REGISTRIES }}"
|
||||||
|
with:
|
||||||
|
pull: true
|
||||||
|
push: true
|
||||||
|
files: docker/docker-bake.hcl
|
||||||
|
targets: "${{ matrix.base_image }}-multi"
|
||||||
|
set: |
|
||||||
|
*.cache-from=${{ env.BAKE_CACHE_FROM }}
|
||||||
|
*.cache-to=${{ env.BAKE_CACHE_TO }}
|
||||||
|
|
||||||
|
|
||||||
|
# Extract the Alpine binaries from the containers
|
||||||
|
- name: Extract binaries
|
||||||
|
if: ${{ matrix.base_image == 'alpine' }}
|
||||||
shell: bash
|
shell: bash
|
||||||
env:
|
|
||||||
DOCKER_REPO: "${{ vars.GHCR_REPO }}"
|
|
||||||
DOCKER_TAG: "${{steps.vars.outputs.DOCKER_TAG}}"
|
|
||||||
run: |
|
run: |
|
||||||
./hooks/push
|
# Check which main tag we are going to build determined by github.ref_type
|
||||||
if: ${{ matrix.base_image == 'debian' && env.HAVE_GHCR_LOGIN == 'true' }}
|
if [[ "${{ github.ref_type }}" == "tag" ]]; then
|
||||||
|
EXTRACT_TAG="latest"
|
||||||
|
elif [[ "${{ github.ref_type }}" == "branch" ]]; then
|
||||||
|
EXTRACT_TAG="testing"
|
||||||
|
fi
|
||||||
|
|
||||||
# Quay.io
|
# After each extraction the image is removed.
|
||||||
- name: Build Debian based images (quay.io)
|
# This is needed because using different platforms doesn't trigger a new pull/download
|
||||||
shell: bash
|
|
||||||
env:
|
|
||||||
DOCKER_REPO: "${{ vars.QUAY_REPO }}"
|
|
||||||
DOCKER_TAG: "${{steps.vars.outputs.DOCKER_TAG}}"
|
|
||||||
run: |
|
|
||||||
./hooks/build
|
|
||||||
if: ${{ matrix.base_image == 'debian' && env.HAVE_QUAY_LOGIN == 'true' }}
|
|
||||||
|
|
||||||
- name: Push Debian based images (quay.io)
|
# Extract amd64 binary
|
||||||
shell: bash
|
docker create --name amd64 --platform=linux/amd64 "vaultwarden/server:${EXTRACT_TAG}-alpine"
|
||||||
env:
|
docker cp amd64:/vaultwarden vaultwarden-amd64
|
||||||
DOCKER_REPO: "${{ vars.QUAY_REPO }}"
|
docker rm --force amd64
|
||||||
DOCKER_TAG: "${{steps.vars.outputs.DOCKER_TAG}}"
|
docker rmi --force "vaultwarden/server:${EXTRACT_TAG}-alpine"
|
||||||
run: |
|
|
||||||
./hooks/push
|
|
||||||
if: ${{ matrix.base_image == 'debian' && env.HAVE_QUAY_LOGIN == 'true' }}
|
|
||||||
|
|
||||||
# Alpine
|
# Extract arm64 binary
|
||||||
|
docker create --name arm64 --platform=linux/arm64 "vaultwarden/server:${EXTRACT_TAG}-alpine"
|
||||||
|
docker cp arm64:/vaultwarden vaultwarden-arm64
|
||||||
|
docker rm --force arm64
|
||||||
|
docker rmi --force "vaultwarden/server:${EXTRACT_TAG}-alpine"
|
||||||
|
|
||||||
# Docker Hub
|
# Extract armv7 binary
|
||||||
- name: Build Alpine based images (docker.io)
|
docker create --name armv7 --platform=linux/arm/v7 "vaultwarden/server:${EXTRACT_TAG}-alpine"
|
||||||
shell: bash
|
docker cp armv7:/vaultwarden vaultwarden-armv7
|
||||||
env:
|
docker rm --force armv7
|
||||||
DOCKER_REPO: "${{ vars.DOCKERHUB_REPO }}"
|
docker rmi --force "vaultwarden/server:${EXTRACT_TAG}-alpine"
|
||||||
DOCKER_TAG: "${{steps.vars.outputs.DOCKER_TAG}}-alpine"
|
|
||||||
run: |
|
|
||||||
./hooks/build
|
|
||||||
if: ${{ matrix.base_image == 'alpine' && env.HAVE_DOCKERHUB_LOGIN == 'true' }}
|
|
||||||
|
|
||||||
- name: Push Alpine based images (docker.io)
|
# Extract armv6 binary
|
||||||
shell: bash
|
docker create --name armv6 --platform=linux/arm/v6 "vaultwarden/server:${EXTRACT_TAG}-alpine"
|
||||||
env:
|
docker cp armv6:/vaultwarden vaultwarden-armv6
|
||||||
DOCKER_REPO: "${{ vars.DOCKERHUB_REPO }}"
|
docker rm --force armv6
|
||||||
DOCKER_TAG: "${{steps.vars.outputs.DOCKER_TAG}}-alpine"
|
docker rmi --force "vaultwarden/server:${EXTRACT_TAG}-alpine"
|
||||||
run: |
|
|
||||||
./hooks/push
|
|
||||||
if: ${{ matrix.base_image == 'alpine' && env.HAVE_DOCKERHUB_LOGIN == 'true' }}
|
|
||||||
|
|
||||||
# GitHub Container Registry
|
# Upload artifacts to Github Actions
|
||||||
- name: Build Alpine based images (ghcr.io)
|
- name: "Upload amd64 artifact"
|
||||||
shell: bash
|
uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4.3.3
|
||||||
env:
|
if: ${{ matrix.base_image == 'alpine' }}
|
||||||
DOCKER_REPO: "${{ vars.GHCR_REPO }}"
|
with:
|
||||||
DOCKER_TAG: "${{steps.vars.outputs.DOCKER_TAG}}-alpine"
|
name: vaultwarden-${{ env.SOURCE_VERSION }}-linux-amd64
|
||||||
run: |
|
path: vaultwarden-amd64
|
||||||
./hooks/build
|
|
||||||
if: ${{ matrix.base_image == 'alpine' && env.HAVE_GHCR_LOGIN == 'true' }}
|
|
||||||
|
|
||||||
- name: Push Alpine based images (ghcr.io)
|
- name: "Upload arm64 artifact"
|
||||||
shell: bash
|
uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4.3.3
|
||||||
env:
|
if: ${{ matrix.base_image == 'alpine' }}
|
||||||
DOCKER_REPO: "${{ vars.GHCR_REPO }}"
|
with:
|
||||||
DOCKER_TAG: "${{steps.vars.outputs.DOCKER_TAG}}-alpine"
|
name: vaultwarden-${{ env.SOURCE_VERSION }}-linux-arm64
|
||||||
run: |
|
path: vaultwarden-arm64
|
||||||
./hooks/push
|
|
||||||
if: ${{ matrix.base_image == 'alpine' && env.HAVE_GHCR_LOGIN == 'true' }}
|
|
||||||
|
|
||||||
# Quay.io
|
- name: "Upload armv7 artifact"
|
||||||
- name: Build Alpine based images (quay.io)
|
uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4.3.3
|
||||||
shell: bash
|
if: ${{ matrix.base_image == 'alpine' }}
|
||||||
env:
|
with:
|
||||||
DOCKER_REPO: "${{ vars.QUAY_REPO }}"
|
name: vaultwarden-${{ env.SOURCE_VERSION }}-linux-armv7
|
||||||
DOCKER_TAG: "${{steps.vars.outputs.DOCKER_TAG}}-alpine"
|
path: vaultwarden-armv7
|
||||||
run: |
|
|
||||||
./hooks/build
|
|
||||||
if: ${{ matrix.base_image == 'alpine' && env.HAVE_QUAY_LOGIN == 'true' }}
|
|
||||||
|
|
||||||
- name: Push Alpine based images (quay.io)
|
- name: "Upload armv6 artifact"
|
||||||
shell: bash
|
uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4.3.3
|
||||||
env:
|
if: ${{ matrix.base_image == 'alpine' }}
|
||||||
DOCKER_REPO: "${{ vars.QUAY_REPO }}"
|
with:
|
||||||
DOCKER_TAG: "${{steps.vars.outputs.DOCKER_TAG}}-alpine"
|
name: vaultwarden-${{ env.SOURCE_VERSION }}-linux-armv6
|
||||||
run: |
|
path: vaultwarden-armv6
|
||||||
./hooks/push
|
# End Upload artifacts to Github Actions
|
||||||
if: ${{ matrix.base_image == 'alpine' && env.HAVE_QUAY_LOGIN == 'true' }}
|
|
||||||
|
|
26
.github/workflows/releasecache-cleanup.yml
vendored
Normal file
26
.github/workflows/releasecache-cleanup.yml
vendored
Normal file
|
@ -0,0 +1,26 @@
|
||||||
|
on:
|
||||||
|
workflow_dispatch:
|
||||||
|
inputs:
|
||||||
|
manual_trigger:
|
||||||
|
description: "Manual trigger buildcache cleanup"
|
||||||
|
required: false
|
||||||
|
default: ""
|
||||||
|
|
||||||
|
schedule:
|
||||||
|
- cron: '0 1 * * FRI'
|
||||||
|
|
||||||
|
name: Cleanup
|
||||||
|
jobs:
|
||||||
|
releasecache-cleanup:
|
||||||
|
name: Releasecache Cleanup
|
||||||
|
runs-on: ubuntu-22.04
|
||||||
|
continue-on-error: true
|
||||||
|
timeout-minutes: 30
|
||||||
|
steps:
|
||||||
|
- name: Delete vaultwarden-buildcache containers
|
||||||
|
uses: actions/delete-package-versions@e5bc658cc4c965c472efe991f8beea3981499c55 # v5.0.0
|
||||||
|
with:
|
||||||
|
package-name: 'vaultwarden-buildcache'
|
||||||
|
package-type: 'container'
|
||||||
|
min-versions-to-keep: 0
|
||||||
|
delete-only-untagged-versions: 'false'
|
42
.github/workflows/trivy.yml
vendored
Normal file
42
.github/workflows/trivy.yml
vendored
Normal file
|
@ -0,0 +1,42 @@
|
||||||
|
name: trivy
|
||||||
|
|
||||||
|
on:
|
||||||
|
push:
|
||||||
|
branches:
|
||||||
|
- main
|
||||||
|
tags:
|
||||||
|
- '*'
|
||||||
|
pull_request:
|
||||||
|
branches: [ "main" ]
|
||||||
|
schedule:
|
||||||
|
- cron: '00 12 * * *'
|
||||||
|
|
||||||
|
permissions:
|
||||||
|
contents: read
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
trivy-scan:
|
||||||
|
name: Check
|
||||||
|
runs-on: ubuntu-22.04
|
||||||
|
timeout-minutes: 30
|
||||||
|
permissions:
|
||||||
|
contents: read
|
||||||
|
security-events: write
|
||||||
|
actions: read
|
||||||
|
steps:
|
||||||
|
- name: Checkout code
|
||||||
|
uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b #v4.1.4
|
||||||
|
|
||||||
|
- name: Run Trivy vulnerability scanner
|
||||||
|
uses: aquasecurity/trivy-action@d710430a6722f083d3b36b8339ff66b32f22ee55 # v0.19.0
|
||||||
|
with:
|
||||||
|
scan-type: repo
|
||||||
|
ignore-unfixed: true
|
||||||
|
format: sarif
|
||||||
|
output: trivy-results.sarif
|
||||||
|
severity: CRITICAL,HIGH
|
||||||
|
|
||||||
|
- name: Upload Trivy scan results to GitHub Security tab
|
||||||
|
uses: github/codeql-action/upload-sarif@2bbafcdd7fbf96243689e764c2f15d9735164f33 # v3.25.3
|
||||||
|
with:
|
||||||
|
sarif_file: 'trivy-results.sarif'
|
|
@ -1,10 +1,12 @@
|
||||||
ignored:
|
ignored:
|
||||||
|
# To prevent issues and make clear some images only work on linux/amd64, we ignore this
|
||||||
|
- DL3029
|
||||||
# disable explicit version for apt install
|
# disable explicit version for apt install
|
||||||
- DL3008
|
- DL3008
|
||||||
# disable explicit version for apk install
|
# disable explicit version for apk install
|
||||||
- DL3018
|
- DL3018
|
||||||
# disable check for consecutive `RUN` instructions
|
# Ignore shellcheck info message
|
||||||
- DL3059
|
- SC1091
|
||||||
trustedRegistries:
|
trustedRegistries:
|
||||||
- docker.io
|
- docker.io
|
||||||
- ghcr.io
|
- ghcr.io
|
||||||
|
|
|
@ -1,7 +1,7 @@
|
||||||
---
|
---
|
||||||
repos:
|
repos:
|
||||||
- repo: https://github.com/pre-commit/pre-commit-hooks
|
- repo: https://github.com/pre-commit/pre-commit-hooks
|
||||||
rev: v4.4.0
|
rev: v4.5.0
|
||||||
hooks:
|
hooks:
|
||||||
- id: check-yaml
|
- id: check-yaml
|
||||||
- id: check-json
|
- id: check-json
|
||||||
|
|
2283
Cargo.lock
generated
2283
Cargo.lock
generated
File diff suppressed because it is too large
Load Diff
180
Cargo.toml
180
Cargo.toml
|
@ -3,7 +3,7 @@ name = "vaultwarden"
|
||||||
version = "1.0.0"
|
version = "1.0.0"
|
||||||
authors = ["Daniel García <dani-garcia@users.noreply.github.com>"]
|
authors = ["Daniel García <dani-garcia@users.noreply.github.com>"]
|
||||||
edition = "2021"
|
edition = "2021"
|
||||||
rust-version = "1.68.2"
|
rust-version = "1.76.0"
|
||||||
resolver = "2"
|
resolver = "2"
|
||||||
|
|
||||||
repository = "https://github.com/dani-garcia/vaultwarden"
|
repository = "https://github.com/dani-garcia/vaultwarden"
|
||||||
|
@ -36,75 +36,74 @@ unstable = []
|
||||||
|
|
||||||
[target."cfg(not(windows))".dependencies]
|
[target."cfg(not(windows))".dependencies]
|
||||||
# Logging
|
# Logging
|
||||||
syslog = "6.1.0"
|
syslog = "6.1.1"
|
||||||
|
|
||||||
[dependencies]
|
[dependencies]
|
||||||
# Logging
|
# Logging
|
||||||
log = "0.4.19"
|
log = "0.4.21"
|
||||||
fern = { version = "0.6.2", features = ["syslog-6"] }
|
fern = { version = "0.6.2", features = ["syslog-6", "reopen-1"] }
|
||||||
tracing = { version = "0.1.37", features = ["log"] } # Needed to have lettre and webauthn-rs trace logging to work
|
tracing = { version = "0.1.40", features = ["log"] } # Needed to have lettre and webauthn-rs trace logging to work
|
||||||
|
|
||||||
# A `dotenv` implementation for Rust
|
# A `dotenv` implementation for Rust
|
||||||
dotenvy = { version = "0.15.7", default-features = false }
|
dotenvy = { version = "0.15.7", default-features = false }
|
||||||
|
|
||||||
# Lazy initialization
|
# Lazy initialization
|
||||||
once_cell = "1.18.0"
|
once_cell = "1.19.0"
|
||||||
|
|
||||||
# Numerical libraries
|
# Numerical libraries
|
||||||
num-traits = "0.2.15"
|
num-traits = "0.2.19"
|
||||||
num-derive = "0.4.0"
|
num-derive = "0.4.2"
|
||||||
|
bigdecimal = "0.4.3"
|
||||||
|
|
||||||
# Web framework
|
# Web framework
|
||||||
rocket = { version = "0.5.0-rc.3", features = ["tls", "json"], default-features = false }
|
rocket = { version = "0.5.1", features = ["tls", "json"], default-features = false }
|
||||||
# rocket_ws = { version ="0.1.0-rc.3" }
|
rocket_ws = { version ="0.1.1" }
|
||||||
rocket_ws = { git = 'https://github.com/SergioBenitez/Rocket', rev = "ce441b5f46fdf5cd99cb32b8b8638835e4c2a5fa" } # v0.5 branch
|
|
||||||
|
|
||||||
# WebSockets libraries
|
# WebSockets libraries
|
||||||
tokio-tungstenite = "0.19.0"
|
rmpv = "1.3.0" # MessagePack library
|
||||||
rmpv = "1.0.0" # MessagePack library
|
|
||||||
|
|
||||||
# Concurrent HashMap used for WebSocket messaging and favicons
|
# Concurrent HashMap used for WebSocket messaging and favicons
|
||||||
dashmap = "5.4.0"
|
dashmap = "5.5.3"
|
||||||
|
|
||||||
# Async futures
|
# Async futures
|
||||||
futures = "0.3.28"
|
futures = "0.3.30"
|
||||||
tokio = { version = "1.29.1", features = ["rt-multi-thread", "fs", "io-util", "parking_lot", "time", "signal"] }
|
tokio = { version = "1.37.0", features = ["rt-multi-thread", "fs", "io-util", "parking_lot", "time", "signal", "net"] }
|
||||||
|
|
||||||
# A generic serialization/deserialization framework
|
# A generic serialization/deserialization framework
|
||||||
serde = { version = "1.0.166", features = ["derive"] }
|
serde = { version = "1.0.202", features = ["derive"] }
|
||||||
serde_json = "1.0.99"
|
serde_json = "1.0.117"
|
||||||
|
|
||||||
# A safe, extensible ORM and Query builder
|
# A safe, extensible ORM and Query builder
|
||||||
diesel = { version = "2.1.0", features = ["chrono", "r2d2"] }
|
diesel = { version = "2.1.6", features = ["chrono", "r2d2", "numeric"] }
|
||||||
diesel_migrations = "2.1.0"
|
diesel_migrations = "2.1.0"
|
||||||
diesel_logger = { version = "0.3.0", optional = true }
|
diesel_logger = { version = "0.3.0", optional = true }
|
||||||
|
|
||||||
# Bundled/Static SQLite
|
# Bundled/Static SQLite
|
||||||
libsqlite3-sys = { version = "0.26.0", features = ["bundled"], optional = true }
|
libsqlite3-sys = { version = "0.28.0", features = ["bundled"], optional = true }
|
||||||
|
|
||||||
# Crypto-related libraries
|
# Crypto-related libraries
|
||||||
rand = { version = "0.8.5", features = ["small_rng"] }
|
rand = { version = "0.8.5", features = ["small_rng"] }
|
||||||
ring = "0.16.20"
|
ring = "0.17.8"
|
||||||
|
|
||||||
# UUID generation
|
# UUID generation
|
||||||
uuid = { version = "1.4.0", features = ["v4"] }
|
uuid = { version = "1.8.0", features = ["v4"] }
|
||||||
|
|
||||||
# Date and time libraries
|
# Date and time libraries
|
||||||
chrono = { version = "0.4.26", features = ["clock", "serde"], default-features = false }
|
chrono = { version = "0.4.38", features = ["clock", "serde"], default-features = false }
|
||||||
chrono-tz = "0.8.3"
|
chrono-tz = "0.9.0"
|
||||||
time = "0.3.22"
|
time = "0.3.36"
|
||||||
|
|
||||||
# Job scheduler
|
# Job scheduler
|
||||||
job_scheduler_ng = "2.0.4"
|
job_scheduler_ng = "2.0.5"
|
||||||
|
|
||||||
# Data encoding library Hex/Base32/Base64
|
# Data encoding library Hex/Base32/Base64
|
||||||
data-encoding = "2.4.0"
|
data-encoding = "2.6.0"
|
||||||
|
|
||||||
# JWT library
|
# JWT library
|
||||||
jsonwebtoken = "8.3.0"
|
jsonwebtoken = "9.3.0"
|
||||||
|
|
||||||
# TOTP library
|
# TOTP library
|
||||||
totp-lite = "2.0.0"
|
totp-lite = "2.0.1"
|
||||||
|
|
||||||
# Yubico Library
|
# Yubico Library
|
||||||
yubico = { version = "0.11.0", features = ["online-tokio"], default-features = false }
|
yubico = { version = "0.11.0", features = ["online-tokio"], default-features = false }
|
||||||
|
@ -113,71 +112,138 @@ yubico = { version = "0.11.0", features = ["online-tokio"], default-features = f
|
||||||
webauthn-rs = "0.3.2"
|
webauthn-rs = "0.3.2"
|
||||||
|
|
||||||
# Handling of URL's for WebAuthn and favicons
|
# Handling of URL's for WebAuthn and favicons
|
||||||
url = "2.4.0"
|
url = "2.5.0"
|
||||||
|
|
||||||
# Email libraries
|
# Email libraries
|
||||||
lettre = { version = "0.10.4", features = ["smtp-transport", "sendmail-transport", "builder", "serde", "tokio1-native-tls", "hostname", "tracing", "tokio1"], default-features = false }
|
lettre = { version = "0.11.7", features = ["smtp-transport", "sendmail-transport", "builder", "serde", "tokio1-native-tls", "hostname", "tracing", "tokio1"], default-features = false }
|
||||||
percent-encoding = "2.3.0" # URL encoding library used for URL's in the emails
|
percent-encoding = "2.3.1" # URL encoding library used for URL's in the emails
|
||||||
email_address = "0.2.4"
|
email_address = "0.2.4"
|
||||||
|
|
||||||
# HTML Template library
|
# HTML Template library
|
||||||
handlebars = { version = "4.3.7", features = ["dir_source"] }
|
handlebars = { version = "5.1.2", features = ["dir_source"] }
|
||||||
|
|
||||||
# HTTP client (Used for favicons, version check, DUO and HIBP API)
|
# HTTP client (Used for favicons, version check, DUO and HIBP API)
|
||||||
reqwest = { version = "0.11.18", features = ["stream", "json", "gzip", "brotli", "socks", "cookies", "trust-dns"] }
|
reqwest = { version = "0.12.4", features = ["native-tls-alpn", "stream", "json", "gzip", "brotli", "socks", "cookies"] }
|
||||||
|
hickory-resolver = "0.24.1"
|
||||||
|
|
||||||
# Favicon extraction libraries
|
# Favicon extraction libraries
|
||||||
html5gum = "0.5.3"
|
html5gum = "0.5.7"
|
||||||
regex = { version = "1.8.4", features = ["std", "perf", "unicode-perl"], default-features = false }
|
regex = { version = "1.10.4", features = ["std", "perf", "unicode-perl"], default-features = false }
|
||||||
data-url = "0.3.0"
|
data-url = "0.3.1"
|
||||||
bytes = "1.4.0"
|
bytes = "1.6.0"
|
||||||
|
|
||||||
# Cache function results (Used for version check and favicon fetching)
|
# Cache function results (Used for version check and favicon fetching)
|
||||||
cached = "0.44.0"
|
cached = { version = "0.51.3", features = ["async"] }
|
||||||
|
|
||||||
# Used for custom short lived cookie jar during favicon extraction
|
# Used for custom short lived cookie jar during favicon extraction
|
||||||
cookie = "0.16.2"
|
cookie = "0.18.1"
|
||||||
cookie_store = "0.19.1"
|
cookie_store = "0.21.0"
|
||||||
|
|
||||||
# Used by U2F, JWT and PostgreSQL
|
# Used by U2F, JWT and PostgreSQL
|
||||||
openssl = "0.10.55"
|
openssl = "0.10.64"
|
||||||
|
|
||||||
# CLI argument parsing
|
# CLI argument parsing
|
||||||
pico-args = "0.5.0"
|
pico-args = "0.5.0"
|
||||||
|
|
||||||
# Macro ident concatenation
|
# Macro ident concatenation
|
||||||
paste = "1.0.13"
|
paste = "1.0.15"
|
||||||
governor = "0.5.1"
|
governor = "0.6.3"
|
||||||
|
|
||||||
# Check client versions for specific features.
|
# Check client versions for specific features.
|
||||||
semver = "1.0.17"
|
semver = "1.0.23"
|
||||||
|
|
||||||
# Allow overriding the default memory allocator
|
# Allow overriding the default memory allocator
|
||||||
# Mainly used for the musl builds, since the default musl malloc is very slow
|
# Mainly used for the musl builds, since the default musl malloc is very slow
|
||||||
mimalloc = { version = "0.1.37", features = ["secure"], default-features = false, optional = true }
|
mimalloc = { version = "0.1.42", features = ["secure"], default-features = false, optional = true }
|
||||||
which = "4.4.0"
|
which = "6.0.1"
|
||||||
|
|
||||||
# Argon2 library with support for the PHC format
|
# Argon2 library with support for the PHC format
|
||||||
argon2 = "0.5.0"
|
argon2 = "0.5.3"
|
||||||
|
|
||||||
# Reading a password from the cli for generating the Argon2id ADMIN_TOKEN
|
# Reading a password from the cli for generating the Argon2id ADMIN_TOKEN
|
||||||
rpassword = "7.2.0"
|
rpassword = "7.3.1"
|
||||||
|
|
||||||
[patch.crates-io]
|
|
||||||
rocket = { git = 'https://github.com/SergioBenitez/Rocket', rev = 'ce441b5f46fdf5cd99cb32b8b8638835e4c2a5fa' } # v0.5 branch
|
|
||||||
# rocket_ws = { git = 'https://github.com/SergioBenitez/Rocket', rev = 'ce441b5f46fdf5cd99cb32b8b8638835e4c2a5fa' } # v0.5 branch
|
|
||||||
|
|
||||||
# Strip debuginfo from the release builds
|
# Strip debuginfo from the release builds
|
||||||
# Also enable thin LTO for some optimizations
|
# The symbols are the provide better panic traces
|
||||||
|
# Also enable fat LTO and use 1 codegen unit for optimizations
|
||||||
[profile.release]
|
[profile.release]
|
||||||
strip = "debuginfo"
|
strip = "debuginfo"
|
||||||
lto = "thin"
|
lto = "fat"
|
||||||
|
codegen-units = 1
|
||||||
|
|
||||||
|
|
||||||
|
# A little bit of a speedup
|
||||||
|
[profile.dev]
|
||||||
|
split-debuginfo = "unpacked"
|
||||||
|
|
||||||
# Always build argon2 using opt-level 3
|
# Always build argon2 using opt-level 3
|
||||||
# This is a huge speed improvement during testing
|
# This is a huge speed improvement during testing
|
||||||
[profile.dev.package.argon2]
|
[profile.dev.package.argon2]
|
||||||
opt-level = 3
|
opt-level = 3
|
||||||
|
|
||||||
# A little bit of a speedup
|
# Optimize for size
|
||||||
[profile.dev]
|
[profile.release-micro]
|
||||||
split-debuginfo = "unpacked"
|
inherits = "release"
|
||||||
|
opt-level = "z"
|
||||||
|
strip = "symbols"
|
||||||
|
lto = "fat"
|
||||||
|
codegen-units = 1
|
||||||
|
panic = "abort"
|
||||||
|
|
||||||
|
# Profile for systems with low resources
|
||||||
|
# It will use less resources during build
|
||||||
|
[profile.release-low]
|
||||||
|
inherits = "release"
|
||||||
|
strip = "symbols"
|
||||||
|
lto = "thin"
|
||||||
|
codegen-units = 16
|
||||||
|
|
||||||
|
# Linting config
|
||||||
|
[lints.rust]
|
||||||
|
# Forbid
|
||||||
|
unsafe_code = "forbid"
|
||||||
|
non_ascii_idents = "forbid"
|
||||||
|
|
||||||
|
# Deny
|
||||||
|
future_incompatible = { level = "deny", priority = -1 }
|
||||||
|
noop_method_call = "deny"
|
||||||
|
pointer_structural_match = "deny"
|
||||||
|
rust_2018_idioms = { level = "deny", priority = -1 }
|
||||||
|
rust_2021_compatibility = { level = "deny", priority = -1 }
|
||||||
|
trivial_casts = "deny"
|
||||||
|
trivial_numeric_casts = "deny"
|
||||||
|
unused = { level = "deny", priority = -1 }
|
||||||
|
unused_import_braces = "deny"
|
||||||
|
unused_lifetimes = "deny"
|
||||||
|
deprecated_in_future = "deny"
|
||||||
|
|
||||||
|
[lints.clippy]
|
||||||
|
# Allow
|
||||||
|
# We need this since Rust v1.76+, since it has some bugs
|
||||||
|
# https://github.com/rust-lang/rust-clippy/issues/12016
|
||||||
|
blocks_in_conditions = "allow"
|
||||||
|
|
||||||
|
# Deny
|
||||||
|
cast_lossless = "deny"
|
||||||
|
clone_on_ref_ptr = "deny"
|
||||||
|
equatable_if_let = "deny"
|
||||||
|
float_cmp_const = "deny"
|
||||||
|
inefficient_to_string = "deny"
|
||||||
|
iter_on_empty_collections = "deny"
|
||||||
|
iter_on_single_items = "deny"
|
||||||
|
linkedlist = "deny"
|
||||||
|
macro_use_imports = "deny"
|
||||||
|
manual_assert = "deny"
|
||||||
|
manual_instant_elapsed = "deny"
|
||||||
|
manual_string_new = "deny"
|
||||||
|
match_wildcard_for_single_variants = "deny"
|
||||||
|
mem_forget = "deny"
|
||||||
|
needless_lifetimes = "deny"
|
||||||
|
string_add_assign = "deny"
|
||||||
|
string_to_string = "deny"
|
||||||
|
unnecessary_join = "deny"
|
||||||
|
unnecessary_self_imports = "deny"
|
||||||
|
unused_async = "deny"
|
||||||
|
verbose_file_reads = "deny"
|
||||||
|
zero_sized_map_values = "deny"
|
||||||
|
|
|
@ -1 +1 @@
|
||||||
docker/amd64/Dockerfile
|
docker/Dockerfile.debian
|
|
@ -42,7 +42,7 @@ docker run -d --name vaultwarden -v /vw-data/:/data/ --restart unless-stopped -p
|
||||||
```
|
```
|
||||||
This will preserve any persistent data under /vw-data/, you can adapt the path to whatever suits you.
|
This will preserve any persistent data under /vw-data/, you can adapt the path to whatever suits you.
|
||||||
|
|
||||||
**IMPORTANT**: Most modern web browsers, disallow the use of Web Crypto APIs in insecure contexts. In this case, you might get an error like `Cannot read property 'importKey'`. To solve this problem, you need to access the web vault via HTTPS or localhost.
|
**IMPORTANT**: Most modern web browsers disallow the use of Web Crypto APIs in insecure contexts. In this case, you might get an error like `Cannot read property 'importKey'`. To solve this problem, you need to access the web vault via HTTPS or localhost.
|
||||||
|
|
||||||
This can be configured in [vaultwarden directly](https://github.com/dani-garcia/vaultwarden/wiki/Enabling-HTTPS) or using a third-party reverse proxy ([some examples](https://github.com/dani-garcia/vaultwarden/wiki/Proxy-examples)).
|
This can be configured in [vaultwarden directly](https://github.com/dani-garcia/vaultwarden/wiki/Enabling-HTTPS) or using a third-party reverse proxy ([some examples](https://github.com/dani-garcia/vaultwarden/wiki/Proxy-examples)).
|
||||||
|
|
||||||
|
@ -92,4 +92,11 @@ Thanks for your contribution to the project!
|
||||||
</a>
|
</a>
|
||||||
</td>
|
</td>
|
||||||
</tr>
|
</tr>
|
||||||
|
<tr>
|
||||||
|
<td align="center">
|
||||||
|
<a href="https://github.com/IQ333777" style="width: 75px">
|
||||||
|
<sub><b>IQ333777</b></sub>
|
||||||
|
</a>
|
||||||
|
</td>
|
||||||
|
</tr>
|
||||||
</table>
|
</table>
|
||||||
|
|
19
build.rs
19
build.rs
|
@ -17,8 +17,15 @@ fn main() {
|
||||||
"You need to enable one DB backend. To build with previous defaults do: cargo build --features sqlite"
|
"You need to enable one DB backend. To build with previous defaults do: cargo build --features sqlite"
|
||||||
);
|
);
|
||||||
|
|
||||||
|
// Rerun when these paths are changed.
|
||||||
|
// Someone could have checked-out a tag or specific commit, but no other files changed.
|
||||||
|
println!("cargo:rerun-if-changed=.git");
|
||||||
|
println!("cargo:rerun-if-changed=.git/HEAD");
|
||||||
|
println!("cargo:rerun-if-changed=.git/index");
|
||||||
|
println!("cargo:rerun-if-changed=.git/refs/tags");
|
||||||
|
|
||||||
#[cfg(all(not(debug_assertions), feature = "query_logger"))]
|
#[cfg(all(not(debug_assertions), feature = "query_logger"))]
|
||||||
compile_error!("Query Logging is only allowed during development, it is not intented for production usage!");
|
compile_error!("Query Logging is only allowed during development, it is not intended for production usage!");
|
||||||
|
|
||||||
// Support $BWRS_VERSION for legacy compatibility, but default to $VW_VERSION.
|
// Support $BWRS_VERSION for legacy compatibility, but default to $VW_VERSION.
|
||||||
// If neither exist, read from git.
|
// If neither exist, read from git.
|
||||||
|
@ -42,11 +49,11 @@ fn run(args: &[&str]) -> Result<String, std::io::Error> {
|
||||||
|
|
||||||
/// This method reads info from Git, namely tags, branch, and revision
|
/// This method reads info from Git, namely tags, branch, and revision
|
||||||
/// To access these values, use:
|
/// To access these values, use:
|
||||||
/// - env!("GIT_EXACT_TAG")
|
/// - `env!("GIT_EXACT_TAG")`
|
||||||
/// - env!("GIT_LAST_TAG")
|
/// - `env!("GIT_LAST_TAG")`
|
||||||
/// - env!("GIT_BRANCH")
|
/// - `env!("GIT_BRANCH")`
|
||||||
/// - env!("GIT_REV")
|
/// - `env!("GIT_REV")`
|
||||||
/// - env!("VW_VERSION")
|
/// - `env!("VW_VERSION")`
|
||||||
fn version_from_git_info() -> Result<String, std::io::Error> {
|
fn version_from_git_info() -> Result<String, std::io::Error> {
|
||||||
// The exact tag for the current commit, can be empty when
|
// The exact tag for the current commit, can be empty when
|
||||||
// the current commit doesn't have an associated tag
|
// the current commit doesn't have an associated tag
|
||||||
|
|
28
docker/DockerSettings.yaml
Normal file
28
docker/DockerSettings.yaml
Normal file
|
@ -0,0 +1,28 @@
|
||||||
|
---
|
||||||
|
vault_version: "v2024.5.0"
|
||||||
|
vault_image_digest: "sha256:784838b15c775c81b29e8979aaac36dc5ef44ea18ff0adb7fc56c7c62886319b"
|
||||||
|
# Cross Compile Docker Helper Scripts v1.4.0
|
||||||
|
# We use the linux/amd64 platform shell scripts since there is no difference between the different platform scripts
|
||||||
|
xx_image_digest: "sha256:0cd3f05c72d6c9b038eb135f91376ee1169ef3a330d34e418e65e2a5c2e9c0d4"
|
||||||
|
rust_version: 1.79.0 # Rust version to be used
|
||||||
|
debian_version: bookworm # Debian release name to be used
|
||||||
|
alpine_version: "3.20" # Alpine version to be used
|
||||||
|
# For which platforms/architectures will we try to build images
|
||||||
|
platforms: ["linux/amd64", "linux/arm64", "linux/arm/v7", "linux/arm/v6"]
|
||||||
|
# Determine the build images per OS/Arch
|
||||||
|
build_stage_image:
|
||||||
|
debian:
|
||||||
|
image: "docker.io/library/rust:{{rust_version}}-slim-{{debian_version}}"
|
||||||
|
platform: "$BUILDPLATFORM"
|
||||||
|
alpine:
|
||||||
|
image: "build_${TARGETARCH}${TARGETVARIANT}"
|
||||||
|
platform: "linux/amd64" # The Alpine build images only have linux/amd64 images
|
||||||
|
arch_image:
|
||||||
|
amd64: "ghcr.io/blackdex/rust-musl:x86_64-musl-stable-{{rust_version}}"
|
||||||
|
arm64: "ghcr.io/blackdex/rust-musl:aarch64-musl-stable-{{rust_version}}"
|
||||||
|
armv7: "ghcr.io/blackdex/rust-musl:armv7-musleabihf-stable-{{rust_version}}"
|
||||||
|
armv6: "ghcr.io/blackdex/rust-musl:arm-musleabi-stable-{{rust_version}}"
|
||||||
|
# The final image which will be used to distribute the container images
|
||||||
|
runtime_stage_image:
|
||||||
|
debian: "docker.io/library/debian:{{debian_version}}-slim"
|
||||||
|
alpine: "docker.io/library/alpine:{{alpine_version}}"
|
160
docker/Dockerfile.alpine
Normal file
160
docker/Dockerfile.alpine
Normal file
|
@ -0,0 +1,160 @@
|
||||||
|
# syntax=docker/dockerfile:1
|
||||||
|
|
||||||
|
# This file was generated using a Jinja2 template.
|
||||||
|
# Please make your changes in `DockerSettings.yaml` or `Dockerfile.j2` and then `make`
|
||||||
|
# This will generate two Dockerfile's `Dockerfile.debian` and `Dockerfile.alpine`
|
||||||
|
|
||||||
|
# Using multistage build:
|
||||||
|
# https://docs.docker.com/develop/develop-images/multistage-build/
|
||||||
|
# https://whitfin.io/speeding-up-rust-docker-builds/
|
||||||
|
|
||||||
|
####################### VAULT BUILD IMAGE #######################
|
||||||
|
# The web-vault digest specifies a particular web-vault build on Docker Hub.
|
||||||
|
# Using the digest instead of the tag name provides better security,
|
||||||
|
# as the digest of an image is immutable, whereas a tag name can later
|
||||||
|
# be changed to point to a malicious image.
|
||||||
|
#
|
||||||
|
# To verify the current digest for a given tag name:
|
||||||
|
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
|
||||||
|
# click the tag name to view the digest of the image it currently points to.
|
||||||
|
# - From the command line:
|
||||||
|
# $ docker pull docker.io/vaultwarden/web-vault:v2024.5.0
|
||||||
|
# $ docker image inspect --format "{{.RepoDigests}}" docker.io/vaultwarden/web-vault:v2024.5.0
|
||||||
|
# [docker.io/vaultwarden/web-vault@sha256:784838b15c775c81b29e8979aaac36dc5ef44ea18ff0adb7fc56c7c62886319b]
|
||||||
|
#
|
||||||
|
# - Conversely, to get the tag name from the digest:
|
||||||
|
# $ docker image inspect --format "{{.RepoTags}}" docker.io/vaultwarden/web-vault@sha256:784838b15c775c81b29e8979aaac36dc5ef44ea18ff0adb7fc56c7c62886319b
|
||||||
|
# [docker.io/vaultwarden/web-vault:v2024.5.0]
|
||||||
|
#
|
||||||
|
FROM --platform=linux/amd64 docker.io/vaultwarden/web-vault@sha256:784838b15c775c81b29e8979aaac36dc5ef44ea18ff0adb7fc56c7c62886319b as vault
|
||||||
|
|
||||||
|
########################## ALPINE BUILD IMAGES ##########################
|
||||||
|
## NOTE: The Alpine Base Images do not support other platforms then linux/amd64
|
||||||
|
## And for Alpine we define all build images here, they will only be loaded when actually used
|
||||||
|
FROM --platform=linux/amd64 ghcr.io/blackdex/rust-musl:x86_64-musl-stable-1.79.0 as build_amd64
|
||||||
|
FROM --platform=linux/amd64 ghcr.io/blackdex/rust-musl:aarch64-musl-stable-1.79.0 as build_arm64
|
||||||
|
FROM --platform=linux/amd64 ghcr.io/blackdex/rust-musl:armv7-musleabihf-stable-1.79.0 as build_armv7
|
||||||
|
FROM --platform=linux/amd64 ghcr.io/blackdex/rust-musl:arm-musleabi-stable-1.79.0 as build_armv6
|
||||||
|
|
||||||
|
########################## BUILD IMAGE ##########################
|
||||||
|
# hadolint ignore=DL3006
|
||||||
|
FROM --platform=linux/amd64 build_${TARGETARCH}${TARGETVARIANT} as build
|
||||||
|
ARG TARGETARCH
|
||||||
|
ARG TARGETVARIANT
|
||||||
|
ARG TARGETPLATFORM
|
||||||
|
|
||||||
|
SHELL ["/bin/bash", "-o", "pipefail", "-c"]
|
||||||
|
|
||||||
|
# Build time options to avoid dpkg warnings and help with reproducible builds.
|
||||||
|
ENV DEBIAN_FRONTEND=noninteractive \
|
||||||
|
LANG=C.UTF-8 \
|
||||||
|
TZ=UTC \
|
||||||
|
TERM=xterm-256color \
|
||||||
|
CARGO_HOME="/root/.cargo" \
|
||||||
|
USER="root" \
|
||||||
|
# Use PostgreSQL v15 during Alpine/MUSL builds instead of the default v11
|
||||||
|
# Debian Bookworm already contains libpq v15
|
||||||
|
PQ_LIB_DIR="/usr/local/musl/pq15/lib"
|
||||||
|
|
||||||
|
|
||||||
|
# Create CARGO_HOME folder and don't download rust docs
|
||||||
|
RUN mkdir -pv "${CARGO_HOME}" && \
|
||||||
|
rustup set profile minimal
|
||||||
|
|
||||||
|
# Creates a dummy project used to grab dependencies
|
||||||
|
RUN USER=root cargo new --bin /app
|
||||||
|
WORKDIR /app
|
||||||
|
|
||||||
|
# Environment variables for Cargo on Alpine based builds
|
||||||
|
RUN echo "export CARGO_TARGET=${RUST_MUSL_CROSS_TARGET}" >> /env-cargo && \
|
||||||
|
# To be able to build the armv6 image with mimalloc we need to tell the linker to also look for libatomic
|
||||||
|
if [[ "${TARGETARCH}${TARGETVARIANT}" == "armv6" ]] ; then echo "export RUSTFLAGS='-Clink-arg=-latomic'" >> /env-cargo ; fi && \
|
||||||
|
# Output the current contents of the file
|
||||||
|
cat /env-cargo
|
||||||
|
|
||||||
|
RUN source /env-cargo && \
|
||||||
|
rustup target add "${CARGO_TARGET}"
|
||||||
|
|
||||||
|
# Copies over *only* your manifests and build files
|
||||||
|
COPY ./Cargo.* ./rust-toolchain.toml ./build.rs ./
|
||||||
|
|
||||||
|
ARG CARGO_PROFILE=release
|
||||||
|
|
||||||
|
# Configure the DB ARG as late as possible to not invalidate the cached layers above
|
||||||
|
# Enable MiMalloc to improve performance on Alpine builds
|
||||||
|
ARG DB=sqlite,mysql,postgresql,enable_mimalloc
|
||||||
|
|
||||||
|
# Builds your dependencies and removes the
|
||||||
|
# dummy project, except the target folder
|
||||||
|
# This folder contains the compiled dependencies
|
||||||
|
RUN source /env-cargo && \
|
||||||
|
cargo build --features ${DB} --profile "${CARGO_PROFILE}" --target="${CARGO_TARGET}" && \
|
||||||
|
find . -not -path "./target*" -delete
|
||||||
|
|
||||||
|
# Copies the complete project
|
||||||
|
# To avoid copying unneeded files, use .dockerignore
|
||||||
|
COPY . .
|
||||||
|
|
||||||
|
ARG VW_VERSION
|
||||||
|
|
||||||
|
# Builds again, this time it will be the actual source files being build
|
||||||
|
RUN source /env-cargo && \
|
||||||
|
# Make sure that we actually build the project by updating the src/main.rs timestamp
|
||||||
|
# Also do this for build.rs to ensure the version is rechecked
|
||||||
|
touch build.rs src/main.rs && \
|
||||||
|
# Create a symlink to the binary target folder to easy copy the binary in the final stage
|
||||||
|
cargo build --features ${DB} --profile "${CARGO_PROFILE}" --target="${CARGO_TARGET}" && \
|
||||||
|
if [[ "${CARGO_PROFILE}" == "dev" ]] ; then \
|
||||||
|
ln -vfsr "/app/target/${CARGO_TARGET}/debug" /app/target/final ; \
|
||||||
|
else \
|
||||||
|
ln -vfsr "/app/target/${CARGO_TARGET}/${CARGO_PROFILE}" /app/target/final ; \
|
||||||
|
fi
|
||||||
|
|
||||||
|
|
||||||
|
######################## RUNTIME IMAGE ########################
|
||||||
|
# Create a new stage with a minimal image
|
||||||
|
# because we already have a binary built
|
||||||
|
#
|
||||||
|
# To build these images you need to have qemu binfmt support.
|
||||||
|
# See the following pages to help install these tools locally
|
||||||
|
# Ubuntu/Debian: https://wiki.debian.org/QemuUserEmulation
|
||||||
|
# Arch Linux: https://wiki.archlinux.org/title/QEMU#Chrooting_into_arm/arm64_environment_from_x86_64
|
||||||
|
#
|
||||||
|
# Or use a Docker image which modifies your host system to support this.
|
||||||
|
# The GitHub Actions Workflow uses the same image as used below.
|
||||||
|
# See: https://github.com/tonistiigi/binfmt
|
||||||
|
# Usage: docker run --privileged --rm tonistiigi/binfmt --install arm64,arm
|
||||||
|
# To uninstall: docker run --privileged --rm tonistiigi/binfmt --uninstall 'qemu-*'
|
||||||
|
#
|
||||||
|
# We need to add `--platform` here, because of a podman bug: https://github.com/containers/buildah/issues/4742
|
||||||
|
FROM --platform=$TARGETPLATFORM docker.io/library/alpine:3.20
|
||||||
|
|
||||||
|
ENV ROCKET_PROFILE="release" \
|
||||||
|
ROCKET_ADDRESS=0.0.0.0 \
|
||||||
|
ROCKET_PORT=80 \
|
||||||
|
SSL_CERT_DIR=/etc/ssl/certs
|
||||||
|
|
||||||
|
# Create data folder and Install needed libraries
|
||||||
|
RUN mkdir /data && \
|
||||||
|
apk --no-cache add \
|
||||||
|
ca-certificates \
|
||||||
|
curl \
|
||||||
|
openssl \
|
||||||
|
tzdata
|
||||||
|
|
||||||
|
VOLUME /data
|
||||||
|
EXPOSE 80
|
||||||
|
EXPOSE 3012
|
||||||
|
|
||||||
|
# Copies the files from the context (Rocket.toml file and web-vault)
|
||||||
|
# and the binary from the "build" stage to the current stage
|
||||||
|
WORKDIR /
|
||||||
|
|
||||||
|
COPY docker/healthcheck.sh docker/start.sh /
|
||||||
|
|
||||||
|
COPY --from=vault /web-vault ./web-vault
|
||||||
|
COPY --from=build /app/target/final/vaultwarden .
|
||||||
|
|
||||||
|
HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]
|
||||||
|
|
||||||
|
CMD ["/start.sh"]
|
|
@ -1,34 +0,0 @@
|
||||||
# syntax=docker/dockerfile:1
|
|
||||||
# The cross-built images have the build arch (`amd64`) embedded in the image
|
|
||||||
# manifest, rather than the target arch. For example:
|
|
||||||
#
|
|
||||||
# $ docker inspect vaultwarden/server:latest-armv7 | jq -r '.[]|.Architecture'
|
|
||||||
# amd64
|
|
||||||
#
|
|
||||||
# Recent versions of Docker have started printing a warning when the image's
|
|
||||||
# claimed arch doesn't match the host arch. For example:
|
|
||||||
#
|
|
||||||
# WARNING: The requested image's platform (linux/amd64) does not match the
|
|
||||||
# detected host platform (linux/arm/v7) and no specific platform was requested
|
|
||||||
#
|
|
||||||
# The image still works fine, but the spurious warning creates confusion.
|
|
||||||
#
|
|
||||||
# Docker doesn't seem to provide a way to directly set the arch of an image
|
|
||||||
# at build time. To resolve the build vs. target arch discrepancy, we use
|
|
||||||
# Docker Buildx to build a new set of images with the correct target arch.
|
|
||||||
#
|
|
||||||
# Docker Buildx uses this Dockerfile to build an image for each requested
|
|
||||||
# platform. Since the Dockerfile basically consists of a single `FROM`
|
|
||||||
# instruction, we're effectively telling Buildx to build a platform-specific
|
|
||||||
# image by simply copying the existing cross-built image and setting the
|
|
||||||
# correct target arch as a side effect.
|
|
||||||
#
|
|
||||||
# References:
|
|
||||||
#
|
|
||||||
# - https://docs.docker.com/buildx/working-with-buildx/#build-multi-platform-images
|
|
||||||
# - https://docs.docker.com/engine/reference/builder/#automatic-platform-args-in-the-global-scope
|
|
||||||
# - https://docs.docker.com/engine/reference/builder/#understand-how-arg-and-from-interact
|
|
||||||
#
|
|
||||||
ARG LOCAL_REPO
|
|
||||||
ARG DOCKER_TAG
|
|
||||||
FROM ${LOCAL_REPO}:${DOCKER_TAG}-${TARGETARCH}${TARGETVARIANT}
|
|
201
docker/Dockerfile.debian
Normal file
201
docker/Dockerfile.debian
Normal file
|
@ -0,0 +1,201 @@
|
||||||
|
# syntax=docker/dockerfile:1
|
||||||
|
|
||||||
|
# This file was generated using a Jinja2 template.
|
||||||
|
# Please make your changes in `DockerSettings.yaml` or `Dockerfile.j2` and then `make`
|
||||||
|
# This will generate two Dockerfile's `Dockerfile.debian` and `Dockerfile.alpine`
|
||||||
|
|
||||||
|
# Using multistage build:
|
||||||
|
# https://docs.docker.com/develop/develop-images/multistage-build/
|
||||||
|
# https://whitfin.io/speeding-up-rust-docker-builds/
|
||||||
|
|
||||||
|
####################### VAULT BUILD IMAGE #######################
|
||||||
|
# The web-vault digest specifies a particular web-vault build on Docker Hub.
|
||||||
|
# Using the digest instead of the tag name provides better security,
|
||||||
|
# as the digest of an image is immutable, whereas a tag name can later
|
||||||
|
# be changed to point to a malicious image.
|
||||||
|
#
|
||||||
|
# To verify the current digest for a given tag name:
|
||||||
|
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
|
||||||
|
# click the tag name to view the digest of the image it currently points to.
|
||||||
|
# - From the command line:
|
||||||
|
# $ docker pull docker.io/vaultwarden/web-vault:v2024.5.0
|
||||||
|
# $ docker image inspect --format "{{.RepoDigests}}" docker.io/vaultwarden/web-vault:v2024.5.0
|
||||||
|
# [docker.io/vaultwarden/web-vault@sha256:784838b15c775c81b29e8979aaac36dc5ef44ea18ff0adb7fc56c7c62886319b]
|
||||||
|
#
|
||||||
|
# - Conversely, to get the tag name from the digest:
|
||||||
|
# $ docker image inspect --format "{{.RepoTags}}" docker.io/vaultwarden/web-vault@sha256:784838b15c775c81b29e8979aaac36dc5ef44ea18ff0adb7fc56c7c62886319b
|
||||||
|
# [docker.io/vaultwarden/web-vault:v2024.5.0]
|
||||||
|
#
|
||||||
|
FROM --platform=linux/amd64 docker.io/vaultwarden/web-vault@sha256:784838b15c775c81b29e8979aaac36dc5ef44ea18ff0adb7fc56c7c62886319b as vault
|
||||||
|
|
||||||
|
########################## Cross Compile Docker Helper Scripts ##########################
|
||||||
|
## We use the linux/amd64 no matter which Build Platform, since these are all bash scripts
|
||||||
|
## And these bash scripts do not have any significant difference if at all
|
||||||
|
FROM --platform=linux/amd64 docker.io/tonistiigi/xx@sha256:0cd3f05c72d6c9b038eb135f91376ee1169ef3a330d34e418e65e2a5c2e9c0d4 AS xx
|
||||||
|
|
||||||
|
########################## BUILD IMAGE ##########################
|
||||||
|
# hadolint ignore=DL3006
|
||||||
|
FROM --platform=$BUILDPLATFORM docker.io/library/rust:1.79.0-slim-bookworm as build
|
||||||
|
COPY --from=xx / /
|
||||||
|
ARG TARGETARCH
|
||||||
|
ARG TARGETVARIANT
|
||||||
|
ARG TARGETPLATFORM
|
||||||
|
|
||||||
|
SHELL ["/bin/bash", "-o", "pipefail", "-c"]
|
||||||
|
|
||||||
|
# Build time options to avoid dpkg warnings and help with reproducible builds.
|
||||||
|
ENV DEBIAN_FRONTEND=noninteractive \
|
||||||
|
LANG=C.UTF-8 \
|
||||||
|
TZ=UTC \
|
||||||
|
TERM=xterm-256color \
|
||||||
|
CARGO_HOME="/root/.cargo" \
|
||||||
|
USER="root"
|
||||||
|
|
||||||
|
# Install clang to get `xx-cargo` working
|
||||||
|
# Install pkg-config to allow amd64 builds to find all libraries
|
||||||
|
# Install git so build.rs can determine the correct version
|
||||||
|
# Install the libc cross packages based upon the debian-arch
|
||||||
|
RUN apt-get update && \
|
||||||
|
apt-get install -y \
|
||||||
|
--no-install-recommends \
|
||||||
|
clang \
|
||||||
|
pkg-config \
|
||||||
|
git \
|
||||||
|
"libc6-$(xx-info debian-arch)-cross" \
|
||||||
|
"libc6-dev-$(xx-info debian-arch)-cross" \
|
||||||
|
"linux-libc-dev-$(xx-info debian-arch)-cross" && \
|
||||||
|
xx-apt-get install -y \
|
||||||
|
--no-install-recommends \
|
||||||
|
gcc \
|
||||||
|
libmariadb3 \
|
||||||
|
libpq-dev \
|
||||||
|
libpq5 \
|
||||||
|
libssl-dev \
|
||||||
|
zlib1g-dev && \
|
||||||
|
# Force install arch dependend mariadb dev packages
|
||||||
|
# Installing them the normal way breaks several other packages (again)
|
||||||
|
apt-get download "libmariadb-dev-compat:$(xx-info debian-arch)" "libmariadb-dev:$(xx-info debian-arch)" && \
|
||||||
|
dpkg --force-all -i ./libmariadb-dev*.deb && \
|
||||||
|
# Run xx-cargo early, since it sometimes seems to break when run at a later stage
|
||||||
|
echo "export CARGO_TARGET=$(xx-cargo --print-target-triple)" >> /env-cargo
|
||||||
|
|
||||||
|
# Create CARGO_HOME folder and don't download rust docs
|
||||||
|
RUN mkdir -pv "${CARGO_HOME}" && \
|
||||||
|
rustup set profile minimal
|
||||||
|
|
||||||
|
# Creates a dummy project used to grab dependencies
|
||||||
|
RUN USER=root cargo new --bin /app
|
||||||
|
WORKDIR /app
|
||||||
|
|
||||||
|
# Environment variables for Cargo on Debian based builds
|
||||||
|
ARG ARCH_OPENSSL_LIB_DIR \
|
||||||
|
ARCH_OPENSSL_INCLUDE_DIR
|
||||||
|
|
||||||
|
RUN source /env-cargo && \
|
||||||
|
if xx-info is-cross ; then \
|
||||||
|
# Some special variables if needed to override some build paths
|
||||||
|
if [[ -n "${ARCH_OPENSSL_LIB_DIR}" && -n "${ARCH_OPENSSL_INCLUDE_DIR}" ]]; then \
|
||||||
|
echo "export $(echo "${CARGO_TARGET}" | tr '[:lower:]' '[:upper:]' | tr - _)_OPENSSL_LIB_DIR=${ARCH_OPENSSL_LIB_DIR}" >> /env-cargo && \
|
||||||
|
echo "export $(echo "${CARGO_TARGET}" | tr '[:lower:]' '[:upper:]' | tr - _)_OPENSSL_INCLUDE_DIR=${ARCH_OPENSSL_INCLUDE_DIR}" >> /env-cargo ; \
|
||||||
|
fi && \
|
||||||
|
# We can't use xx-cargo since that uses clang, which doesn't work for our libraries.
|
||||||
|
# Because of this we generate the needed environment variables here which we can load in the needed steps.
|
||||||
|
echo "export CC_$(echo "${CARGO_TARGET}" | tr '[:upper:]' '[:lower:]' | tr - _)=/usr/bin/$(xx-info)-gcc" >> /env-cargo && \
|
||||||
|
echo "export CARGO_TARGET_$(echo "${CARGO_TARGET}" | tr '[:lower:]' '[:upper:]' | tr - _)_LINKER=/usr/bin/$(xx-info)-gcc" >> /env-cargo && \
|
||||||
|
echo "export PKG_CONFIG=/usr/bin/$(xx-info)-pkg-config" >> /env-cargo && \
|
||||||
|
echo "export CROSS_COMPILE=1" >> /env-cargo && \
|
||||||
|
echo "export OPENSSL_INCLUDE_DIR=/usr/include/$(xx-info)" >> /env-cargo && \
|
||||||
|
echo "export OPENSSL_LIB_DIR=/usr/lib/$(xx-info)" >> /env-cargo ; \
|
||||||
|
fi && \
|
||||||
|
# Output the current contents of the file
|
||||||
|
cat /env-cargo
|
||||||
|
|
||||||
|
RUN source /env-cargo && \
|
||||||
|
rustup target add "${CARGO_TARGET}"
|
||||||
|
|
||||||
|
# Copies over *only* your manifests and build files
|
||||||
|
COPY ./Cargo.* ./rust-toolchain.toml ./build.rs ./
|
||||||
|
|
||||||
|
ARG CARGO_PROFILE=release
|
||||||
|
|
||||||
|
# Configure the DB ARG as late as possible to not invalidate the cached layers above
|
||||||
|
ARG DB=sqlite,mysql,postgresql
|
||||||
|
|
||||||
|
# Builds your dependencies and removes the
|
||||||
|
# dummy project, except the target folder
|
||||||
|
# This folder contains the compiled dependencies
|
||||||
|
RUN source /env-cargo && \
|
||||||
|
cargo build --features ${DB} --profile "${CARGO_PROFILE}" --target="${CARGO_TARGET}" && \
|
||||||
|
find . -not -path "./target*" -delete
|
||||||
|
|
||||||
|
# Copies the complete project
|
||||||
|
# To avoid copying unneeded files, use .dockerignore
|
||||||
|
COPY . .
|
||||||
|
|
||||||
|
ARG VW_VERSION
|
||||||
|
|
||||||
|
# Builds again, this time it will be the actual source files being build
|
||||||
|
RUN source /env-cargo && \
|
||||||
|
# Make sure that we actually build the project by updating the src/main.rs timestamp
|
||||||
|
# Also do this for build.rs to ensure the version is rechecked
|
||||||
|
touch build.rs src/main.rs && \
|
||||||
|
# Create a symlink to the binary target folder to easy copy the binary in the final stage
|
||||||
|
cargo build --features ${DB} --profile "${CARGO_PROFILE}" --target="${CARGO_TARGET}" && \
|
||||||
|
if [[ "${CARGO_PROFILE}" == "dev" ]] ; then \
|
||||||
|
ln -vfsr "/app/target/${CARGO_TARGET}/debug" /app/target/final ; \
|
||||||
|
else \
|
||||||
|
ln -vfsr "/app/target/${CARGO_TARGET}/${CARGO_PROFILE}" /app/target/final ; \
|
||||||
|
fi
|
||||||
|
|
||||||
|
|
||||||
|
######################## RUNTIME IMAGE ########################
|
||||||
|
# Create a new stage with a minimal image
|
||||||
|
# because we already have a binary built
|
||||||
|
#
|
||||||
|
# To build these images you need to have qemu binfmt support.
|
||||||
|
# See the following pages to help install these tools locally
|
||||||
|
# Ubuntu/Debian: https://wiki.debian.org/QemuUserEmulation
|
||||||
|
# Arch Linux: https://wiki.archlinux.org/title/QEMU#Chrooting_into_arm/arm64_environment_from_x86_64
|
||||||
|
#
|
||||||
|
# Or use a Docker image which modifies your host system to support this.
|
||||||
|
# The GitHub Actions Workflow uses the same image as used below.
|
||||||
|
# See: https://github.com/tonistiigi/binfmt
|
||||||
|
# Usage: docker run --privileged --rm tonistiigi/binfmt --install arm64,arm
|
||||||
|
# To uninstall: docker run --privileged --rm tonistiigi/binfmt --uninstall 'qemu-*'
|
||||||
|
#
|
||||||
|
# We need to add `--platform` here, because of a podman bug: https://github.com/containers/buildah/issues/4742
|
||||||
|
FROM --platform=$TARGETPLATFORM docker.io/library/debian:bookworm-slim
|
||||||
|
|
||||||
|
ENV ROCKET_PROFILE="release" \
|
||||||
|
ROCKET_ADDRESS=0.0.0.0 \
|
||||||
|
ROCKET_PORT=80 \
|
||||||
|
DEBIAN_FRONTEND=noninteractive
|
||||||
|
|
||||||
|
# Create data folder and Install needed libraries
|
||||||
|
RUN mkdir /data && \
|
||||||
|
apt-get update && apt-get install -y \
|
||||||
|
--no-install-recommends \
|
||||||
|
ca-certificates \
|
||||||
|
curl \
|
||||||
|
libmariadb-dev-compat \
|
||||||
|
libpq5 \
|
||||||
|
openssl && \
|
||||||
|
apt-get clean && \
|
||||||
|
rm -rf /var/lib/apt/lists/*
|
||||||
|
|
||||||
|
VOLUME /data
|
||||||
|
EXPOSE 80
|
||||||
|
EXPOSE 3012
|
||||||
|
|
||||||
|
# Copies the files from the context (Rocket.toml file and web-vault)
|
||||||
|
# and the binary from the "build" stage to the current stage
|
||||||
|
WORKDIR /
|
||||||
|
|
||||||
|
COPY docker/healthcheck.sh docker/start.sh /
|
||||||
|
|
||||||
|
COPY --from=vault /web-vault ./web-vault
|
||||||
|
COPY --from=build /app/target/final/vaultwarden .
|
||||||
|
|
||||||
|
HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]
|
||||||
|
|
||||||
|
CMD ["/start.sh"]
|
|
@ -1,68 +1,14 @@
|
||||||
# syntax=docker/dockerfile:1
|
# syntax=docker/dockerfile:1
|
||||||
|
|
||||||
# This file was generated using a Jinja2 template.
|
# This file was generated using a Jinja2 template.
|
||||||
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.
|
# Please make your changes in `DockerSettings.yaml` or `Dockerfile.j2` and then `make`
|
||||||
{% set rust_version = "1.70.0" %}
|
# This will generate two Dockerfile's `Dockerfile.debian` and `Dockerfile.alpine`
|
||||||
{% set debian_version = "bullseye" %}
|
|
||||||
{% set alpine_version = "3.17" %}
|
|
||||||
{% set build_stage_base_image = "docker.io/library/rust:%s-%s" % (rust_version, debian_version) %}
|
|
||||||
{% if "alpine" in target_file %}
|
|
||||||
{% if "amd64" in target_file %}
|
|
||||||
{% set build_stage_base_image = "docker.io/blackdex/rust-musl:x86_64-musl-stable-%s" % rust_version %}
|
|
||||||
{% set runtime_stage_base_image = "docker.io/library/alpine:%s" % alpine_version %}
|
|
||||||
{% set package_arch_target = "x86_64-unknown-linux-musl" %}
|
|
||||||
{% elif "armv7" in target_file %}
|
|
||||||
{% set build_stage_base_image = "docker.io/blackdex/rust-musl:armv7-musleabihf-stable-%s" % rust_version %}
|
|
||||||
{% set runtime_stage_base_image = "docker.io/balenalib/armv7hf-alpine:%s" % alpine_version %}
|
|
||||||
{% set package_arch_target = "armv7-unknown-linux-musleabihf" %}
|
|
||||||
{% elif "armv6" in target_file %}
|
|
||||||
{% set build_stage_base_image = "docker.io/blackdex/rust-musl:arm-musleabi-stable-%s" % rust_version %}
|
|
||||||
{% set runtime_stage_base_image = "docker.io/balenalib/rpi-alpine:%s" % alpine_version %}
|
|
||||||
{% set package_arch_target = "arm-unknown-linux-musleabi" %}
|
|
||||||
{% elif "arm64" in target_file %}
|
|
||||||
{% set build_stage_base_image = "docker.io/blackdex/rust-musl:aarch64-musl-stable-%s" % rust_version %}
|
|
||||||
{% set runtime_stage_base_image = "docker.io/balenalib/aarch64-alpine:%s" % alpine_version %}
|
|
||||||
{% set package_arch_target = "aarch64-unknown-linux-musl" %}
|
|
||||||
{% endif %}
|
|
||||||
{% elif "amd64" in target_file %}
|
|
||||||
{% set runtime_stage_base_image = "docker.io/library/debian:%s-slim" % debian_version %}
|
|
||||||
{% elif "arm64" in target_file %}
|
|
||||||
{% set runtime_stage_base_image = "docker.io/balenalib/aarch64-debian:%s" % debian_version %}
|
|
||||||
{% set package_arch_name = "arm64" %}
|
|
||||||
{% set package_arch_target = "aarch64-unknown-linux-gnu" %}
|
|
||||||
{% set package_cross_compiler = "aarch64-linux-gnu" %}
|
|
||||||
{% elif "armv6" in target_file %}
|
|
||||||
{% set runtime_stage_base_image = "docker.io/balenalib/rpi-debian:%s" % debian_version %}
|
|
||||||
{% set package_arch_name = "armel" %}
|
|
||||||
{% set package_arch_target = "arm-unknown-linux-gnueabi" %}
|
|
||||||
{% set package_cross_compiler = "arm-linux-gnueabi" %}
|
|
||||||
{% elif "armv7" in target_file %}
|
|
||||||
{% set runtime_stage_base_image = "docker.io/balenalib/armv7hf-debian:%s" % debian_version %}
|
|
||||||
{% set package_arch_name = "armhf" %}
|
|
||||||
{% set package_arch_target = "armv7-unknown-linux-gnueabihf" %}
|
|
||||||
{% set package_cross_compiler = "arm-linux-gnueabihf" %}
|
|
||||||
{% endif %}
|
|
||||||
{% if package_arch_name is defined %}
|
|
||||||
{% set package_arch_prefix = ":" + package_arch_name %}
|
|
||||||
{% else %}
|
|
||||||
{% set package_arch_prefix = "" %}
|
|
||||||
{% endif %}
|
|
||||||
{% if package_arch_target is defined %}
|
|
||||||
{% set package_arch_target_param = " --target=" + package_arch_target %}
|
|
||||||
{% else %}
|
|
||||||
{% set package_arch_target_param = "" %}
|
|
||||||
{% endif %}
|
|
||||||
{% if "buildkit" in target_file %}
|
|
||||||
{% set mount_rust_cache = "--mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry " %}
|
|
||||||
{% else %}
|
|
||||||
{% set mount_rust_cache = "" %}
|
|
||||||
{% endif %}
|
|
||||||
# Using multistage build:
|
# Using multistage build:
|
||||||
# https://docs.docker.com/develop/develop-images/multistage-build/
|
# https://docs.docker.com/develop/develop-images/multistage-build/
|
||||||
# https://whitfin.io/speeding-up-rust-docker-builds/
|
# https://whitfin.io/speeding-up-rust-docker-builds/
|
||||||
####################### VAULT BUILD IMAGE #######################
|
|
||||||
{% set vault_version = "v2023.5.0" %}
|
####################### VAULT BUILD IMAGE #######################
|
||||||
{% set vault_image_digest = "sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085" %}
|
|
||||||
# The web-vault digest specifies a particular web-vault build on Docker Hub.
|
# The web-vault digest specifies a particular web-vault build on Docker Hub.
|
||||||
# Using the digest instead of the tag name provides better security,
|
# Using the digest instead of the tag name provides better security,
|
||||||
# as the digest of an image is immutable, whereas a tag name can later
|
# as the digest of an image is immutable, whereas a tag name can later
|
||||||
|
@ -80,10 +26,33 @@
|
||||||
# $ docker image inspect --format "{{ '{{' }}.RepoTags}}" docker.io/vaultwarden/web-vault@{{ vault_image_digest }}
|
# $ docker image inspect --format "{{ '{{' }}.RepoTags}}" docker.io/vaultwarden/web-vault@{{ vault_image_digest }}
|
||||||
# [docker.io/vaultwarden/web-vault:{{ vault_version }}]
|
# [docker.io/vaultwarden/web-vault:{{ vault_version }}]
|
||||||
#
|
#
|
||||||
FROM docker.io/vaultwarden/web-vault@{{ vault_image_digest }} as vault
|
FROM --platform=linux/amd64 docker.io/vaultwarden/web-vault@{{ vault_image_digest }} as vault
|
||||||
|
|
||||||
########################## BUILD IMAGE ##########################
|
{% if base == "debian" %}
|
||||||
FROM {{ build_stage_base_image }} as build
|
########################## Cross Compile Docker Helper Scripts ##########################
|
||||||
|
## We use the linux/amd64 no matter which Build Platform, since these are all bash scripts
|
||||||
|
## And these bash scripts do not have any significant difference if at all
|
||||||
|
FROM --platform=linux/amd64 docker.io/tonistiigi/xx@{{ xx_image_digest }} AS xx
|
||||||
|
{% elif base == "alpine" %}
|
||||||
|
########################## ALPINE BUILD IMAGES ##########################
|
||||||
|
## NOTE: The Alpine Base Images do not support other platforms then linux/amd64
|
||||||
|
## And for Alpine we define all build images here, they will only be loaded when actually used
|
||||||
|
{% for arch in build_stage_image[base].arch_image %}
|
||||||
|
FROM --platform={{ build_stage_image[base].platform }} {{ build_stage_image[base].arch_image[arch] }} as build_{{ arch }}
|
||||||
|
{% endfor %}
|
||||||
|
{% endif %}
|
||||||
|
|
||||||
|
########################## BUILD IMAGE ##########################
|
||||||
|
# hadolint ignore=DL3006
|
||||||
|
FROM --platform={{ build_stage_image[base].platform }} {{ build_stage_image[base].image }} as build
|
||||||
|
{% if base == "debian" %}
|
||||||
|
COPY --from=xx / /
|
||||||
|
{% endif %}
|
||||||
|
ARG TARGETARCH
|
||||||
|
ARG TARGETVARIANT
|
||||||
|
ARG TARGETPLATFORM
|
||||||
|
|
||||||
|
SHELL ["/bin/bash", "-o", "pipefail", "-c"]
|
||||||
|
|
||||||
# Build time options to avoid dpkg warnings and help with reproducible builds.
|
# Build time options to avoid dpkg warnings and help with reproducible builds.
|
||||||
ENV DEBIAN_FRONTEND=noninteractive \
|
ENV DEBIAN_FRONTEND=noninteractive \
|
||||||
|
@ -92,134 +61,172 @@ ENV DEBIAN_FRONTEND=noninteractive \
|
||||||
TERM=xterm-256color \
|
TERM=xterm-256color \
|
||||||
CARGO_HOME="/root/.cargo" \
|
CARGO_HOME="/root/.cargo" \
|
||||||
USER="root"
|
USER="root"
|
||||||
|
{%- if base == "alpine" %} \
|
||||||
|
# Use PostgreSQL v15 during Alpine/MUSL builds instead of the default v11
|
||||||
|
# Debian Bookworm already contains libpq v15
|
||||||
|
PQ_LIB_DIR="/usr/local/musl/pq15/lib"
|
||||||
|
{% endif %}
|
||||||
|
|
||||||
|
{% if base == "debian" %}
|
||||||
|
|
||||||
|
# Install clang to get `xx-cargo` working
|
||||||
|
# Install pkg-config to allow amd64 builds to find all libraries
|
||||||
|
# Install git so build.rs can determine the correct version
|
||||||
|
# Install the libc cross packages based upon the debian-arch
|
||||||
|
RUN apt-get update && \
|
||||||
|
apt-get install -y \
|
||||||
|
--no-install-recommends \
|
||||||
|
clang \
|
||||||
|
pkg-config \
|
||||||
|
git \
|
||||||
|
"libc6-$(xx-info debian-arch)-cross" \
|
||||||
|
"libc6-dev-$(xx-info debian-arch)-cross" \
|
||||||
|
"linux-libc-dev-$(xx-info debian-arch)-cross" && \
|
||||||
|
xx-apt-get install -y \
|
||||||
|
--no-install-recommends \
|
||||||
|
gcc \
|
||||||
|
libmariadb3 \
|
||||||
|
libpq-dev \
|
||||||
|
libpq5 \
|
||||||
|
libssl-dev \
|
||||||
|
zlib1g-dev && \
|
||||||
|
# Force install arch dependend mariadb dev packages
|
||||||
|
# Installing them the normal way breaks several other packages (again)
|
||||||
|
apt-get download "libmariadb-dev-compat:$(xx-info debian-arch)" "libmariadb-dev:$(xx-info debian-arch)" && \
|
||||||
|
dpkg --force-all -i ./libmariadb-dev*.deb && \
|
||||||
|
# Run xx-cargo early, since it sometimes seems to break when run at a later stage
|
||||||
|
echo "export CARGO_TARGET=$(xx-cargo --print-target-triple)" >> /env-cargo
|
||||||
|
{% endif %}
|
||||||
|
|
||||||
# Create CARGO_HOME folder and don't download rust docs
|
# Create CARGO_HOME folder and don't download rust docs
|
||||||
RUN {{ mount_rust_cache -}} mkdir -pv "${CARGO_HOME}" \
|
RUN mkdir -pv "${CARGO_HOME}" && \
|
||||||
&& rustup set profile minimal
|
rustup set profile minimal
|
||||||
|
|
||||||
{% if "alpine" in target_file %}
|
|
||||||
{% if "armv6" in target_file %}
|
|
||||||
# To be able to build the armv6 image with mimalloc we need to specifically specify the libatomic.a file location
|
|
||||||
ENV RUSTFLAGS='-Clink-arg=/usr/local/musl/{{ package_arch_target }}/lib/libatomic.a'
|
|
||||||
{% endif %}
|
|
||||||
{% elif "arm" in target_file %}
|
|
||||||
# Install build dependencies for the {{ package_arch_name }} architecture
|
|
||||||
RUN dpkg --add-architecture {{ package_arch_name }} \
|
|
||||||
&& apt-get update \
|
|
||||||
&& apt-get install -y \
|
|
||||||
--no-install-recommends \
|
|
||||||
gcc-{{ package_cross_compiler }} \
|
|
||||||
libc6-dev{{ package_arch_prefix }} \
|
|
||||||
libmariadb-dev{{ package_arch_prefix }} \
|
|
||||||
libmariadb-dev-compat{{ package_arch_prefix }} \
|
|
||||||
libmariadb3{{ package_arch_prefix }} \
|
|
||||||
libpq-dev{{ package_arch_prefix }} \
|
|
||||||
libpq5{{ package_arch_prefix }} \
|
|
||||||
libssl-dev{{ package_arch_prefix }} \
|
|
||||||
#
|
|
||||||
# Make sure cargo has the right target config
|
|
||||||
&& echo '[target.{{ package_arch_target }}]' >> "${CARGO_HOME}/config" \
|
|
||||||
&& echo 'linker = "{{ package_cross_compiler }}-gcc"' >> "${CARGO_HOME}/config" \
|
|
||||||
&& echo 'rustflags = ["-L/usr/lib/{{ package_cross_compiler }}"]' >> "${CARGO_HOME}/config"
|
|
||||||
|
|
||||||
# Set arm specific environment values
|
|
||||||
ENV CC_{{ package_arch_target | replace("-", "_") }}="/usr/bin/{{ package_cross_compiler }}-gcc" \
|
|
||||||
CROSS_COMPILE="1" \
|
|
||||||
OPENSSL_INCLUDE_DIR="/usr/include/{{ package_cross_compiler }}" \
|
|
||||||
OPENSSL_LIB_DIR="/usr/lib/{{ package_cross_compiler }}"
|
|
||||||
{% elif "amd64" in target_file %}
|
|
||||||
# Install build dependencies
|
|
||||||
RUN apt-get update \
|
|
||||||
&& apt-get install -y \
|
|
||||||
--no-install-recommends \
|
|
||||||
libmariadb-dev \
|
|
||||||
libpq-dev
|
|
||||||
{% endif %}
|
|
||||||
|
|
||||||
# Creates a dummy project used to grab dependencies
|
# Creates a dummy project used to grab dependencies
|
||||||
RUN USER=root cargo new --bin /app
|
RUN USER=root cargo new --bin /app
|
||||||
WORKDIR /app
|
WORKDIR /app
|
||||||
|
|
||||||
# Copies over *only* your manifests and build files
|
{% if base == "debian" %}
|
||||||
COPY ./Cargo.* ./
|
# Environment variables for Cargo on Debian based builds
|
||||||
COPY ./rust-toolchain ./rust-toolchain
|
ARG ARCH_OPENSSL_LIB_DIR \
|
||||||
COPY ./build.rs ./build.rs
|
ARCH_OPENSSL_INCLUDE_DIR
|
||||||
|
|
||||||
|
RUN source /env-cargo && \
|
||||||
|
if xx-info is-cross ; then \
|
||||||
|
# Some special variables if needed to override some build paths
|
||||||
|
if [[ -n "${ARCH_OPENSSL_LIB_DIR}" && -n "${ARCH_OPENSSL_INCLUDE_DIR}" ]]; then \
|
||||||
|
echo "export $(echo "${CARGO_TARGET}" | tr '[:lower:]' '[:upper:]' | tr - _)_OPENSSL_LIB_DIR=${ARCH_OPENSSL_LIB_DIR}" >> /env-cargo && \
|
||||||
|
echo "export $(echo "${CARGO_TARGET}" | tr '[:lower:]' '[:upper:]' | tr - _)_OPENSSL_INCLUDE_DIR=${ARCH_OPENSSL_INCLUDE_DIR}" >> /env-cargo ; \
|
||||||
|
fi && \
|
||||||
|
# We can't use xx-cargo since that uses clang, which doesn't work for our libraries.
|
||||||
|
# Because of this we generate the needed environment variables here which we can load in the needed steps.
|
||||||
|
echo "export CC_$(echo "${CARGO_TARGET}" | tr '[:upper:]' '[:lower:]' | tr - _)=/usr/bin/$(xx-info)-gcc" >> /env-cargo && \
|
||||||
|
echo "export CARGO_TARGET_$(echo "${CARGO_TARGET}" | tr '[:lower:]' '[:upper:]' | tr - _)_LINKER=/usr/bin/$(xx-info)-gcc" >> /env-cargo && \
|
||||||
|
echo "export PKG_CONFIG=/usr/bin/$(xx-info)-pkg-config" >> /env-cargo && \
|
||||||
|
echo "export CROSS_COMPILE=1" >> /env-cargo && \
|
||||||
|
echo "export OPENSSL_INCLUDE_DIR=/usr/include/$(xx-info)" >> /env-cargo && \
|
||||||
|
echo "export OPENSSL_LIB_DIR=/usr/lib/$(xx-info)" >> /env-cargo ; \
|
||||||
|
fi && \
|
||||||
|
# Output the current contents of the file
|
||||||
|
cat /env-cargo
|
||||||
|
|
||||||
|
{% elif base == "alpine" %}
|
||||||
|
# Environment variables for Cargo on Alpine based builds
|
||||||
|
RUN echo "export CARGO_TARGET=${RUST_MUSL_CROSS_TARGET}" >> /env-cargo && \
|
||||||
|
# To be able to build the armv6 image with mimalloc we need to tell the linker to also look for libatomic
|
||||||
|
if [[ "${TARGETARCH}${TARGETVARIANT}" == "armv6" ]] ; then echo "export RUSTFLAGS='-Clink-arg=-latomic'" >> /env-cargo ; fi && \
|
||||||
|
# Output the current contents of the file
|
||||||
|
cat /env-cargo
|
||||||
|
|
||||||
{% if package_arch_target is defined %}
|
|
||||||
RUN {{ mount_rust_cache -}} rustup target add {{ package_arch_target }}
|
|
||||||
{% endif %}
|
{% endif %}
|
||||||
|
RUN source /env-cargo && \
|
||||||
|
rustup target add "${CARGO_TARGET}"
|
||||||
|
|
||||||
|
# Copies over *only* your manifests and build files
|
||||||
|
COPY ./Cargo.* ./rust-toolchain.toml ./build.rs ./
|
||||||
|
|
||||||
|
ARG CARGO_PROFILE=release
|
||||||
|
|
||||||
# Configure the DB ARG as late as possible to not invalidate the cached layers above
|
# Configure the DB ARG as late as possible to not invalidate the cached layers above
|
||||||
{% if "alpine" in target_file %}
|
{% if base == "debian" %}
|
||||||
|
ARG DB=sqlite,mysql,postgresql
|
||||||
|
{% elif base == "alpine" %}
|
||||||
# Enable MiMalloc to improve performance on Alpine builds
|
# Enable MiMalloc to improve performance on Alpine builds
|
||||||
ARG DB=sqlite,mysql,postgresql,enable_mimalloc
|
ARG DB=sqlite,mysql,postgresql,enable_mimalloc
|
||||||
{% else %}
|
|
||||||
ARG DB=sqlite,mysql,postgresql
|
|
||||||
{% endif %}
|
{% endif %}
|
||||||
|
|
||||||
# Builds your dependencies and removes the
|
# Builds your dependencies and removes the
|
||||||
# dummy project, except the target folder
|
# dummy project, except the target folder
|
||||||
# This folder contains the compiled dependencies
|
# This folder contains the compiled dependencies
|
||||||
RUN {{ mount_rust_cache -}} cargo build --features ${DB} --release{{ package_arch_target_param }} \
|
RUN source /env-cargo && \
|
||||||
&& find . -not -path "./target*" -delete
|
cargo build --features ${DB} --profile "${CARGO_PROFILE}" --target="${CARGO_TARGET}" && \
|
||||||
|
find . -not -path "./target*" -delete
|
||||||
|
|
||||||
# Copies the complete project
|
# Copies the complete project
|
||||||
# To avoid copying unneeded files, use .dockerignore
|
# To avoid copying unneeded files, use .dockerignore
|
||||||
COPY . .
|
COPY . .
|
||||||
|
|
||||||
# Make sure that we actually build the project
|
ARG VW_VERSION
|
||||||
RUN touch src/main.rs
|
|
||||||
|
# Builds again, this time it will be the actual source files being build
|
||||||
|
RUN source /env-cargo && \
|
||||||
|
# Make sure that we actually build the project by updating the src/main.rs timestamp
|
||||||
|
# Also do this for build.rs to ensure the version is rechecked
|
||||||
|
touch build.rs src/main.rs && \
|
||||||
|
# Create a symlink to the binary target folder to easy copy the binary in the final stage
|
||||||
|
cargo build --features ${DB} --profile "${CARGO_PROFILE}" --target="${CARGO_TARGET}" && \
|
||||||
|
if [[ "${CARGO_PROFILE}" == "dev" ]] ; then \
|
||||||
|
ln -vfsr "/app/target/${CARGO_TARGET}/debug" /app/target/final ; \
|
||||||
|
else \
|
||||||
|
ln -vfsr "/app/target/${CARGO_TARGET}/${CARGO_PROFILE}" /app/target/final ; \
|
||||||
|
fi
|
||||||
|
|
||||||
# Builds again, this time it'll just be
|
|
||||||
# your actual source files being built
|
|
||||||
RUN {{ mount_rust_cache -}} cargo build --features ${DB} --release{{ package_arch_target_param }}
|
|
||||||
|
|
||||||
######################## RUNTIME IMAGE ########################
|
######################## RUNTIME IMAGE ########################
|
||||||
# Create a new stage with a minimal image
|
# Create a new stage with a minimal image
|
||||||
# because we already have a binary built
|
# because we already have a binary built
|
||||||
FROM {{ runtime_stage_base_image }}
|
#
|
||||||
|
# To build these images you need to have qemu binfmt support.
|
||||||
|
# See the following pages to help install these tools locally
|
||||||
|
# Ubuntu/Debian: https://wiki.debian.org/QemuUserEmulation
|
||||||
|
# Arch Linux: https://wiki.archlinux.org/title/QEMU#Chrooting_into_arm/arm64_environment_from_x86_64
|
||||||
|
#
|
||||||
|
# Or use a Docker image which modifies your host system to support this.
|
||||||
|
# The GitHub Actions Workflow uses the same image as used below.
|
||||||
|
# See: https://github.com/tonistiigi/binfmt
|
||||||
|
# Usage: docker run --privileged --rm tonistiigi/binfmt --install arm64,arm
|
||||||
|
# To uninstall: docker run --privileged --rm tonistiigi/binfmt --uninstall 'qemu-*'
|
||||||
|
#
|
||||||
|
# We need to add `--platform` here, because of a podman bug: https://github.com/containers/buildah/issues/4742
|
||||||
|
FROM --platform=$TARGETPLATFORM {{ runtime_stage_image[base] }}
|
||||||
|
|
||||||
ENV ROCKET_PROFILE="release" \
|
ENV ROCKET_PROFILE="release" \
|
||||||
ROCKET_ADDRESS=0.0.0.0 \
|
ROCKET_ADDRESS=0.0.0.0 \
|
||||||
ROCKET_PORT=80
|
ROCKET_PORT=80
|
||||||
{%- if "alpine" in runtime_stage_base_image %} \
|
{%- if base == "debian" %} \
|
||||||
|
DEBIAN_FRONTEND=noninteractive
|
||||||
|
{% elif base == "alpine" %} \
|
||||||
SSL_CERT_DIR=/etc/ssl/certs
|
SSL_CERT_DIR=/etc/ssl/certs
|
||||||
{% endif %}
|
{% endif %}
|
||||||
|
|
||||||
|
|
||||||
{% if "amd64" not in target_file %}
|
|
||||||
RUN [ "cross-build-start" ]
|
|
||||||
{% endif %}
|
|
||||||
|
|
||||||
# Create data folder and Install needed libraries
|
# Create data folder and Install needed libraries
|
||||||
RUN mkdir /data \
|
RUN mkdir /data && \
|
||||||
{% if "alpine" in runtime_stage_base_image %}
|
{% if base == "debian" %}
|
||||||
&& apk add --no-cache \
|
apt-get update && apt-get install -y \
|
||||||
|
--no-install-recommends \
|
||||||
|
ca-certificates \
|
||||||
|
curl \
|
||||||
|
libmariadb-dev-compat \
|
||||||
|
libpq5 \
|
||||||
|
openssl && \
|
||||||
|
apt-get clean && \
|
||||||
|
rm -rf /var/lib/apt/lists/*
|
||||||
|
{% elif base == "alpine" %}
|
||||||
|
apk --no-cache add \
|
||||||
ca-certificates \
|
ca-certificates \
|
||||||
curl \
|
curl \
|
||||||
openssl \
|
openssl \
|
||||||
tzdata
|
tzdata
|
||||||
{% else %}
|
|
||||||
&& apt-get update && apt-get install -y \
|
|
||||||
--no-install-recommends \
|
|
||||||
ca-certificates \
|
|
||||||
curl \
|
|
||||||
libmariadb-dev-compat \
|
|
||||||
libpq5 \
|
|
||||||
openssl \
|
|
||||||
&& apt-get clean \
|
|
||||||
&& rm -rf /var/lib/apt/lists/*
|
|
||||||
{% endif %}
|
|
||||||
|
|
||||||
{% if "armv6" in target_file and "alpine" not in target_file %}
|
|
||||||
# In the Balena Bullseye images for armv6/rpi-debian there is a missing symlink.
|
|
||||||
# This symlink was there in the buster images, and for some reason this is needed.
|
|
||||||
RUN ln -v -s /lib/ld-linux-armhf.so.3 /lib/ld-linux.so.3
|
|
||||||
|
|
||||||
{% endif -%}
|
|
||||||
|
|
||||||
{% if "amd64" not in target_file %}
|
|
||||||
RUN [ "cross-build-end" ]
|
|
||||||
{% endif %}
|
{% endif %}
|
||||||
|
|
||||||
VOLUME /data
|
VOLUME /data
|
||||||
|
@ -229,15 +236,11 @@ EXPOSE 3012
|
||||||
# Copies the files from the context (Rocket.toml file and web-vault)
|
# Copies the files from the context (Rocket.toml file and web-vault)
|
||||||
# and the binary from the "build" stage to the current stage
|
# and the binary from the "build" stage to the current stage
|
||||||
WORKDIR /
|
WORKDIR /
|
||||||
COPY --from=vault /web-vault ./web-vault
|
|
||||||
{% if package_arch_target is defined %}
|
|
||||||
COPY --from=build /app/target/{{ package_arch_target }}/release/vaultwarden .
|
|
||||||
{% else %}
|
|
||||||
COPY --from=build /app/target/release/vaultwarden .
|
|
||||||
{% endif %}
|
|
||||||
|
|
||||||
COPY docker/healthcheck.sh /healthcheck.sh
|
COPY docker/healthcheck.sh docker/start.sh /
|
||||||
COPY docker/start.sh /start.sh
|
|
||||||
|
COPY --from=vault /web-vault ./web-vault
|
||||||
|
COPY --from=build /app/target/final/vaultwarden .
|
||||||
|
|
||||||
HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]
|
HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]
|
||||||
|
|
||||||
|
|
|
@ -1,15 +1,4 @@
|
||||||
OBJECTS := $(shell find ./ -mindepth 2 -name 'Dockerfile*')
|
all:
|
||||||
|
./render_template Dockerfile.j2 '{"base": "debian"}' > Dockerfile.debian
|
||||||
all: $(OBJECTS)
|
./render_template Dockerfile.j2 '{"base": "alpine"}' > Dockerfile.alpine
|
||||||
|
.PHONY: all
|
||||||
%/Dockerfile: Dockerfile.j2 render_template
|
|
||||||
./render_template "$<" "{\"target_file\":\"$@\"}" > "$@"
|
|
||||||
|
|
||||||
%/Dockerfile.alpine: Dockerfile.j2 render_template
|
|
||||||
./render_template "$<" "{\"target_file\":\"$@\"}" > "$@"
|
|
||||||
|
|
||||||
%/Dockerfile.buildkit: Dockerfile.j2 render_template
|
|
||||||
./render_template "$<" "{\"target_file\":\"$@\"}" > "$@"
|
|
||||||
|
|
||||||
%/Dockerfile.buildkit.alpine: Dockerfile.j2 render_template
|
|
||||||
./render_template "$<" "{\"target_file\":\"$@\"}" > "$@"
|
|
||||||
|
|
189
docker/README.md
189
docker/README.md
|
@ -1,3 +1,188 @@
|
||||||
The arch-specific directory names follow the arch identifiers used by the Docker official images:
|
# Vaultwarden Container Building
|
||||||
|
|
||||||
https://github.com/docker-library/official-images/blob/master/README.md#architectures-other-than-amd64
|
To build and release new testing and stable releases of Vaultwarden we use `docker buildx bake`.<br>
|
||||||
|
This can be used locally by running the command yourself, but it is also used by GitHub Actions.
|
||||||
|
|
||||||
|
This makes it easier for us to test and maintain the different architectures we provide.<br>
|
||||||
|
We also just have two Dockerfile's one for Debian and one for Alpine based images.<br>
|
||||||
|
With just these two files we can build both Debian and Alpine images for the following platforms:
|
||||||
|
- amd64 (linux/amd64)
|
||||||
|
- arm64 (linux/arm64)
|
||||||
|
- armv7 (linux/arm/v7)
|
||||||
|
- armv6 (linux/arm/v6)
|
||||||
|
|
||||||
|
Some unsupported platforms for Debian based images. These are not built and tested by default and are only provided to make it easier for users to build for these architectures.
|
||||||
|
- 386 (linux/386)
|
||||||
|
- ppc64le (linux/ppc64le)
|
||||||
|
- s390x (linux/s390x)
|
||||||
|
|
||||||
|
To build these containers you need to enable QEMU binfmt support to be able to run/emulate architectures which are different then your host.<br>
|
||||||
|
This ensures the container build process can run binaries from other architectures.<br>
|
||||||
|
|
||||||
|
**NOTE**: Run all the examples below from the root of the repo.<br>
|
||||||
|
|
||||||
|
|
||||||
|
## How to install QEMU binfmt support
|
||||||
|
|
||||||
|
This is different per host OS, but most support this in some way.<br>
|
||||||
|
|
||||||
|
### Ubuntu/Debian
|
||||||
|
```bash
|
||||||
|
apt install binfmt-support qemu-user-static
|
||||||
|
```
|
||||||
|
|
||||||
|
### Arch Linux (others based upon it)
|
||||||
|
```bash
|
||||||
|
pacman -S qemu-user-static qemu-user-static-binfmt
|
||||||
|
```
|
||||||
|
|
||||||
|
### Fedora
|
||||||
|
```bash
|
||||||
|
dnf install qemu-user-static
|
||||||
|
```
|
||||||
|
|
||||||
|
### Others
|
||||||
|
There also is an option to use an other docker container to provide support for this.
|
||||||
|
```bash
|
||||||
|
# To install and activate
|
||||||
|
docker run --privileged --rm tonistiigi/binfmt --install arm64,arm
|
||||||
|
# To unistall
|
||||||
|
docker run --privileged --rm tonistiigi/binfmt --uninstall 'qemu-*'
|
||||||
|
```
|
||||||
|
|
||||||
|
|
||||||
|
## Single architecture container building
|
||||||
|
|
||||||
|
You can build a container per supported architecture as long as you have QEMU binfmt support installed on your system.<br>
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Default bake triggers a Debian build using the hosts architecture
|
||||||
|
docker buildx bake --file docker/docker-bake.hcl
|
||||||
|
|
||||||
|
# Bake Debian ARM64 using a debug build
|
||||||
|
CARGO_PROFILE=dev \
|
||||||
|
SOURCE_COMMIT="$(git rev-parse HEAD)" \
|
||||||
|
docker buildx bake --file docker/docker-bake.hcl debian-arm64
|
||||||
|
|
||||||
|
# Bake Alpine ARMv6 as a release build
|
||||||
|
SOURCE_COMMIT="$(git rev-parse HEAD)" \
|
||||||
|
docker buildx bake --file docker/docker-bake.hcl alpine-armv6
|
||||||
|
```
|
||||||
|
|
||||||
|
|
||||||
|
## Local Multi Architecture container building
|
||||||
|
|
||||||
|
Start the initialization, this only needs to be done once.
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Create and use a new buildx builder instance which connects to the host network
|
||||||
|
docker buildx create --name vaultwarden --use --driver-opt network=host
|
||||||
|
|
||||||
|
# Validate it runs
|
||||||
|
docker buildx inspect --bootstrap
|
||||||
|
|
||||||
|
# Create a local container registry directly reachable on the localhost
|
||||||
|
docker run -d --name registry --network host registry:2
|
||||||
|
```
|
||||||
|
|
||||||
|
After that is done, you should be able to build and push to the local registry.<br>
|
||||||
|
Use the following command with the modified variables to bake the Alpine images.<br>
|
||||||
|
Replace `alpine` with `debian` if you want to build the debian multi arch images.
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Start a buildx bake using a debug build
|
||||||
|
CARGO_PROFILE=dev \
|
||||||
|
SOURCE_COMMIT="$(git rev-parse HEAD)" \
|
||||||
|
CONTAINER_REGISTRIES="localhost:5000/vaultwarden/server" \
|
||||||
|
docker buildx bake --file docker/docker-bake.hcl alpine-multi
|
||||||
|
```
|
||||||
|
|
||||||
|
|
||||||
|
## Using the `bake.sh` script
|
||||||
|
|
||||||
|
To make it a bit more easier to trigger a build, there also is a `bake.sh` script.<br>
|
||||||
|
This script calls `docker buildx bake` with all the right parameters and also generates the `SOURCE_COMMIT` and `SOURCE_VERSION` variables.<br>
|
||||||
|
This script can be called from both the repo root or within the docker directory.
|
||||||
|
|
||||||
|
So, if you want to build a Multi Arch Alpine container pushing to your localhost registry you can run this from within the docker directory. (Just make sure you executed the initialization steps above first)
|
||||||
|
```bash
|
||||||
|
CONTAINER_REGISTRIES="localhost:5000/vaultwarden/server" \
|
||||||
|
./bake.sh alpine-multi
|
||||||
|
```
|
||||||
|
|
||||||
|
Or if you want to just build a Debian container from the repo root, you can run this.
|
||||||
|
```bash
|
||||||
|
docker/bake.sh
|
||||||
|
```
|
||||||
|
|
||||||
|
You can append both `alpine` and `debian` with `-amd64`, `-arm64`, `-armv7` or `-armv6`, which will trigger a build for that specific platform.<br>
|
||||||
|
This will also append those values to the tag so you can see the builded container when running `docker images`.
|
||||||
|
|
||||||
|
You can also append extra arguments after the target if you want. This can be useful for example to print what bake will use.
|
||||||
|
```bash
|
||||||
|
docker/bake.sh alpine-all --print
|
||||||
|
```
|
||||||
|
|
||||||
|
### Testing baked images
|
||||||
|
|
||||||
|
To test these images you can run these images by using the correct tag and provide the platform.<br>
|
||||||
|
For example, after you have build an arm64 image via `./bake.sh debian-arm64` you can run:
|
||||||
|
```bash
|
||||||
|
docker run --rm -it \
|
||||||
|
-e DISABLE_ADMIN_TOKEN=true \
|
||||||
|
-e I_REALLY_WANT_VOLATILE_STORAGE=true \
|
||||||
|
-p8080:80 --platform=linux/arm64 \
|
||||||
|
vaultwarden/server:testing-arm64
|
||||||
|
```
|
||||||
|
|
||||||
|
|
||||||
|
## Using the `podman-bake.sh` script
|
||||||
|
|
||||||
|
To also make building easier using podman, there is a `podman-bake.sh` script.<br>
|
||||||
|
This script calls `podman buildx build` with the needed parameters and the same as `bake.sh`, it will generate some variables automatically.<br>
|
||||||
|
This script can be called from both the repo root or within the docker directory.
|
||||||
|
|
||||||
|
**NOTE:** Unlike the `bake.sh` script, this only supports a single `CONTAINER_REGISTRIES`, and a single `BASE_TAGS` value, no comma separated values. It also only supports building separate architectures, no Multi Arch containers.
|
||||||
|
|
||||||
|
To build an Alpine arm64 image with only sqlite support and mimalloc, run this:
|
||||||
|
```bash
|
||||||
|
DB="sqlite,enable_mimalloc" \
|
||||||
|
./podman-bake.sh alpine-arm64
|
||||||
|
```
|
||||||
|
|
||||||
|
Or if you want to just build a Debian container from the repo root, you can run this.
|
||||||
|
```bash
|
||||||
|
docker/podman-bake.sh
|
||||||
|
```
|
||||||
|
|
||||||
|
You can append extra arguments after the target if you want. This can be useful for example to disable cache like this.
|
||||||
|
```bash
|
||||||
|
./podman-bake.sh alpine-arm64 --no-cache
|
||||||
|
```
|
||||||
|
|
||||||
|
For the podman builds you can, just like the `bake.sh` script, also append the architecture to build for that specific platform.<br>
|
||||||
|
|
||||||
|
### Testing podman builded images
|
||||||
|
|
||||||
|
The command to start a podman built container is almost the same as for the docker/bake built containers. The images start with `localhost/`, so you need to prepend that.
|
||||||
|
|
||||||
|
```bash
|
||||||
|
podman run --rm -it \
|
||||||
|
-e DISABLE_ADMIN_TOKEN=true \
|
||||||
|
-e I_REALLY_WANT_VOLATILE_STORAGE=true \
|
||||||
|
-p8080:80 --platform=linux/arm64 \
|
||||||
|
localhost/vaultwarden/server:testing-arm64
|
||||||
|
```
|
||||||
|
|
||||||
|
|
||||||
|
## Variables supported
|
||||||
|
| Variable | default | description |
|
||||||
|
| --------------------- | ------------------ | ----------- |
|
||||||
|
| CARGO_PROFILE | null | Which cargo profile to use. `null` means what is defined in the Dockerfile |
|
||||||
|
| DB | null | Which `features` to build. `null` means what is defined in the Dockerfile |
|
||||||
|
| SOURCE_REPOSITORY_URL | null | The source repository form where this build is triggered |
|
||||||
|
| SOURCE_COMMIT | null | The commit hash of the current commit for this build |
|
||||||
|
| SOURCE_VERSION | null | The current exact tag of this commit, else the last tag and the first 8 chars of the source commit |
|
||||||
|
| BASE_TAGS | testing | Tags to be used. Can be a comma separated value like "latest,1.29.2" |
|
||||||
|
| CONTAINER_REGISTRIES | vaultwarden/server | Comma separated value of container registries. Like `ghcr.io/dani-garcia/vaultwarden,docker.io/vaultwarden/server` |
|
||||||
|
| VW_VERSION | null | To override the `SOURCE_VERSION` value. This is also used by the `build.rs` code for example |
|
||||||
|
|
|
@ -1,118 +0,0 @@
|
||||||
# syntax=docker/dockerfile:1
|
|
||||||
|
|
||||||
# This file was generated using a Jinja2 template.
|
|
||||||
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.
|
|
||||||
# Using multistage build:
|
|
||||||
# https://docs.docker.com/develop/develop-images/multistage-build/
|
|
||||||
# https://whitfin.io/speeding-up-rust-docker-builds/
|
|
||||||
####################### VAULT BUILD IMAGE #######################
|
|
||||||
# The web-vault digest specifies a particular web-vault build on Docker Hub.
|
|
||||||
# Using the digest instead of the tag name provides better security,
|
|
||||||
# as the digest of an image is immutable, whereas a tag name can later
|
|
||||||
# be changed to point to a malicious image.
|
|
||||||
#
|
|
||||||
# To verify the current digest for a given tag name:
|
|
||||||
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
|
|
||||||
# click the tag name to view the digest of the image it currently points to.
|
|
||||||
# - From the command line:
|
|
||||||
# $ docker pull docker.io/vaultwarden/web-vault:v2023.5.0
|
|
||||||
# $ docker image inspect --format "{{.RepoDigests}}" docker.io/vaultwarden/web-vault:v2023.5.0
|
|
||||||
# [docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085]
|
|
||||||
#
|
|
||||||
# - Conversely, to get the tag name from the digest:
|
|
||||||
# $ docker image inspect --format "{{.RepoTags}}" docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085
|
|
||||||
# [docker.io/vaultwarden/web-vault:v2023.5.0]
|
|
||||||
#
|
|
||||||
FROM docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085 as vault
|
|
||||||
|
|
||||||
########################## BUILD IMAGE ##########################
|
|
||||||
FROM docker.io/library/rust:1.70.0-bullseye as build
|
|
||||||
|
|
||||||
# Build time options to avoid dpkg warnings and help with reproducible builds.
|
|
||||||
ENV DEBIAN_FRONTEND=noninteractive \
|
|
||||||
LANG=C.UTF-8 \
|
|
||||||
TZ=UTC \
|
|
||||||
TERM=xterm-256color \
|
|
||||||
CARGO_HOME="/root/.cargo" \
|
|
||||||
USER="root"
|
|
||||||
|
|
||||||
# Create CARGO_HOME folder and don't download rust docs
|
|
||||||
RUN mkdir -pv "${CARGO_HOME}" \
|
|
||||||
&& rustup set profile minimal
|
|
||||||
|
|
||||||
# Install build dependencies
|
|
||||||
RUN apt-get update \
|
|
||||||
&& apt-get install -y \
|
|
||||||
--no-install-recommends \
|
|
||||||
libmariadb-dev \
|
|
||||||
libpq-dev
|
|
||||||
|
|
||||||
# Creates a dummy project used to grab dependencies
|
|
||||||
RUN USER=root cargo new --bin /app
|
|
||||||
WORKDIR /app
|
|
||||||
|
|
||||||
# Copies over *only* your manifests and build files
|
|
||||||
COPY ./Cargo.* ./
|
|
||||||
COPY ./rust-toolchain ./rust-toolchain
|
|
||||||
COPY ./build.rs ./build.rs
|
|
||||||
|
|
||||||
|
|
||||||
# Configure the DB ARG as late as possible to not invalidate the cached layers above
|
|
||||||
ARG DB=sqlite,mysql,postgresql
|
|
||||||
|
|
||||||
# Builds your dependencies and removes the
|
|
||||||
# dummy project, except the target folder
|
|
||||||
# This folder contains the compiled dependencies
|
|
||||||
RUN cargo build --features ${DB} --release \
|
|
||||||
&& find . -not -path "./target*" -delete
|
|
||||||
|
|
||||||
# Copies the complete project
|
|
||||||
# To avoid copying unneeded files, use .dockerignore
|
|
||||||
COPY . .
|
|
||||||
|
|
||||||
# Make sure that we actually build the project
|
|
||||||
RUN touch src/main.rs
|
|
||||||
|
|
||||||
# Builds again, this time it'll just be
|
|
||||||
# your actual source files being built
|
|
||||||
RUN cargo build --features ${DB} --release
|
|
||||||
|
|
||||||
######################## RUNTIME IMAGE ########################
|
|
||||||
# Create a new stage with a minimal image
|
|
||||||
# because we already have a binary built
|
|
||||||
FROM docker.io/library/debian:bullseye-slim
|
|
||||||
|
|
||||||
ENV ROCKET_PROFILE="release" \
|
|
||||||
ROCKET_ADDRESS=0.0.0.0 \
|
|
||||||
ROCKET_PORT=80
|
|
||||||
|
|
||||||
|
|
||||||
# Create data folder and Install needed libraries
|
|
||||||
RUN mkdir /data \
|
|
||||||
&& apt-get update && apt-get install -y \
|
|
||||||
--no-install-recommends \
|
|
||||||
ca-certificates \
|
|
||||||
curl \
|
|
||||||
libmariadb-dev-compat \
|
|
||||||
libpq5 \
|
|
||||||
openssl \
|
|
||||||
&& apt-get clean \
|
|
||||||
&& rm -rf /var/lib/apt/lists/*
|
|
||||||
|
|
||||||
|
|
||||||
VOLUME /data
|
|
||||||
EXPOSE 80
|
|
||||||
EXPOSE 3012
|
|
||||||
|
|
||||||
# Copies the files from the context (Rocket.toml file and web-vault)
|
|
||||||
# and the binary from the "build" stage to the current stage
|
|
||||||
WORKDIR /
|
|
||||||
COPY --from=vault /web-vault ./web-vault
|
|
||||||
COPY --from=build /app/target/release/vaultwarden .
|
|
||||||
|
|
||||||
COPY docker/healthcheck.sh /healthcheck.sh
|
|
||||||
COPY docker/start.sh /start.sh
|
|
||||||
|
|
||||||
HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]
|
|
||||||
|
|
||||||
CMD ["/start.sh"]
|
|
|
@ -1,112 +0,0 @@
|
||||||
# syntax=docker/dockerfile:1
|
|
||||||
|
|
||||||
# This file was generated using a Jinja2 template.
|
|
||||||
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.
|
|
||||||
# Using multistage build:
|
|
||||||
# https://docs.docker.com/develop/develop-images/multistage-build/
|
|
||||||
# https://whitfin.io/speeding-up-rust-docker-builds/
|
|
||||||
####################### VAULT BUILD IMAGE #######################
|
|
||||||
# The web-vault digest specifies a particular web-vault build on Docker Hub.
|
|
||||||
# Using the digest instead of the tag name provides better security,
|
|
||||||
# as the digest of an image is immutable, whereas a tag name can later
|
|
||||||
# be changed to point to a malicious image.
|
|
||||||
#
|
|
||||||
# To verify the current digest for a given tag name:
|
|
||||||
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
|
|
||||||
# click the tag name to view the digest of the image it currently points to.
|
|
||||||
# - From the command line:
|
|
||||||
# $ docker pull docker.io/vaultwarden/web-vault:v2023.5.0
|
|
||||||
# $ docker image inspect --format "{{.RepoDigests}}" docker.io/vaultwarden/web-vault:v2023.5.0
|
|
||||||
# [docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085]
|
|
||||||
#
|
|
||||||
# - Conversely, to get the tag name from the digest:
|
|
||||||
# $ docker image inspect --format "{{.RepoTags}}" docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085
|
|
||||||
# [docker.io/vaultwarden/web-vault:v2023.5.0]
|
|
||||||
#
|
|
||||||
FROM docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085 as vault
|
|
||||||
|
|
||||||
########################## BUILD IMAGE ##########################
|
|
||||||
FROM docker.io/blackdex/rust-musl:x86_64-musl-stable-1.70.0 as build
|
|
||||||
|
|
||||||
# Build time options to avoid dpkg warnings and help with reproducible builds.
|
|
||||||
ENV DEBIAN_FRONTEND=noninteractive \
|
|
||||||
LANG=C.UTF-8 \
|
|
||||||
TZ=UTC \
|
|
||||||
TERM=xterm-256color \
|
|
||||||
CARGO_HOME="/root/.cargo" \
|
|
||||||
USER="root"
|
|
||||||
|
|
||||||
# Create CARGO_HOME folder and don't download rust docs
|
|
||||||
RUN mkdir -pv "${CARGO_HOME}" \
|
|
||||||
&& rustup set profile minimal
|
|
||||||
|
|
||||||
|
|
||||||
# Creates a dummy project used to grab dependencies
|
|
||||||
RUN USER=root cargo new --bin /app
|
|
||||||
WORKDIR /app
|
|
||||||
|
|
||||||
# Copies over *only* your manifests and build files
|
|
||||||
COPY ./Cargo.* ./
|
|
||||||
COPY ./rust-toolchain ./rust-toolchain
|
|
||||||
COPY ./build.rs ./build.rs
|
|
||||||
|
|
||||||
RUN rustup target add x86_64-unknown-linux-musl
|
|
||||||
|
|
||||||
# Configure the DB ARG as late as possible to not invalidate the cached layers above
|
|
||||||
# Enable MiMalloc to improve performance on Alpine builds
|
|
||||||
ARG DB=sqlite,mysql,postgresql,enable_mimalloc
|
|
||||||
|
|
||||||
# Builds your dependencies and removes the
|
|
||||||
# dummy project, except the target folder
|
|
||||||
# This folder contains the compiled dependencies
|
|
||||||
RUN cargo build --features ${DB} --release --target=x86_64-unknown-linux-musl \
|
|
||||||
&& find . -not -path "./target*" -delete
|
|
||||||
|
|
||||||
# Copies the complete project
|
|
||||||
# To avoid copying unneeded files, use .dockerignore
|
|
||||||
COPY . .
|
|
||||||
|
|
||||||
# Make sure that we actually build the project
|
|
||||||
RUN touch src/main.rs
|
|
||||||
|
|
||||||
# Builds again, this time it'll just be
|
|
||||||
# your actual source files being built
|
|
||||||
RUN cargo build --features ${DB} --release --target=x86_64-unknown-linux-musl
|
|
||||||
|
|
||||||
######################## RUNTIME IMAGE ########################
|
|
||||||
# Create a new stage with a minimal image
|
|
||||||
# because we already have a binary built
|
|
||||||
FROM docker.io/library/alpine:3.17
|
|
||||||
|
|
||||||
ENV ROCKET_PROFILE="release" \
|
|
||||||
ROCKET_ADDRESS=0.0.0.0 \
|
|
||||||
ROCKET_PORT=80 \
|
|
||||||
SSL_CERT_DIR=/etc/ssl/certs
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
# Create data folder and Install needed libraries
|
|
||||||
RUN mkdir /data \
|
|
||||||
&& apk add --no-cache \
|
|
||||||
ca-certificates \
|
|
||||||
curl \
|
|
||||||
openssl \
|
|
||||||
tzdata
|
|
||||||
|
|
||||||
|
|
||||||
VOLUME /data
|
|
||||||
EXPOSE 80
|
|
||||||
EXPOSE 3012
|
|
||||||
|
|
||||||
# Copies the files from the context (Rocket.toml file and web-vault)
|
|
||||||
# and the binary from the "build" stage to the current stage
|
|
||||||
WORKDIR /
|
|
||||||
COPY --from=vault /web-vault ./web-vault
|
|
||||||
COPY --from=build /app/target/x86_64-unknown-linux-musl/release/vaultwarden .
|
|
||||||
|
|
||||||
COPY docker/healthcheck.sh /healthcheck.sh
|
|
||||||
COPY docker/start.sh /start.sh
|
|
||||||
|
|
||||||
HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]
|
|
||||||
|
|
||||||
CMD ["/start.sh"]
|
|
|
@ -1,118 +0,0 @@
|
||||||
# syntax=docker/dockerfile:1
|
|
||||||
|
|
||||||
# This file was generated using a Jinja2 template.
|
|
||||||
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.
|
|
||||||
# Using multistage build:
|
|
||||||
# https://docs.docker.com/develop/develop-images/multistage-build/
|
|
||||||
# https://whitfin.io/speeding-up-rust-docker-builds/
|
|
||||||
####################### VAULT BUILD IMAGE #######################
|
|
||||||
# The web-vault digest specifies a particular web-vault build on Docker Hub.
|
|
||||||
# Using the digest instead of the tag name provides better security,
|
|
||||||
# as the digest of an image is immutable, whereas a tag name can later
|
|
||||||
# be changed to point to a malicious image.
|
|
||||||
#
|
|
||||||
# To verify the current digest for a given tag name:
|
|
||||||
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
|
|
||||||
# click the tag name to view the digest of the image it currently points to.
|
|
||||||
# - From the command line:
|
|
||||||
# $ docker pull docker.io/vaultwarden/web-vault:v2023.5.0
|
|
||||||
# $ docker image inspect --format "{{.RepoDigests}}" docker.io/vaultwarden/web-vault:v2023.5.0
|
|
||||||
# [docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085]
|
|
||||||
#
|
|
||||||
# - Conversely, to get the tag name from the digest:
|
|
||||||
# $ docker image inspect --format "{{.RepoTags}}" docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085
|
|
||||||
# [docker.io/vaultwarden/web-vault:v2023.5.0]
|
|
||||||
#
|
|
||||||
FROM docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085 as vault
|
|
||||||
|
|
||||||
########################## BUILD IMAGE ##########################
|
|
||||||
FROM docker.io/library/rust:1.70.0-bullseye as build
|
|
||||||
|
|
||||||
# Build time options to avoid dpkg warnings and help with reproducible builds.
|
|
||||||
ENV DEBIAN_FRONTEND=noninteractive \
|
|
||||||
LANG=C.UTF-8 \
|
|
||||||
TZ=UTC \
|
|
||||||
TERM=xterm-256color \
|
|
||||||
CARGO_HOME="/root/.cargo" \
|
|
||||||
USER="root"
|
|
||||||
|
|
||||||
# Create CARGO_HOME folder and don't download rust docs
|
|
||||||
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry mkdir -pv "${CARGO_HOME}" \
|
|
||||||
&& rustup set profile minimal
|
|
||||||
|
|
||||||
# Install build dependencies
|
|
||||||
RUN apt-get update \
|
|
||||||
&& apt-get install -y \
|
|
||||||
--no-install-recommends \
|
|
||||||
libmariadb-dev \
|
|
||||||
libpq-dev
|
|
||||||
|
|
||||||
# Creates a dummy project used to grab dependencies
|
|
||||||
RUN USER=root cargo new --bin /app
|
|
||||||
WORKDIR /app
|
|
||||||
|
|
||||||
# Copies over *only* your manifests and build files
|
|
||||||
COPY ./Cargo.* ./
|
|
||||||
COPY ./rust-toolchain ./rust-toolchain
|
|
||||||
COPY ./build.rs ./build.rs
|
|
||||||
|
|
||||||
|
|
||||||
# Configure the DB ARG as late as possible to not invalidate the cached layers above
|
|
||||||
ARG DB=sqlite,mysql,postgresql
|
|
||||||
|
|
||||||
# Builds your dependencies and removes the
|
|
||||||
# dummy project, except the target folder
|
|
||||||
# This folder contains the compiled dependencies
|
|
||||||
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry cargo build --features ${DB} --release \
|
|
||||||
&& find . -not -path "./target*" -delete
|
|
||||||
|
|
||||||
# Copies the complete project
|
|
||||||
# To avoid copying unneeded files, use .dockerignore
|
|
||||||
COPY . .
|
|
||||||
|
|
||||||
# Make sure that we actually build the project
|
|
||||||
RUN touch src/main.rs
|
|
||||||
|
|
||||||
# Builds again, this time it'll just be
|
|
||||||
# your actual source files being built
|
|
||||||
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry cargo build --features ${DB} --release
|
|
||||||
|
|
||||||
######################## RUNTIME IMAGE ########################
|
|
||||||
# Create a new stage with a minimal image
|
|
||||||
# because we already have a binary built
|
|
||||||
FROM docker.io/library/debian:bullseye-slim
|
|
||||||
|
|
||||||
ENV ROCKET_PROFILE="release" \
|
|
||||||
ROCKET_ADDRESS=0.0.0.0 \
|
|
||||||
ROCKET_PORT=80
|
|
||||||
|
|
||||||
|
|
||||||
# Create data folder and Install needed libraries
|
|
||||||
RUN mkdir /data \
|
|
||||||
&& apt-get update && apt-get install -y \
|
|
||||||
--no-install-recommends \
|
|
||||||
ca-certificates \
|
|
||||||
curl \
|
|
||||||
libmariadb-dev-compat \
|
|
||||||
libpq5 \
|
|
||||||
openssl \
|
|
||||||
&& apt-get clean \
|
|
||||||
&& rm -rf /var/lib/apt/lists/*
|
|
||||||
|
|
||||||
|
|
||||||
VOLUME /data
|
|
||||||
EXPOSE 80
|
|
||||||
EXPOSE 3012
|
|
||||||
|
|
||||||
# Copies the files from the context (Rocket.toml file and web-vault)
|
|
||||||
# and the binary from the "build" stage to the current stage
|
|
||||||
WORKDIR /
|
|
||||||
COPY --from=vault /web-vault ./web-vault
|
|
||||||
COPY --from=build /app/target/release/vaultwarden .
|
|
||||||
|
|
||||||
COPY docker/healthcheck.sh /healthcheck.sh
|
|
||||||
COPY docker/start.sh /start.sh
|
|
||||||
|
|
||||||
HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]
|
|
||||||
|
|
||||||
CMD ["/start.sh"]
|
|
|
@ -1,112 +0,0 @@
|
||||||
# syntax=docker/dockerfile:1
|
|
||||||
|
|
||||||
# This file was generated using a Jinja2 template.
|
|
||||||
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.
|
|
||||||
# Using multistage build:
|
|
||||||
# https://docs.docker.com/develop/develop-images/multistage-build/
|
|
||||||
# https://whitfin.io/speeding-up-rust-docker-builds/
|
|
||||||
####################### VAULT BUILD IMAGE #######################
|
|
||||||
# The web-vault digest specifies a particular web-vault build on Docker Hub.
|
|
||||||
# Using the digest instead of the tag name provides better security,
|
|
||||||
# as the digest of an image is immutable, whereas a tag name can later
|
|
||||||
# be changed to point to a malicious image.
|
|
||||||
#
|
|
||||||
# To verify the current digest for a given tag name:
|
|
||||||
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
|
|
||||||
# click the tag name to view the digest of the image it currently points to.
|
|
||||||
# - From the command line:
|
|
||||||
# $ docker pull docker.io/vaultwarden/web-vault:v2023.5.0
|
|
||||||
# $ docker image inspect --format "{{.RepoDigests}}" docker.io/vaultwarden/web-vault:v2023.5.0
|
|
||||||
# [docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085]
|
|
||||||
#
|
|
||||||
# - Conversely, to get the tag name from the digest:
|
|
||||||
# $ docker image inspect --format "{{.RepoTags}}" docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085
|
|
||||||
# [docker.io/vaultwarden/web-vault:v2023.5.0]
|
|
||||||
#
|
|
||||||
FROM docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085 as vault
|
|
||||||
|
|
||||||
########################## BUILD IMAGE ##########################
|
|
||||||
FROM docker.io/blackdex/rust-musl:x86_64-musl-stable-1.70.0 as build
|
|
||||||
|
|
||||||
# Build time options to avoid dpkg warnings and help with reproducible builds.
|
|
||||||
ENV DEBIAN_FRONTEND=noninteractive \
|
|
||||||
LANG=C.UTF-8 \
|
|
||||||
TZ=UTC \
|
|
||||||
TERM=xterm-256color \
|
|
||||||
CARGO_HOME="/root/.cargo" \
|
|
||||||
USER="root"
|
|
||||||
|
|
||||||
# Create CARGO_HOME folder and don't download rust docs
|
|
||||||
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry mkdir -pv "${CARGO_HOME}" \
|
|
||||||
&& rustup set profile minimal
|
|
||||||
|
|
||||||
|
|
||||||
# Creates a dummy project used to grab dependencies
|
|
||||||
RUN USER=root cargo new --bin /app
|
|
||||||
WORKDIR /app
|
|
||||||
|
|
||||||
# Copies over *only* your manifests and build files
|
|
||||||
COPY ./Cargo.* ./
|
|
||||||
COPY ./rust-toolchain ./rust-toolchain
|
|
||||||
COPY ./build.rs ./build.rs
|
|
||||||
|
|
||||||
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry rustup target add x86_64-unknown-linux-musl
|
|
||||||
|
|
||||||
# Configure the DB ARG as late as possible to not invalidate the cached layers above
|
|
||||||
# Enable MiMalloc to improve performance on Alpine builds
|
|
||||||
ARG DB=sqlite,mysql,postgresql,enable_mimalloc
|
|
||||||
|
|
||||||
# Builds your dependencies and removes the
|
|
||||||
# dummy project, except the target folder
|
|
||||||
# This folder contains the compiled dependencies
|
|
||||||
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry cargo build --features ${DB} --release --target=x86_64-unknown-linux-musl \
|
|
||||||
&& find . -not -path "./target*" -delete
|
|
||||||
|
|
||||||
# Copies the complete project
|
|
||||||
# To avoid copying unneeded files, use .dockerignore
|
|
||||||
COPY . .
|
|
||||||
|
|
||||||
# Make sure that we actually build the project
|
|
||||||
RUN touch src/main.rs
|
|
||||||
|
|
||||||
# Builds again, this time it'll just be
|
|
||||||
# your actual source files being built
|
|
||||||
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry cargo build --features ${DB} --release --target=x86_64-unknown-linux-musl
|
|
||||||
|
|
||||||
######################## RUNTIME IMAGE ########################
|
|
||||||
# Create a new stage with a minimal image
|
|
||||||
# because we already have a binary built
|
|
||||||
FROM docker.io/library/alpine:3.17
|
|
||||||
|
|
||||||
ENV ROCKET_PROFILE="release" \
|
|
||||||
ROCKET_ADDRESS=0.0.0.0 \
|
|
||||||
ROCKET_PORT=80 \
|
|
||||||
SSL_CERT_DIR=/etc/ssl/certs
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
# Create data folder and Install needed libraries
|
|
||||||
RUN mkdir /data \
|
|
||||||
&& apk add --no-cache \
|
|
||||||
ca-certificates \
|
|
||||||
curl \
|
|
||||||
openssl \
|
|
||||||
tzdata
|
|
||||||
|
|
||||||
|
|
||||||
VOLUME /data
|
|
||||||
EXPOSE 80
|
|
||||||
EXPOSE 3012
|
|
||||||
|
|
||||||
# Copies the files from the context (Rocket.toml file and web-vault)
|
|
||||||
# and the binary from the "build" stage to the current stage
|
|
||||||
WORKDIR /
|
|
||||||
COPY --from=vault /web-vault ./web-vault
|
|
||||||
COPY --from=build /app/target/x86_64-unknown-linux-musl/release/vaultwarden .
|
|
||||||
|
|
||||||
COPY docker/healthcheck.sh /healthcheck.sh
|
|
||||||
COPY docker/start.sh /start.sh
|
|
||||||
|
|
||||||
HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]
|
|
||||||
|
|
||||||
CMD ["/start.sh"]
|
|
|
@ -1,139 +0,0 @@
|
||||||
# syntax=docker/dockerfile:1
|
|
||||||
|
|
||||||
# This file was generated using a Jinja2 template.
|
|
||||||
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.
|
|
||||||
# Using multistage build:
|
|
||||||
# https://docs.docker.com/develop/develop-images/multistage-build/
|
|
||||||
# https://whitfin.io/speeding-up-rust-docker-builds/
|
|
||||||
####################### VAULT BUILD IMAGE #######################
|
|
||||||
# The web-vault digest specifies a particular web-vault build on Docker Hub.
|
|
||||||
# Using the digest instead of the tag name provides better security,
|
|
||||||
# as the digest of an image is immutable, whereas a tag name can later
|
|
||||||
# be changed to point to a malicious image.
|
|
||||||
#
|
|
||||||
# To verify the current digest for a given tag name:
|
|
||||||
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
|
|
||||||
# click the tag name to view the digest of the image it currently points to.
|
|
||||||
# - From the command line:
|
|
||||||
# $ docker pull docker.io/vaultwarden/web-vault:v2023.5.0
|
|
||||||
# $ docker image inspect --format "{{.RepoDigests}}" docker.io/vaultwarden/web-vault:v2023.5.0
|
|
||||||
# [docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085]
|
|
||||||
#
|
|
||||||
# - Conversely, to get the tag name from the digest:
|
|
||||||
# $ docker image inspect --format "{{.RepoTags}}" docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085
|
|
||||||
# [docker.io/vaultwarden/web-vault:v2023.5.0]
|
|
||||||
#
|
|
||||||
FROM docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085 as vault
|
|
||||||
|
|
||||||
########################## BUILD IMAGE ##########################
|
|
||||||
FROM docker.io/library/rust:1.70.0-bullseye as build
|
|
||||||
|
|
||||||
# Build time options to avoid dpkg warnings and help with reproducible builds.
|
|
||||||
ENV DEBIAN_FRONTEND=noninteractive \
|
|
||||||
LANG=C.UTF-8 \
|
|
||||||
TZ=UTC \
|
|
||||||
TERM=xterm-256color \
|
|
||||||
CARGO_HOME="/root/.cargo" \
|
|
||||||
USER="root"
|
|
||||||
|
|
||||||
# Create CARGO_HOME folder and don't download rust docs
|
|
||||||
RUN mkdir -pv "${CARGO_HOME}" \
|
|
||||||
&& rustup set profile minimal
|
|
||||||
|
|
||||||
# Install build dependencies for the arm64 architecture
|
|
||||||
RUN dpkg --add-architecture arm64 \
|
|
||||||
&& apt-get update \
|
|
||||||
&& apt-get install -y \
|
|
||||||
--no-install-recommends \
|
|
||||||
gcc-aarch64-linux-gnu \
|
|
||||||
libc6-dev:arm64 \
|
|
||||||
libmariadb-dev:arm64 \
|
|
||||||
libmariadb-dev-compat:arm64 \
|
|
||||||
libmariadb3:arm64 \
|
|
||||||
libpq-dev:arm64 \
|
|
||||||
libpq5:arm64 \
|
|
||||||
libssl-dev:arm64 \
|
|
||||||
#
|
|
||||||
# Make sure cargo has the right target config
|
|
||||||
&& echo '[target.aarch64-unknown-linux-gnu]' >> "${CARGO_HOME}/config" \
|
|
||||||
&& echo 'linker = "aarch64-linux-gnu-gcc"' >> "${CARGO_HOME}/config" \
|
|
||||||
&& echo 'rustflags = ["-L/usr/lib/aarch64-linux-gnu"]' >> "${CARGO_HOME}/config"
|
|
||||||
|
|
||||||
# Set arm specific environment values
|
|
||||||
ENV CC_aarch64_unknown_linux_gnu="/usr/bin/aarch64-linux-gnu-gcc" \
|
|
||||||
CROSS_COMPILE="1" \
|
|
||||||
OPENSSL_INCLUDE_DIR="/usr/include/aarch64-linux-gnu" \
|
|
||||||
OPENSSL_LIB_DIR="/usr/lib/aarch64-linux-gnu"
|
|
||||||
|
|
||||||
# Creates a dummy project used to grab dependencies
|
|
||||||
RUN USER=root cargo new --bin /app
|
|
||||||
WORKDIR /app
|
|
||||||
|
|
||||||
# Copies over *only* your manifests and build files
|
|
||||||
COPY ./Cargo.* ./
|
|
||||||
COPY ./rust-toolchain ./rust-toolchain
|
|
||||||
COPY ./build.rs ./build.rs
|
|
||||||
|
|
||||||
RUN rustup target add aarch64-unknown-linux-gnu
|
|
||||||
|
|
||||||
# Configure the DB ARG as late as possible to not invalidate the cached layers above
|
|
||||||
ARG DB=sqlite,mysql,postgresql
|
|
||||||
|
|
||||||
# Builds your dependencies and removes the
|
|
||||||
# dummy project, except the target folder
|
|
||||||
# This folder contains the compiled dependencies
|
|
||||||
RUN cargo build --features ${DB} --release --target=aarch64-unknown-linux-gnu \
|
|
||||||
&& find . -not -path "./target*" -delete
|
|
||||||
|
|
||||||
# Copies the complete project
|
|
||||||
# To avoid copying unneeded files, use .dockerignore
|
|
||||||
COPY . .
|
|
||||||
|
|
||||||
# Make sure that we actually build the project
|
|
||||||
RUN touch src/main.rs
|
|
||||||
|
|
||||||
# Builds again, this time it'll just be
|
|
||||||
# your actual source files being built
|
|
||||||
RUN cargo build --features ${DB} --release --target=aarch64-unknown-linux-gnu
|
|
||||||
|
|
||||||
######################## RUNTIME IMAGE ########################
|
|
||||||
# Create a new stage with a minimal image
|
|
||||||
# because we already have a binary built
|
|
||||||
FROM docker.io/balenalib/aarch64-debian:bullseye
|
|
||||||
|
|
||||||
ENV ROCKET_PROFILE="release" \
|
|
||||||
ROCKET_ADDRESS=0.0.0.0 \
|
|
||||||
ROCKET_PORT=80
|
|
||||||
|
|
||||||
RUN [ "cross-build-start" ]
|
|
||||||
|
|
||||||
# Create data folder and Install needed libraries
|
|
||||||
RUN mkdir /data \
|
|
||||||
&& apt-get update && apt-get install -y \
|
|
||||||
--no-install-recommends \
|
|
||||||
ca-certificates \
|
|
||||||
curl \
|
|
||||||
libmariadb-dev-compat \
|
|
||||||
libpq5 \
|
|
||||||
openssl \
|
|
||||||
&& apt-get clean \
|
|
||||||
&& rm -rf /var/lib/apt/lists/*
|
|
||||||
|
|
||||||
RUN [ "cross-build-end" ]
|
|
||||||
|
|
||||||
VOLUME /data
|
|
||||||
EXPOSE 80
|
|
||||||
EXPOSE 3012
|
|
||||||
|
|
||||||
# Copies the files from the context (Rocket.toml file and web-vault)
|
|
||||||
# and the binary from the "build" stage to the current stage
|
|
||||||
WORKDIR /
|
|
||||||
COPY --from=vault /web-vault ./web-vault
|
|
||||||
COPY --from=build /app/target/aarch64-unknown-linux-gnu/release/vaultwarden .
|
|
||||||
|
|
||||||
COPY docker/healthcheck.sh /healthcheck.sh
|
|
||||||
COPY docker/start.sh /start.sh
|
|
||||||
|
|
||||||
HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]
|
|
||||||
|
|
||||||
CMD ["/start.sh"]
|
|
|
@ -1,114 +0,0 @@
|
||||||
# syntax=docker/dockerfile:1
|
|
||||||
|
|
||||||
# This file was generated using a Jinja2 template.
|
|
||||||
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.
|
|
||||||
# Using multistage build:
|
|
||||||
# https://docs.docker.com/develop/develop-images/multistage-build/
|
|
||||||
# https://whitfin.io/speeding-up-rust-docker-builds/
|
|
||||||
####################### VAULT BUILD IMAGE #######################
|
|
||||||
# The web-vault digest specifies a particular web-vault build on Docker Hub.
|
|
||||||
# Using the digest instead of the tag name provides better security,
|
|
||||||
# as the digest of an image is immutable, whereas a tag name can later
|
|
||||||
# be changed to point to a malicious image.
|
|
||||||
#
|
|
||||||
# To verify the current digest for a given tag name:
|
|
||||||
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
|
|
||||||
# click the tag name to view the digest of the image it currently points to.
|
|
||||||
# - From the command line:
|
|
||||||
# $ docker pull docker.io/vaultwarden/web-vault:v2023.5.0
|
|
||||||
# $ docker image inspect --format "{{.RepoDigests}}" docker.io/vaultwarden/web-vault:v2023.5.0
|
|
||||||
# [docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085]
|
|
||||||
#
|
|
||||||
# - Conversely, to get the tag name from the digest:
|
|
||||||
# $ docker image inspect --format "{{.RepoTags}}" docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085
|
|
||||||
# [docker.io/vaultwarden/web-vault:v2023.5.0]
|
|
||||||
#
|
|
||||||
FROM docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085 as vault
|
|
||||||
|
|
||||||
########################## BUILD IMAGE ##########################
|
|
||||||
FROM docker.io/blackdex/rust-musl:aarch64-musl-stable-1.70.0 as build
|
|
||||||
|
|
||||||
# Build time options to avoid dpkg warnings and help with reproducible builds.
|
|
||||||
ENV DEBIAN_FRONTEND=noninteractive \
|
|
||||||
LANG=C.UTF-8 \
|
|
||||||
TZ=UTC \
|
|
||||||
TERM=xterm-256color \
|
|
||||||
CARGO_HOME="/root/.cargo" \
|
|
||||||
USER="root"
|
|
||||||
|
|
||||||
# Create CARGO_HOME folder and don't download rust docs
|
|
||||||
RUN mkdir -pv "${CARGO_HOME}" \
|
|
||||||
&& rustup set profile minimal
|
|
||||||
|
|
||||||
|
|
||||||
# Creates a dummy project used to grab dependencies
|
|
||||||
RUN USER=root cargo new --bin /app
|
|
||||||
WORKDIR /app
|
|
||||||
|
|
||||||
# Copies over *only* your manifests and build files
|
|
||||||
COPY ./Cargo.* ./
|
|
||||||
COPY ./rust-toolchain ./rust-toolchain
|
|
||||||
COPY ./build.rs ./build.rs
|
|
||||||
|
|
||||||
RUN rustup target add aarch64-unknown-linux-musl
|
|
||||||
|
|
||||||
# Configure the DB ARG as late as possible to not invalidate the cached layers above
|
|
||||||
# Enable MiMalloc to improve performance on Alpine builds
|
|
||||||
ARG DB=sqlite,mysql,postgresql,enable_mimalloc
|
|
||||||
|
|
||||||
# Builds your dependencies and removes the
|
|
||||||
# dummy project, except the target folder
|
|
||||||
# This folder contains the compiled dependencies
|
|
||||||
RUN cargo build --features ${DB} --release --target=aarch64-unknown-linux-musl \
|
|
||||||
&& find . -not -path "./target*" -delete
|
|
||||||
|
|
||||||
# Copies the complete project
|
|
||||||
# To avoid copying unneeded files, use .dockerignore
|
|
||||||
COPY . .
|
|
||||||
|
|
||||||
# Make sure that we actually build the project
|
|
||||||
RUN touch src/main.rs
|
|
||||||
|
|
||||||
# Builds again, this time it'll just be
|
|
||||||
# your actual source files being built
|
|
||||||
RUN cargo build --features ${DB} --release --target=aarch64-unknown-linux-musl
|
|
||||||
|
|
||||||
######################## RUNTIME IMAGE ########################
|
|
||||||
# Create a new stage with a minimal image
|
|
||||||
# because we already have a binary built
|
|
||||||
FROM docker.io/balenalib/aarch64-alpine:3.17
|
|
||||||
|
|
||||||
ENV ROCKET_PROFILE="release" \
|
|
||||||
ROCKET_ADDRESS=0.0.0.0 \
|
|
||||||
ROCKET_PORT=80 \
|
|
||||||
SSL_CERT_DIR=/etc/ssl/certs
|
|
||||||
|
|
||||||
|
|
||||||
RUN [ "cross-build-start" ]
|
|
||||||
|
|
||||||
# Create data folder and Install needed libraries
|
|
||||||
RUN mkdir /data \
|
|
||||||
&& apk add --no-cache \
|
|
||||||
ca-certificates \
|
|
||||||
curl \
|
|
||||||
openssl \
|
|
||||||
tzdata
|
|
||||||
|
|
||||||
RUN [ "cross-build-end" ]
|
|
||||||
|
|
||||||
VOLUME /data
|
|
||||||
EXPOSE 80
|
|
||||||
EXPOSE 3012
|
|
||||||
|
|
||||||
# Copies the files from the context (Rocket.toml file and web-vault)
|
|
||||||
# and the binary from the "build" stage to the current stage
|
|
||||||
WORKDIR /
|
|
||||||
COPY --from=vault /web-vault ./web-vault
|
|
||||||
COPY --from=build /app/target/aarch64-unknown-linux-musl/release/vaultwarden .
|
|
||||||
|
|
||||||
COPY docker/healthcheck.sh /healthcheck.sh
|
|
||||||
COPY docker/start.sh /start.sh
|
|
||||||
|
|
||||||
HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]
|
|
||||||
|
|
||||||
CMD ["/start.sh"]
|
|
|
@ -1,139 +0,0 @@
|
||||||
# syntax=docker/dockerfile:1
|
|
||||||
|
|
||||||
# This file was generated using a Jinja2 template.
|
|
||||||
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.
|
|
||||||
# Using multistage build:
|
|
||||||
# https://docs.docker.com/develop/develop-images/multistage-build/
|
|
||||||
# https://whitfin.io/speeding-up-rust-docker-builds/
|
|
||||||
####################### VAULT BUILD IMAGE #######################
|
|
||||||
# The web-vault digest specifies a particular web-vault build on Docker Hub.
|
|
||||||
# Using the digest instead of the tag name provides better security,
|
|
||||||
# as the digest of an image is immutable, whereas a tag name can later
|
|
||||||
# be changed to point to a malicious image.
|
|
||||||
#
|
|
||||||
# To verify the current digest for a given tag name:
|
|
||||||
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
|
|
||||||
# click the tag name to view the digest of the image it currently points to.
|
|
||||||
# - From the command line:
|
|
||||||
# $ docker pull docker.io/vaultwarden/web-vault:v2023.5.0
|
|
||||||
# $ docker image inspect --format "{{.RepoDigests}}" docker.io/vaultwarden/web-vault:v2023.5.0
|
|
||||||
# [docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085]
|
|
||||||
#
|
|
||||||
# - Conversely, to get the tag name from the digest:
|
|
||||||
# $ docker image inspect --format "{{.RepoTags}}" docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085
|
|
||||||
# [docker.io/vaultwarden/web-vault:v2023.5.0]
|
|
||||||
#
|
|
||||||
FROM docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085 as vault
|
|
||||||
|
|
||||||
########################## BUILD IMAGE ##########################
|
|
||||||
FROM docker.io/library/rust:1.70.0-bullseye as build
|
|
||||||
|
|
||||||
# Build time options to avoid dpkg warnings and help with reproducible builds.
|
|
||||||
ENV DEBIAN_FRONTEND=noninteractive \
|
|
||||||
LANG=C.UTF-8 \
|
|
||||||
TZ=UTC \
|
|
||||||
TERM=xterm-256color \
|
|
||||||
CARGO_HOME="/root/.cargo" \
|
|
||||||
USER="root"
|
|
||||||
|
|
||||||
# Create CARGO_HOME folder and don't download rust docs
|
|
||||||
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry mkdir -pv "${CARGO_HOME}" \
|
|
||||||
&& rustup set profile minimal
|
|
||||||
|
|
||||||
# Install build dependencies for the arm64 architecture
|
|
||||||
RUN dpkg --add-architecture arm64 \
|
|
||||||
&& apt-get update \
|
|
||||||
&& apt-get install -y \
|
|
||||||
--no-install-recommends \
|
|
||||||
gcc-aarch64-linux-gnu \
|
|
||||||
libc6-dev:arm64 \
|
|
||||||
libmariadb-dev:arm64 \
|
|
||||||
libmariadb-dev-compat:arm64 \
|
|
||||||
libmariadb3:arm64 \
|
|
||||||
libpq-dev:arm64 \
|
|
||||||
libpq5:arm64 \
|
|
||||||
libssl-dev:arm64 \
|
|
||||||
#
|
|
||||||
# Make sure cargo has the right target config
|
|
||||||
&& echo '[target.aarch64-unknown-linux-gnu]' >> "${CARGO_HOME}/config" \
|
|
||||||
&& echo 'linker = "aarch64-linux-gnu-gcc"' >> "${CARGO_HOME}/config" \
|
|
||||||
&& echo 'rustflags = ["-L/usr/lib/aarch64-linux-gnu"]' >> "${CARGO_HOME}/config"
|
|
||||||
|
|
||||||
# Set arm specific environment values
|
|
||||||
ENV CC_aarch64_unknown_linux_gnu="/usr/bin/aarch64-linux-gnu-gcc" \
|
|
||||||
CROSS_COMPILE="1" \
|
|
||||||
OPENSSL_INCLUDE_DIR="/usr/include/aarch64-linux-gnu" \
|
|
||||||
OPENSSL_LIB_DIR="/usr/lib/aarch64-linux-gnu"
|
|
||||||
|
|
||||||
# Creates a dummy project used to grab dependencies
|
|
||||||
RUN USER=root cargo new --bin /app
|
|
||||||
WORKDIR /app
|
|
||||||
|
|
||||||
# Copies over *only* your manifests and build files
|
|
||||||
COPY ./Cargo.* ./
|
|
||||||
COPY ./rust-toolchain ./rust-toolchain
|
|
||||||
COPY ./build.rs ./build.rs
|
|
||||||
|
|
||||||
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry rustup target add aarch64-unknown-linux-gnu
|
|
||||||
|
|
||||||
# Configure the DB ARG as late as possible to not invalidate the cached layers above
|
|
||||||
ARG DB=sqlite,mysql,postgresql
|
|
||||||
|
|
||||||
# Builds your dependencies and removes the
|
|
||||||
# dummy project, except the target folder
|
|
||||||
# This folder contains the compiled dependencies
|
|
||||||
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry cargo build --features ${DB} --release --target=aarch64-unknown-linux-gnu \
|
|
||||||
&& find . -not -path "./target*" -delete
|
|
||||||
|
|
||||||
# Copies the complete project
|
|
||||||
# To avoid copying unneeded files, use .dockerignore
|
|
||||||
COPY . .
|
|
||||||
|
|
||||||
# Make sure that we actually build the project
|
|
||||||
RUN touch src/main.rs
|
|
||||||
|
|
||||||
# Builds again, this time it'll just be
|
|
||||||
# your actual source files being built
|
|
||||||
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry cargo build --features ${DB} --release --target=aarch64-unknown-linux-gnu
|
|
||||||
|
|
||||||
######################## RUNTIME IMAGE ########################
|
|
||||||
# Create a new stage with a minimal image
|
|
||||||
# because we already have a binary built
|
|
||||||
FROM docker.io/balenalib/aarch64-debian:bullseye
|
|
||||||
|
|
||||||
ENV ROCKET_PROFILE="release" \
|
|
||||||
ROCKET_ADDRESS=0.0.0.0 \
|
|
||||||
ROCKET_PORT=80
|
|
||||||
|
|
||||||
RUN [ "cross-build-start" ]
|
|
||||||
|
|
||||||
# Create data folder and Install needed libraries
|
|
||||||
RUN mkdir /data \
|
|
||||||
&& apt-get update && apt-get install -y \
|
|
||||||
--no-install-recommends \
|
|
||||||
ca-certificates \
|
|
||||||
curl \
|
|
||||||
libmariadb-dev-compat \
|
|
||||||
libpq5 \
|
|
||||||
openssl \
|
|
||||||
&& apt-get clean \
|
|
||||||
&& rm -rf /var/lib/apt/lists/*
|
|
||||||
|
|
||||||
RUN [ "cross-build-end" ]
|
|
||||||
|
|
||||||
VOLUME /data
|
|
||||||
EXPOSE 80
|
|
||||||
EXPOSE 3012
|
|
||||||
|
|
||||||
# Copies the files from the context (Rocket.toml file and web-vault)
|
|
||||||
# and the binary from the "build" stage to the current stage
|
|
||||||
WORKDIR /
|
|
||||||
COPY --from=vault /web-vault ./web-vault
|
|
||||||
COPY --from=build /app/target/aarch64-unknown-linux-gnu/release/vaultwarden .
|
|
||||||
|
|
||||||
COPY docker/healthcheck.sh /healthcheck.sh
|
|
||||||
COPY docker/start.sh /start.sh
|
|
||||||
|
|
||||||
HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]
|
|
||||||
|
|
||||||
CMD ["/start.sh"]
|
|
|
@ -1,114 +0,0 @@
|
||||||
# syntax=docker/dockerfile:1
|
|
||||||
|
|
||||||
# This file was generated using a Jinja2 template.
|
|
||||||
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.
|
|
||||||
# Using multistage build:
|
|
||||||
# https://docs.docker.com/develop/develop-images/multistage-build/
|
|
||||||
# https://whitfin.io/speeding-up-rust-docker-builds/
|
|
||||||
####################### VAULT BUILD IMAGE #######################
|
|
||||||
# The web-vault digest specifies a particular web-vault build on Docker Hub.
|
|
||||||
# Using the digest instead of the tag name provides better security,
|
|
||||||
# as the digest of an image is immutable, whereas a tag name can later
|
|
||||||
# be changed to point to a malicious image.
|
|
||||||
#
|
|
||||||
# To verify the current digest for a given tag name:
|
|
||||||
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
|
|
||||||
# click the tag name to view the digest of the image it currently points to.
|
|
||||||
# - From the command line:
|
|
||||||
# $ docker pull docker.io/vaultwarden/web-vault:v2023.5.0
|
|
||||||
# $ docker image inspect --format "{{.RepoDigests}}" docker.io/vaultwarden/web-vault:v2023.5.0
|
|
||||||
# [docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085]
|
|
||||||
#
|
|
||||||
# - Conversely, to get the tag name from the digest:
|
|
||||||
# $ docker image inspect --format "{{.RepoTags}}" docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085
|
|
||||||
# [docker.io/vaultwarden/web-vault:v2023.5.0]
|
|
||||||
#
|
|
||||||
FROM docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085 as vault
|
|
||||||
|
|
||||||
########################## BUILD IMAGE ##########################
|
|
||||||
FROM docker.io/blackdex/rust-musl:aarch64-musl-stable-1.70.0 as build
|
|
||||||
|
|
||||||
# Build time options to avoid dpkg warnings and help with reproducible builds.
|
|
||||||
ENV DEBIAN_FRONTEND=noninteractive \
|
|
||||||
LANG=C.UTF-8 \
|
|
||||||
TZ=UTC \
|
|
||||||
TERM=xterm-256color \
|
|
||||||
CARGO_HOME="/root/.cargo" \
|
|
||||||
USER="root"
|
|
||||||
|
|
||||||
# Create CARGO_HOME folder and don't download rust docs
|
|
||||||
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry mkdir -pv "${CARGO_HOME}" \
|
|
||||||
&& rustup set profile minimal
|
|
||||||
|
|
||||||
|
|
||||||
# Creates a dummy project used to grab dependencies
|
|
||||||
RUN USER=root cargo new --bin /app
|
|
||||||
WORKDIR /app
|
|
||||||
|
|
||||||
# Copies over *only* your manifests and build files
|
|
||||||
COPY ./Cargo.* ./
|
|
||||||
COPY ./rust-toolchain ./rust-toolchain
|
|
||||||
COPY ./build.rs ./build.rs
|
|
||||||
|
|
||||||
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry rustup target add aarch64-unknown-linux-musl
|
|
||||||
|
|
||||||
# Configure the DB ARG as late as possible to not invalidate the cached layers above
|
|
||||||
# Enable MiMalloc to improve performance on Alpine builds
|
|
||||||
ARG DB=sqlite,mysql,postgresql,enable_mimalloc
|
|
||||||
|
|
||||||
# Builds your dependencies and removes the
|
|
||||||
# dummy project, except the target folder
|
|
||||||
# This folder contains the compiled dependencies
|
|
||||||
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry cargo build --features ${DB} --release --target=aarch64-unknown-linux-musl \
|
|
||||||
&& find . -not -path "./target*" -delete
|
|
||||||
|
|
||||||
# Copies the complete project
|
|
||||||
# To avoid copying unneeded files, use .dockerignore
|
|
||||||
COPY . .
|
|
||||||
|
|
||||||
# Make sure that we actually build the project
|
|
||||||
RUN touch src/main.rs
|
|
||||||
|
|
||||||
# Builds again, this time it'll just be
|
|
||||||
# your actual source files being built
|
|
||||||
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry cargo build --features ${DB} --release --target=aarch64-unknown-linux-musl
|
|
||||||
|
|
||||||
######################## RUNTIME IMAGE ########################
|
|
||||||
# Create a new stage with a minimal image
|
|
||||||
# because we already have a binary built
|
|
||||||
FROM docker.io/balenalib/aarch64-alpine:3.17
|
|
||||||
|
|
||||||
ENV ROCKET_PROFILE="release" \
|
|
||||||
ROCKET_ADDRESS=0.0.0.0 \
|
|
||||||
ROCKET_PORT=80 \
|
|
||||||
SSL_CERT_DIR=/etc/ssl/certs
|
|
||||||
|
|
||||||
|
|
||||||
RUN [ "cross-build-start" ]
|
|
||||||
|
|
||||||
# Create data folder and Install needed libraries
|
|
||||||
RUN mkdir /data \
|
|
||||||
&& apk add --no-cache \
|
|
||||||
ca-certificates \
|
|
||||||
curl \
|
|
||||||
openssl \
|
|
||||||
tzdata
|
|
||||||
|
|
||||||
RUN [ "cross-build-end" ]
|
|
||||||
|
|
||||||
VOLUME /data
|
|
||||||
EXPOSE 80
|
|
||||||
EXPOSE 3012
|
|
||||||
|
|
||||||
# Copies the files from the context (Rocket.toml file and web-vault)
|
|
||||||
# and the binary from the "build" stage to the current stage
|
|
||||||
WORKDIR /
|
|
||||||
COPY --from=vault /web-vault ./web-vault
|
|
||||||
COPY --from=build /app/target/aarch64-unknown-linux-musl/release/vaultwarden .
|
|
||||||
|
|
||||||
COPY docker/healthcheck.sh /healthcheck.sh
|
|
||||||
COPY docker/start.sh /start.sh
|
|
||||||
|
|
||||||
HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]
|
|
||||||
|
|
||||||
CMD ["/start.sh"]
|
|
|
@ -1,143 +0,0 @@
|
||||||
# syntax=docker/dockerfile:1
|
|
||||||
|
|
||||||
# This file was generated using a Jinja2 template.
|
|
||||||
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.
|
|
||||||
# Using multistage build:
|
|
||||||
# https://docs.docker.com/develop/develop-images/multistage-build/
|
|
||||||
# https://whitfin.io/speeding-up-rust-docker-builds/
|
|
||||||
####################### VAULT BUILD IMAGE #######################
|
|
||||||
# The web-vault digest specifies a particular web-vault build on Docker Hub.
|
|
||||||
# Using the digest instead of the tag name provides better security,
|
|
||||||
# as the digest of an image is immutable, whereas a tag name can later
|
|
||||||
# be changed to point to a malicious image.
|
|
||||||
#
|
|
||||||
# To verify the current digest for a given tag name:
|
|
||||||
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
|
|
||||||
# click the tag name to view the digest of the image it currently points to.
|
|
||||||
# - From the command line:
|
|
||||||
# $ docker pull docker.io/vaultwarden/web-vault:v2023.5.0
|
|
||||||
# $ docker image inspect --format "{{.RepoDigests}}" docker.io/vaultwarden/web-vault:v2023.5.0
|
|
||||||
# [docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085]
|
|
||||||
#
|
|
||||||
# - Conversely, to get the tag name from the digest:
|
|
||||||
# $ docker image inspect --format "{{.RepoTags}}" docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085
|
|
||||||
# [docker.io/vaultwarden/web-vault:v2023.5.0]
|
|
||||||
#
|
|
||||||
FROM docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085 as vault
|
|
||||||
|
|
||||||
########################## BUILD IMAGE ##########################
|
|
||||||
FROM docker.io/library/rust:1.70.0-bullseye as build
|
|
||||||
|
|
||||||
# Build time options to avoid dpkg warnings and help with reproducible builds.
|
|
||||||
ENV DEBIAN_FRONTEND=noninteractive \
|
|
||||||
LANG=C.UTF-8 \
|
|
||||||
TZ=UTC \
|
|
||||||
TERM=xterm-256color \
|
|
||||||
CARGO_HOME="/root/.cargo" \
|
|
||||||
USER="root"
|
|
||||||
|
|
||||||
# Create CARGO_HOME folder and don't download rust docs
|
|
||||||
RUN mkdir -pv "${CARGO_HOME}" \
|
|
||||||
&& rustup set profile minimal
|
|
||||||
|
|
||||||
# Install build dependencies for the armel architecture
|
|
||||||
RUN dpkg --add-architecture armel \
|
|
||||||
&& apt-get update \
|
|
||||||
&& apt-get install -y \
|
|
||||||
--no-install-recommends \
|
|
||||||
gcc-arm-linux-gnueabi \
|
|
||||||
libc6-dev:armel \
|
|
||||||
libmariadb-dev:armel \
|
|
||||||
libmariadb-dev-compat:armel \
|
|
||||||
libmariadb3:armel \
|
|
||||||
libpq-dev:armel \
|
|
||||||
libpq5:armel \
|
|
||||||
libssl-dev:armel \
|
|
||||||
#
|
|
||||||
# Make sure cargo has the right target config
|
|
||||||
&& echo '[target.arm-unknown-linux-gnueabi]' >> "${CARGO_HOME}/config" \
|
|
||||||
&& echo 'linker = "arm-linux-gnueabi-gcc"' >> "${CARGO_HOME}/config" \
|
|
||||||
&& echo 'rustflags = ["-L/usr/lib/arm-linux-gnueabi"]' >> "${CARGO_HOME}/config"
|
|
||||||
|
|
||||||
# Set arm specific environment values
|
|
||||||
ENV CC_arm_unknown_linux_gnueabi="/usr/bin/arm-linux-gnueabi-gcc" \
|
|
||||||
CROSS_COMPILE="1" \
|
|
||||||
OPENSSL_INCLUDE_DIR="/usr/include/arm-linux-gnueabi" \
|
|
||||||
OPENSSL_LIB_DIR="/usr/lib/arm-linux-gnueabi"
|
|
||||||
|
|
||||||
# Creates a dummy project used to grab dependencies
|
|
||||||
RUN USER=root cargo new --bin /app
|
|
||||||
WORKDIR /app
|
|
||||||
|
|
||||||
# Copies over *only* your manifests and build files
|
|
||||||
COPY ./Cargo.* ./
|
|
||||||
COPY ./rust-toolchain ./rust-toolchain
|
|
||||||
COPY ./build.rs ./build.rs
|
|
||||||
|
|
||||||
RUN rustup target add arm-unknown-linux-gnueabi
|
|
||||||
|
|
||||||
# Configure the DB ARG as late as possible to not invalidate the cached layers above
|
|
||||||
ARG DB=sqlite,mysql,postgresql
|
|
||||||
|
|
||||||
# Builds your dependencies and removes the
|
|
||||||
# dummy project, except the target folder
|
|
||||||
# This folder contains the compiled dependencies
|
|
||||||
RUN cargo build --features ${DB} --release --target=arm-unknown-linux-gnueabi \
|
|
||||||
&& find . -not -path "./target*" -delete
|
|
||||||
|
|
||||||
# Copies the complete project
|
|
||||||
# To avoid copying unneeded files, use .dockerignore
|
|
||||||
COPY . .
|
|
||||||
|
|
||||||
# Make sure that we actually build the project
|
|
||||||
RUN touch src/main.rs
|
|
||||||
|
|
||||||
# Builds again, this time it'll just be
|
|
||||||
# your actual source files being built
|
|
||||||
RUN cargo build --features ${DB} --release --target=arm-unknown-linux-gnueabi
|
|
||||||
|
|
||||||
######################## RUNTIME IMAGE ########################
|
|
||||||
# Create a new stage with a minimal image
|
|
||||||
# because we already have a binary built
|
|
||||||
FROM docker.io/balenalib/rpi-debian:bullseye
|
|
||||||
|
|
||||||
ENV ROCKET_PROFILE="release" \
|
|
||||||
ROCKET_ADDRESS=0.0.0.0 \
|
|
||||||
ROCKET_PORT=80
|
|
||||||
|
|
||||||
RUN [ "cross-build-start" ]
|
|
||||||
|
|
||||||
# Create data folder and Install needed libraries
|
|
||||||
RUN mkdir /data \
|
|
||||||
&& apt-get update && apt-get install -y \
|
|
||||||
--no-install-recommends \
|
|
||||||
ca-certificates \
|
|
||||||
curl \
|
|
||||||
libmariadb-dev-compat \
|
|
||||||
libpq5 \
|
|
||||||
openssl \
|
|
||||||
&& apt-get clean \
|
|
||||||
&& rm -rf /var/lib/apt/lists/*
|
|
||||||
|
|
||||||
# In the Balena Bullseye images for armv6/rpi-debian there is a missing symlink.
|
|
||||||
# This symlink was there in the buster images, and for some reason this is needed.
|
|
||||||
RUN ln -v -s /lib/ld-linux-armhf.so.3 /lib/ld-linux.so.3
|
|
||||||
|
|
||||||
RUN [ "cross-build-end" ]
|
|
||||||
|
|
||||||
VOLUME /data
|
|
||||||
EXPOSE 80
|
|
||||||
EXPOSE 3012
|
|
||||||
|
|
||||||
# Copies the files from the context (Rocket.toml file and web-vault)
|
|
||||||
# and the binary from the "build" stage to the current stage
|
|
||||||
WORKDIR /
|
|
||||||
COPY --from=vault /web-vault ./web-vault
|
|
||||||
COPY --from=build /app/target/arm-unknown-linux-gnueabi/release/vaultwarden .
|
|
||||||
|
|
||||||
COPY docker/healthcheck.sh /healthcheck.sh
|
|
||||||
COPY docker/start.sh /start.sh
|
|
||||||
|
|
||||||
HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]
|
|
||||||
|
|
||||||
CMD ["/start.sh"]
|
|
|
@ -1,116 +0,0 @@
|
||||||
# syntax=docker/dockerfile:1
|
|
||||||
|
|
||||||
# This file was generated using a Jinja2 template.
|
|
||||||
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.
|
|
||||||
# Using multistage build:
|
|
||||||
# https://docs.docker.com/develop/develop-images/multistage-build/
|
|
||||||
# https://whitfin.io/speeding-up-rust-docker-builds/
|
|
||||||
####################### VAULT BUILD IMAGE #######################
|
|
||||||
# The web-vault digest specifies a particular web-vault build on Docker Hub.
|
|
||||||
# Using the digest instead of the tag name provides better security,
|
|
||||||
# as the digest of an image is immutable, whereas a tag name can later
|
|
||||||
# be changed to point to a malicious image.
|
|
||||||
#
|
|
||||||
# To verify the current digest for a given tag name:
|
|
||||||
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
|
|
||||||
# click the tag name to view the digest of the image it currently points to.
|
|
||||||
# - From the command line:
|
|
||||||
# $ docker pull docker.io/vaultwarden/web-vault:v2023.5.0
|
|
||||||
# $ docker image inspect --format "{{.RepoDigests}}" docker.io/vaultwarden/web-vault:v2023.5.0
|
|
||||||
# [docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085]
|
|
||||||
#
|
|
||||||
# - Conversely, to get the tag name from the digest:
|
|
||||||
# $ docker image inspect --format "{{.RepoTags}}" docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085
|
|
||||||
# [docker.io/vaultwarden/web-vault:v2023.5.0]
|
|
||||||
#
|
|
||||||
FROM docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085 as vault
|
|
||||||
|
|
||||||
########################## BUILD IMAGE ##########################
|
|
||||||
FROM docker.io/blackdex/rust-musl:arm-musleabi-stable-1.70.0 as build
|
|
||||||
|
|
||||||
# Build time options to avoid dpkg warnings and help with reproducible builds.
|
|
||||||
ENV DEBIAN_FRONTEND=noninteractive \
|
|
||||||
LANG=C.UTF-8 \
|
|
||||||
TZ=UTC \
|
|
||||||
TERM=xterm-256color \
|
|
||||||
CARGO_HOME="/root/.cargo" \
|
|
||||||
USER="root"
|
|
||||||
|
|
||||||
# Create CARGO_HOME folder and don't download rust docs
|
|
||||||
RUN mkdir -pv "${CARGO_HOME}" \
|
|
||||||
&& rustup set profile minimal
|
|
||||||
|
|
||||||
# To be able to build the armv6 image with mimalloc we need to specifically specify the libatomic.a file location
|
|
||||||
ENV RUSTFLAGS='-Clink-arg=/usr/local/musl/arm-unknown-linux-musleabi/lib/libatomic.a'
|
|
||||||
|
|
||||||
# Creates a dummy project used to grab dependencies
|
|
||||||
RUN USER=root cargo new --bin /app
|
|
||||||
WORKDIR /app
|
|
||||||
|
|
||||||
# Copies over *only* your manifests and build files
|
|
||||||
COPY ./Cargo.* ./
|
|
||||||
COPY ./rust-toolchain ./rust-toolchain
|
|
||||||
COPY ./build.rs ./build.rs
|
|
||||||
|
|
||||||
RUN rustup target add arm-unknown-linux-musleabi
|
|
||||||
|
|
||||||
# Configure the DB ARG as late as possible to not invalidate the cached layers above
|
|
||||||
# Enable MiMalloc to improve performance on Alpine builds
|
|
||||||
ARG DB=sqlite,mysql,postgresql,enable_mimalloc
|
|
||||||
|
|
||||||
# Builds your dependencies and removes the
|
|
||||||
# dummy project, except the target folder
|
|
||||||
# This folder contains the compiled dependencies
|
|
||||||
RUN cargo build --features ${DB} --release --target=arm-unknown-linux-musleabi \
|
|
||||||
&& find . -not -path "./target*" -delete
|
|
||||||
|
|
||||||
# Copies the complete project
|
|
||||||
# To avoid copying unneeded files, use .dockerignore
|
|
||||||
COPY . .
|
|
||||||
|
|
||||||
# Make sure that we actually build the project
|
|
||||||
RUN touch src/main.rs
|
|
||||||
|
|
||||||
# Builds again, this time it'll just be
|
|
||||||
# your actual source files being built
|
|
||||||
RUN cargo build --features ${DB} --release --target=arm-unknown-linux-musleabi
|
|
||||||
|
|
||||||
######################## RUNTIME IMAGE ########################
|
|
||||||
# Create a new stage with a minimal image
|
|
||||||
# because we already have a binary built
|
|
||||||
FROM docker.io/balenalib/rpi-alpine:3.17
|
|
||||||
|
|
||||||
ENV ROCKET_PROFILE="release" \
|
|
||||||
ROCKET_ADDRESS=0.0.0.0 \
|
|
||||||
ROCKET_PORT=80 \
|
|
||||||
SSL_CERT_DIR=/etc/ssl/certs
|
|
||||||
|
|
||||||
|
|
||||||
RUN [ "cross-build-start" ]
|
|
||||||
|
|
||||||
# Create data folder and Install needed libraries
|
|
||||||
RUN mkdir /data \
|
|
||||||
&& apk add --no-cache \
|
|
||||||
ca-certificates \
|
|
||||||
curl \
|
|
||||||
openssl \
|
|
||||||
tzdata
|
|
||||||
|
|
||||||
RUN [ "cross-build-end" ]
|
|
||||||
|
|
||||||
VOLUME /data
|
|
||||||
EXPOSE 80
|
|
||||||
EXPOSE 3012
|
|
||||||
|
|
||||||
# Copies the files from the context (Rocket.toml file and web-vault)
|
|
||||||
# and the binary from the "build" stage to the current stage
|
|
||||||
WORKDIR /
|
|
||||||
COPY --from=vault /web-vault ./web-vault
|
|
||||||
COPY --from=build /app/target/arm-unknown-linux-musleabi/release/vaultwarden .
|
|
||||||
|
|
||||||
COPY docker/healthcheck.sh /healthcheck.sh
|
|
||||||
COPY docker/start.sh /start.sh
|
|
||||||
|
|
||||||
HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]
|
|
||||||
|
|
||||||
CMD ["/start.sh"]
|
|
|
@ -1,143 +0,0 @@
|
||||||
# syntax=docker/dockerfile:1
|
|
||||||
|
|
||||||
# This file was generated using a Jinja2 template.
|
|
||||||
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.
|
|
||||||
# Using multistage build:
|
|
||||||
# https://docs.docker.com/develop/develop-images/multistage-build/
|
|
||||||
# https://whitfin.io/speeding-up-rust-docker-builds/
|
|
||||||
####################### VAULT BUILD IMAGE #######################
|
|
||||||
# The web-vault digest specifies a particular web-vault build on Docker Hub.
|
|
||||||
# Using the digest instead of the tag name provides better security,
|
|
||||||
# as the digest of an image is immutable, whereas a tag name can later
|
|
||||||
# be changed to point to a malicious image.
|
|
||||||
#
|
|
||||||
# To verify the current digest for a given tag name:
|
|
||||||
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
|
|
||||||
# click the tag name to view the digest of the image it currently points to.
|
|
||||||
# - From the command line:
|
|
||||||
# $ docker pull docker.io/vaultwarden/web-vault:v2023.5.0
|
|
||||||
# $ docker image inspect --format "{{.RepoDigests}}" docker.io/vaultwarden/web-vault:v2023.5.0
|
|
||||||
# [docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085]
|
|
||||||
#
|
|
||||||
# - Conversely, to get the tag name from the digest:
|
|
||||||
# $ docker image inspect --format "{{.RepoTags}}" docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085
|
|
||||||
# [docker.io/vaultwarden/web-vault:v2023.5.0]
|
|
||||||
#
|
|
||||||
FROM docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085 as vault
|
|
||||||
|
|
||||||
########################## BUILD IMAGE ##########################
|
|
||||||
FROM docker.io/library/rust:1.70.0-bullseye as build
|
|
||||||
|
|
||||||
# Build time options to avoid dpkg warnings and help with reproducible builds.
|
|
||||||
ENV DEBIAN_FRONTEND=noninteractive \
|
|
||||||
LANG=C.UTF-8 \
|
|
||||||
TZ=UTC \
|
|
||||||
TERM=xterm-256color \
|
|
||||||
CARGO_HOME="/root/.cargo" \
|
|
||||||
USER="root"
|
|
||||||
|
|
||||||
# Create CARGO_HOME folder and don't download rust docs
|
|
||||||
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry mkdir -pv "${CARGO_HOME}" \
|
|
||||||
&& rustup set profile minimal
|
|
||||||
|
|
||||||
# Install build dependencies for the armel architecture
|
|
||||||
RUN dpkg --add-architecture armel \
|
|
||||||
&& apt-get update \
|
|
||||||
&& apt-get install -y \
|
|
||||||
--no-install-recommends \
|
|
||||||
gcc-arm-linux-gnueabi \
|
|
||||||
libc6-dev:armel \
|
|
||||||
libmariadb-dev:armel \
|
|
||||||
libmariadb-dev-compat:armel \
|
|
||||||
libmariadb3:armel \
|
|
||||||
libpq-dev:armel \
|
|
||||||
libpq5:armel \
|
|
||||||
libssl-dev:armel \
|
|
||||||
#
|
|
||||||
# Make sure cargo has the right target config
|
|
||||||
&& echo '[target.arm-unknown-linux-gnueabi]' >> "${CARGO_HOME}/config" \
|
|
||||||
&& echo 'linker = "arm-linux-gnueabi-gcc"' >> "${CARGO_HOME}/config" \
|
|
||||||
&& echo 'rustflags = ["-L/usr/lib/arm-linux-gnueabi"]' >> "${CARGO_HOME}/config"
|
|
||||||
|
|
||||||
# Set arm specific environment values
|
|
||||||
ENV CC_arm_unknown_linux_gnueabi="/usr/bin/arm-linux-gnueabi-gcc" \
|
|
||||||
CROSS_COMPILE="1" \
|
|
||||||
OPENSSL_INCLUDE_DIR="/usr/include/arm-linux-gnueabi" \
|
|
||||||
OPENSSL_LIB_DIR="/usr/lib/arm-linux-gnueabi"
|
|
||||||
|
|
||||||
# Creates a dummy project used to grab dependencies
|
|
||||||
RUN USER=root cargo new --bin /app
|
|
||||||
WORKDIR /app
|
|
||||||
|
|
||||||
# Copies over *only* your manifests and build files
|
|
||||||
COPY ./Cargo.* ./
|
|
||||||
COPY ./rust-toolchain ./rust-toolchain
|
|
||||||
COPY ./build.rs ./build.rs
|
|
||||||
|
|
||||||
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry rustup target add arm-unknown-linux-gnueabi
|
|
||||||
|
|
||||||
# Configure the DB ARG as late as possible to not invalidate the cached layers above
|
|
||||||
ARG DB=sqlite,mysql,postgresql
|
|
||||||
|
|
||||||
# Builds your dependencies and removes the
|
|
||||||
# dummy project, except the target folder
|
|
||||||
# This folder contains the compiled dependencies
|
|
||||||
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry cargo build --features ${DB} --release --target=arm-unknown-linux-gnueabi \
|
|
||||||
&& find . -not -path "./target*" -delete
|
|
||||||
|
|
||||||
# Copies the complete project
|
|
||||||
# To avoid copying unneeded files, use .dockerignore
|
|
||||||
COPY . .
|
|
||||||
|
|
||||||
# Make sure that we actually build the project
|
|
||||||
RUN touch src/main.rs
|
|
||||||
|
|
||||||
# Builds again, this time it'll just be
|
|
||||||
# your actual source files being built
|
|
||||||
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry cargo build --features ${DB} --release --target=arm-unknown-linux-gnueabi
|
|
||||||
|
|
||||||
######################## RUNTIME IMAGE ########################
|
|
||||||
# Create a new stage with a minimal image
|
|
||||||
# because we already have a binary built
|
|
||||||
FROM docker.io/balenalib/rpi-debian:bullseye
|
|
||||||
|
|
||||||
ENV ROCKET_PROFILE="release" \
|
|
||||||
ROCKET_ADDRESS=0.0.0.0 \
|
|
||||||
ROCKET_PORT=80
|
|
||||||
|
|
||||||
RUN [ "cross-build-start" ]
|
|
||||||
|
|
||||||
# Create data folder and Install needed libraries
|
|
||||||
RUN mkdir /data \
|
|
||||||
&& apt-get update && apt-get install -y \
|
|
||||||
--no-install-recommends \
|
|
||||||
ca-certificates \
|
|
||||||
curl \
|
|
||||||
libmariadb-dev-compat \
|
|
||||||
libpq5 \
|
|
||||||
openssl \
|
|
||||||
&& apt-get clean \
|
|
||||||
&& rm -rf /var/lib/apt/lists/*
|
|
||||||
|
|
||||||
# In the Balena Bullseye images for armv6/rpi-debian there is a missing symlink.
|
|
||||||
# This symlink was there in the buster images, and for some reason this is needed.
|
|
||||||
RUN ln -v -s /lib/ld-linux-armhf.so.3 /lib/ld-linux.so.3
|
|
||||||
|
|
||||||
RUN [ "cross-build-end" ]
|
|
||||||
|
|
||||||
VOLUME /data
|
|
||||||
EXPOSE 80
|
|
||||||
EXPOSE 3012
|
|
||||||
|
|
||||||
# Copies the files from the context (Rocket.toml file and web-vault)
|
|
||||||
# and the binary from the "build" stage to the current stage
|
|
||||||
WORKDIR /
|
|
||||||
COPY --from=vault /web-vault ./web-vault
|
|
||||||
COPY --from=build /app/target/arm-unknown-linux-gnueabi/release/vaultwarden .
|
|
||||||
|
|
||||||
COPY docker/healthcheck.sh /healthcheck.sh
|
|
||||||
COPY docker/start.sh /start.sh
|
|
||||||
|
|
||||||
HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]
|
|
||||||
|
|
||||||
CMD ["/start.sh"]
|
|
|
@ -1,116 +0,0 @@
|
||||||
# syntax=docker/dockerfile:1
|
|
||||||
|
|
||||||
# This file was generated using a Jinja2 template.
|
|
||||||
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.
|
|
||||||
# Using multistage build:
|
|
||||||
# https://docs.docker.com/develop/develop-images/multistage-build/
|
|
||||||
# https://whitfin.io/speeding-up-rust-docker-builds/
|
|
||||||
####################### VAULT BUILD IMAGE #######################
|
|
||||||
# The web-vault digest specifies a particular web-vault build on Docker Hub.
|
|
||||||
# Using the digest instead of the tag name provides better security,
|
|
||||||
# as the digest of an image is immutable, whereas a tag name can later
|
|
||||||
# be changed to point to a malicious image.
|
|
||||||
#
|
|
||||||
# To verify the current digest for a given tag name:
|
|
||||||
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
|
|
||||||
# click the tag name to view the digest of the image it currently points to.
|
|
||||||
# - From the command line:
|
|
||||||
# $ docker pull docker.io/vaultwarden/web-vault:v2023.5.0
|
|
||||||
# $ docker image inspect --format "{{.RepoDigests}}" docker.io/vaultwarden/web-vault:v2023.5.0
|
|
||||||
# [docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085]
|
|
||||||
#
|
|
||||||
# - Conversely, to get the tag name from the digest:
|
|
||||||
# $ docker image inspect --format "{{.RepoTags}}" docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085
|
|
||||||
# [docker.io/vaultwarden/web-vault:v2023.5.0]
|
|
||||||
#
|
|
||||||
FROM docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085 as vault
|
|
||||||
|
|
||||||
########################## BUILD IMAGE ##########################
|
|
||||||
FROM docker.io/blackdex/rust-musl:arm-musleabi-stable-1.70.0 as build
|
|
||||||
|
|
||||||
# Build time options to avoid dpkg warnings and help with reproducible builds.
|
|
||||||
ENV DEBIAN_FRONTEND=noninteractive \
|
|
||||||
LANG=C.UTF-8 \
|
|
||||||
TZ=UTC \
|
|
||||||
TERM=xterm-256color \
|
|
||||||
CARGO_HOME="/root/.cargo" \
|
|
||||||
USER="root"
|
|
||||||
|
|
||||||
# Create CARGO_HOME folder and don't download rust docs
|
|
||||||
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry mkdir -pv "${CARGO_HOME}" \
|
|
||||||
&& rustup set profile minimal
|
|
||||||
|
|
||||||
# To be able to build the armv6 image with mimalloc we need to specifically specify the libatomic.a file location
|
|
||||||
ENV RUSTFLAGS='-Clink-arg=/usr/local/musl/arm-unknown-linux-musleabi/lib/libatomic.a'
|
|
||||||
|
|
||||||
# Creates a dummy project used to grab dependencies
|
|
||||||
RUN USER=root cargo new --bin /app
|
|
||||||
WORKDIR /app
|
|
||||||
|
|
||||||
# Copies over *only* your manifests and build files
|
|
||||||
COPY ./Cargo.* ./
|
|
||||||
COPY ./rust-toolchain ./rust-toolchain
|
|
||||||
COPY ./build.rs ./build.rs
|
|
||||||
|
|
||||||
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry rustup target add arm-unknown-linux-musleabi
|
|
||||||
|
|
||||||
# Configure the DB ARG as late as possible to not invalidate the cached layers above
|
|
||||||
# Enable MiMalloc to improve performance on Alpine builds
|
|
||||||
ARG DB=sqlite,mysql,postgresql,enable_mimalloc
|
|
||||||
|
|
||||||
# Builds your dependencies and removes the
|
|
||||||
# dummy project, except the target folder
|
|
||||||
# This folder contains the compiled dependencies
|
|
||||||
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry cargo build --features ${DB} --release --target=arm-unknown-linux-musleabi \
|
|
||||||
&& find . -not -path "./target*" -delete
|
|
||||||
|
|
||||||
# Copies the complete project
|
|
||||||
# To avoid copying unneeded files, use .dockerignore
|
|
||||||
COPY . .
|
|
||||||
|
|
||||||
# Make sure that we actually build the project
|
|
||||||
RUN touch src/main.rs
|
|
||||||
|
|
||||||
# Builds again, this time it'll just be
|
|
||||||
# your actual source files being built
|
|
||||||
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry cargo build --features ${DB} --release --target=arm-unknown-linux-musleabi
|
|
||||||
|
|
||||||
######################## RUNTIME IMAGE ########################
|
|
||||||
# Create a new stage with a minimal image
|
|
||||||
# because we already have a binary built
|
|
||||||
FROM docker.io/balenalib/rpi-alpine:3.17
|
|
||||||
|
|
||||||
ENV ROCKET_PROFILE="release" \
|
|
||||||
ROCKET_ADDRESS=0.0.0.0 \
|
|
||||||
ROCKET_PORT=80 \
|
|
||||||
SSL_CERT_DIR=/etc/ssl/certs
|
|
||||||
|
|
||||||
|
|
||||||
RUN [ "cross-build-start" ]
|
|
||||||
|
|
||||||
# Create data folder and Install needed libraries
|
|
||||||
RUN mkdir /data \
|
|
||||||
&& apk add --no-cache \
|
|
||||||
ca-certificates \
|
|
||||||
curl \
|
|
||||||
openssl \
|
|
||||||
tzdata
|
|
||||||
|
|
||||||
RUN [ "cross-build-end" ]
|
|
||||||
|
|
||||||
VOLUME /data
|
|
||||||
EXPOSE 80
|
|
||||||
EXPOSE 3012
|
|
||||||
|
|
||||||
# Copies the files from the context (Rocket.toml file and web-vault)
|
|
||||||
# and the binary from the "build" stage to the current stage
|
|
||||||
WORKDIR /
|
|
||||||
COPY --from=vault /web-vault ./web-vault
|
|
||||||
COPY --from=build /app/target/arm-unknown-linux-musleabi/release/vaultwarden .
|
|
||||||
|
|
||||||
COPY docker/healthcheck.sh /healthcheck.sh
|
|
||||||
COPY docker/start.sh /start.sh
|
|
||||||
|
|
||||||
HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]
|
|
||||||
|
|
||||||
CMD ["/start.sh"]
|
|
|
@ -1,139 +0,0 @@
|
||||||
# syntax=docker/dockerfile:1

# Cross-builds vaultwarden for armv7 (arm-linux-gnueabihf) on a Debian
# bullseye rust image, then packages it on a balenalib armv7hf Debian runtime.

# This file was generated using a Jinja2 template.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.
# Using multistage build:
# https://docs.docker.com/develop/develop-images/multistage-build/
# https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
# The web-vault digest specifies a particular web-vault build on Docker Hub.
# Using the digest instead of the tag name provides better security,
# as the digest of an image is immutable, whereas a tag name can later
# be changed to point to a malicious image.
#
# To verify the current digest for a given tag name:
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
#   click the tag name to view the digest of the image it currently points to.
# - From the command line:
#     $ docker pull docker.io/vaultwarden/web-vault:v2023.5.0
#     $ docker image inspect --format "{{.RepoDigests}}" docker.io/vaultwarden/web-vault:v2023.5.0
#     [docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085]
#
# - Conversely, to get the tag name from the digest:
#     $ docker image inspect --format "{{.RepoTags}}" docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085
#     [docker.io/vaultwarden/web-vault:v2023.5.0]
#
FROM docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085 as vault

########################## BUILD IMAGE ##########################
FROM docker.io/library/rust:1.70.0-bullseye as build

# Build time options to avoid dpkg warnings and help with reproducible builds.
ENV DEBIAN_FRONTEND=noninteractive \
    LANG=C.UTF-8 \
    TZ=UTC \
    TERM=xterm-256color \
    CARGO_HOME="/root/.cargo" \
    USER="root"

# Create CARGO_HOME folder and don't download rust docs
RUN mkdir -pv "${CARGO_HOME}" \
    && rustup set profile minimal

# Install build dependencies for the armhf architecture
# (cross gcc plus armhf variants of the C, MariaDB, PostgreSQL and OpenSSL dev libraries)
RUN dpkg --add-architecture armhf \
    && apt-get update \
    && apt-get install -y \
        --no-install-recommends \
        gcc-arm-linux-gnueabihf \
        libc6-dev:armhf \
        libmariadb-dev:armhf \
        libmariadb-dev-compat:armhf \
        libmariadb3:armhf \
        libpq-dev:armhf \
        libpq5:armhf \
        libssl-dev:armhf \
    #
    # Make sure cargo has the right target config
    && echo '[target.armv7-unknown-linux-gnueabihf]' >> "${CARGO_HOME}/config" \
    && echo 'linker = "arm-linux-gnueabihf-gcc"' >> "${CARGO_HOME}/config" \
    && echo 'rustflags = ["-L/usr/lib/arm-linux-gnueabihf"]' >> "${CARGO_HOME}/config"

# Set arm specific environment values
# (cc crate picks up CC_<target>; openssl-sys reads OPENSSL_*_DIR)
ENV CC_armv7_unknown_linux_gnueabihf="/usr/bin/arm-linux-gnueabihf-gcc" \
    CROSS_COMPILE="1" \
    OPENSSL_INCLUDE_DIR="/usr/include/arm-linux-gnueabihf" \
    OPENSSL_LIB_DIR="/usr/lib/arm-linux-gnueabihf"

# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin /app
WORKDIR /app

# Copies over *only* your manifests and build files
COPY ./Cargo.* ./
COPY ./rust-toolchain ./rust-toolchain
COPY ./build.rs ./build.rs

RUN rustup target add armv7-unknown-linux-gnueabihf

# Configure the DB ARG as late as possible to not invalidate the cached layers above
ARG DB=sqlite,mysql,postgresql

# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
RUN cargo build --features ${DB} --release --target=armv7-unknown-linux-gnueabihf \
    && find . -not -path "./target*" -delete

# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .

# Make sure that we actually build the project
# (touch bumps the mtime so cargo rebuilds the binary, not just the deps)
RUN touch src/main.rs

# Builds again, this time it'll just be
# your actual source files being built
RUN cargo build --features ${DB} --release --target=armv7-unknown-linux-gnueabihf

######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM docker.io/balenalib/armv7hf-debian:bullseye

ENV ROCKET_PROFILE="release" \
    ROCKET_ADDRESS=0.0.0.0 \
    ROCKET_PORT=80

# balenalib cross-build shim — presumably enables QEMU emulation so the
# following armv7 RUN steps work on a non-arm build host; see balenalib docs.
RUN [ "cross-build-start" ]

# Create data folder and Install needed libraries
RUN mkdir /data \
    && apt-get update && apt-get install -y \
        --no-install-recommends \
        ca-certificates \
        curl \
        libmariadb-dev-compat \
        libpq5 \
        openssl \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/*

RUN [ "cross-build-end" ]

VOLUME /data
EXPOSE 80
EXPOSE 3012

# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
WORKDIR /
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/armv7-unknown-linux-gnueabihf/release/vaultwarden .

COPY docker/healthcheck.sh /healthcheck.sh
COPY docker/start.sh /start.sh

HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

CMD ["/start.sh"]
|
|
|
@ -1,114 +0,0 @@
|
||||||
# syntax=docker/dockerfile:1

# Builds vaultwarden for armv7 against musl (armv7-unknown-linux-musleabihf)
# and packages it on a balenalib armv7hf Alpine runtime.

# This file was generated using a Jinja2 template.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.
# Using multistage build:
# https://docs.docker.com/develop/develop-images/multistage-build/
# https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
# The web-vault digest specifies a particular web-vault build on Docker Hub.
# Using the digest instead of the tag name provides better security,
# as the digest of an image is immutable, whereas a tag name can later
# be changed to point to a malicious image.
#
# To verify the current digest for a given tag name:
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
#   click the tag name to view the digest of the image it currently points to.
# - From the command line:
#     $ docker pull docker.io/vaultwarden/web-vault:v2023.5.0
#     $ docker image inspect --format "{{.RepoDigests}}" docker.io/vaultwarden/web-vault:v2023.5.0
#     [docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085]
#
# - Conversely, to get the tag name from the digest:
#     $ docker image inspect --format "{{.RepoTags}}" docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085
#     [docker.io/vaultwarden/web-vault:v2023.5.0]
#
FROM docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085 as vault

########################## BUILD IMAGE ##########################
# rust-musl base image already ships the musl cross toolchain and libraries,
# so no extra apt/cargo target configuration is needed in this variant.
FROM docker.io/blackdex/rust-musl:armv7-musleabihf-stable-1.70.0 as build

# Build time options to avoid dpkg warnings and help with reproducible builds.
ENV DEBIAN_FRONTEND=noninteractive \
    LANG=C.UTF-8 \
    TZ=UTC \
    TERM=xterm-256color \
    CARGO_HOME="/root/.cargo" \
    USER="root"

# Create CARGO_HOME folder and don't download rust docs
RUN mkdir -pv "${CARGO_HOME}" \
    && rustup set profile minimal


# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin /app
WORKDIR /app

# Copies over *only* your manifests and build files
COPY ./Cargo.* ./
COPY ./rust-toolchain ./rust-toolchain
COPY ./build.rs ./build.rs

RUN rustup target add armv7-unknown-linux-musleabihf

# Configure the DB ARG as late as possible to not invalidate the cached layers above
# Enable MiMalloc to improve performance on Alpine builds
ARG DB=sqlite,mysql,postgresql,enable_mimalloc

# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
RUN cargo build --features ${DB} --release --target=armv7-unknown-linux-musleabihf \
    && find . -not -path "./target*" -delete

# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .

# Make sure that we actually build the project
# (touch bumps the mtime so cargo rebuilds the binary, not just the deps)
RUN touch src/main.rs

# Builds again, this time it'll just be
# your actual source files being built
RUN cargo build --features ${DB} --release --target=armv7-unknown-linux-musleabihf

######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM docker.io/balenalib/armv7hf-alpine:3.17

ENV ROCKET_PROFILE="release" \
    ROCKET_ADDRESS=0.0.0.0 \
    ROCKET_PORT=80 \
    SSL_CERT_DIR=/etc/ssl/certs


# balenalib cross-build shim — presumably enables QEMU emulation so the
# following armv7 RUN steps work on a non-arm build host; see balenalib docs.
RUN [ "cross-build-start" ]

# Create data folder and Install needed libraries
RUN mkdir /data \
    && apk add --no-cache \
        ca-certificates \
        curl \
        openssl \
        tzdata

RUN [ "cross-build-end" ]

VOLUME /data
EXPOSE 80
EXPOSE 3012

# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
WORKDIR /
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/armv7-unknown-linux-musleabihf/release/vaultwarden .

COPY docker/healthcheck.sh /healthcheck.sh
COPY docker/start.sh /start.sh

HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

CMD ["/start.sh"]
|
|
|
@ -1,139 +0,0 @@
|
||||||
# syntax=docker/dockerfile:1

# Cross-builds vaultwarden for armv7 (arm-linux-gnueabihf) on a Debian
# bullseye rust image. This buildx variant uses BuildKit cache mounts so the
# cargo registry/git caches persist between builds.

# This file was generated using a Jinja2 template.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.
# Using multistage build:
# https://docs.docker.com/develop/develop-images/multistage-build/
# https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
# The web-vault digest specifies a particular web-vault build on Docker Hub.
# Using the digest instead of the tag name provides better security,
# as the digest of an image is immutable, whereas a tag name can later
# be changed to point to a malicious image.
#
# To verify the current digest for a given tag name:
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
#   click the tag name to view the digest of the image it currently points to.
# - From the command line:
#     $ docker pull docker.io/vaultwarden/web-vault:v2023.5.0
#     $ docker image inspect --format "{{.RepoDigests}}" docker.io/vaultwarden/web-vault:v2023.5.0
#     [docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085]
#
# - Conversely, to get the tag name from the digest:
#     $ docker image inspect --format "{{.RepoTags}}" docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085
#     [docker.io/vaultwarden/web-vault:v2023.5.0]
#
FROM docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085 as vault

########################## BUILD IMAGE ##########################
FROM docker.io/library/rust:1.70.0-bullseye as build

# Build time options to avoid dpkg warnings and help with reproducible builds.
ENV DEBIAN_FRONTEND=noninteractive \
    LANG=C.UTF-8 \
    TZ=UTC \
    TERM=xterm-256color \
    CARGO_HOME="/root/.cargo" \
    USER="root"

# Create CARGO_HOME folder and don't download rust docs
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry mkdir -pv "${CARGO_HOME}" \
    && rustup set profile minimal

# Install build dependencies for the armhf architecture
# (cross gcc plus armhf variants of the C, MariaDB, PostgreSQL and OpenSSL dev libraries)
RUN dpkg --add-architecture armhf \
    && apt-get update \
    && apt-get install -y \
        --no-install-recommends \
        gcc-arm-linux-gnueabihf \
        libc6-dev:armhf \
        libmariadb-dev:armhf \
        libmariadb-dev-compat:armhf \
        libmariadb3:armhf \
        libpq-dev:armhf \
        libpq5:armhf \
        libssl-dev:armhf \
    #
    # Make sure cargo has the right target config
    && echo '[target.armv7-unknown-linux-gnueabihf]' >> "${CARGO_HOME}/config" \
    && echo 'linker = "arm-linux-gnueabihf-gcc"' >> "${CARGO_HOME}/config" \
    && echo 'rustflags = ["-L/usr/lib/arm-linux-gnueabihf"]' >> "${CARGO_HOME}/config"

# Set arm specific environment values
# (cc crate picks up CC_<target>; openssl-sys reads OPENSSL_*_DIR)
ENV CC_armv7_unknown_linux_gnueabihf="/usr/bin/arm-linux-gnueabihf-gcc" \
    CROSS_COMPILE="1" \
    OPENSSL_INCLUDE_DIR="/usr/include/arm-linux-gnueabihf" \
    OPENSSL_LIB_DIR="/usr/lib/arm-linux-gnueabihf"

# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin /app
WORKDIR /app

# Copies over *only* your manifests and build files
COPY ./Cargo.* ./
COPY ./rust-toolchain ./rust-toolchain
COPY ./build.rs ./build.rs

RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry rustup target add armv7-unknown-linux-gnueabihf

# Configure the DB ARG as late as possible to not invalidate the cached layers above
ARG DB=sqlite,mysql,postgresql

# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry cargo build --features ${DB} --release --target=armv7-unknown-linux-gnueabihf \
    && find . -not -path "./target*" -delete

# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .

# Make sure that we actually build the project
# (touch bumps the mtime so cargo rebuilds the binary, not just the deps)
RUN touch src/main.rs

# Builds again, this time it'll just be
# your actual source files being built
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry cargo build --features ${DB} --release --target=armv7-unknown-linux-gnueabihf

######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM docker.io/balenalib/armv7hf-debian:bullseye

ENV ROCKET_PROFILE="release" \
    ROCKET_ADDRESS=0.0.0.0 \
    ROCKET_PORT=80

# balenalib cross-build shim — presumably enables QEMU emulation so the
# following armv7 RUN steps work on a non-arm build host; see balenalib docs.
RUN [ "cross-build-start" ]

# Create data folder and Install needed libraries
RUN mkdir /data \
    && apt-get update && apt-get install -y \
        --no-install-recommends \
        ca-certificates \
        curl \
        libmariadb-dev-compat \
        libpq5 \
        openssl \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/*

RUN [ "cross-build-end" ]

VOLUME /data
EXPOSE 80
EXPOSE 3012

# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
WORKDIR /
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/armv7-unknown-linux-gnueabihf/release/vaultwarden .

COPY docker/healthcheck.sh /healthcheck.sh
COPY docker/start.sh /start.sh

HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

CMD ["/start.sh"]
|
|
|
@ -1,114 +0,0 @@
|
||||||
# syntax=docker/dockerfile:1

# Builds vaultwarden for armv7 against musl (armv7-unknown-linux-musleabihf).
# This buildx variant uses BuildKit cache mounts so the cargo registry/git
# caches persist between builds.

# This file was generated using a Jinja2 template.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.
# Using multistage build:
# https://docs.docker.com/develop/develop-images/multistage-build/
# https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
# The web-vault digest specifies a particular web-vault build on Docker Hub.
# Using the digest instead of the tag name provides better security,
# as the digest of an image is immutable, whereas a tag name can later
# be changed to point to a malicious image.
#
# To verify the current digest for a given tag name:
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
#   click the tag name to view the digest of the image it currently points to.
# - From the command line:
#     $ docker pull docker.io/vaultwarden/web-vault:v2023.5.0
#     $ docker image inspect --format "{{.RepoDigests}}" docker.io/vaultwarden/web-vault:v2023.5.0
#     [docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085]
#
# - Conversely, to get the tag name from the digest:
#     $ docker image inspect --format "{{.RepoTags}}" docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085
#     [docker.io/vaultwarden/web-vault:v2023.5.0]
#
FROM docker.io/vaultwarden/web-vault@sha256:e5b5e99d132d50dc73176afb65f41cf3b834fb06bfa1d621ac16c705c3f10085 as vault

########################## BUILD IMAGE ##########################
# rust-musl base image already ships the musl cross toolchain and libraries,
# so no extra apt/cargo target configuration is needed in this variant.
FROM docker.io/blackdex/rust-musl:armv7-musleabihf-stable-1.70.0 as build

# Build time options to avoid dpkg warnings and help with reproducible builds.
ENV DEBIAN_FRONTEND=noninteractive \
    LANG=C.UTF-8 \
    TZ=UTC \
    TERM=xterm-256color \
    CARGO_HOME="/root/.cargo" \
    USER="root"

# Create CARGO_HOME folder and don't download rust docs
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry mkdir -pv "${CARGO_HOME}" \
    && rustup set profile minimal


# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin /app
WORKDIR /app

# Copies over *only* your manifests and build files
COPY ./Cargo.* ./
COPY ./rust-toolchain ./rust-toolchain
COPY ./build.rs ./build.rs

RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry rustup target add armv7-unknown-linux-musleabihf

# Configure the DB ARG as late as possible to not invalidate the cached layers above
# Enable MiMalloc to improve performance on Alpine builds
ARG DB=sqlite,mysql,postgresql,enable_mimalloc

# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry cargo build --features ${DB} --release --target=armv7-unknown-linux-musleabihf \
    && find . -not -path "./target*" -delete

# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .

# Make sure that we actually build the project
# (touch bumps the mtime so cargo rebuilds the binary, not just the deps)
RUN touch src/main.rs

# Builds again, this time it'll just be
# your actual source files being built
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry cargo build --features ${DB} --release --target=armv7-unknown-linux-musleabihf

######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM docker.io/balenalib/armv7hf-alpine:3.17

ENV ROCKET_PROFILE="release" \
    ROCKET_ADDRESS=0.0.0.0 \
    ROCKET_PORT=80 \
    SSL_CERT_DIR=/etc/ssl/certs


# balenalib cross-build shim — presumably enables QEMU emulation so the
# following armv7 RUN steps work on a non-arm build host; see balenalib docs.
RUN [ "cross-build-start" ]

# Create data folder and Install needed libraries
RUN mkdir /data \
    && apk add --no-cache \
        ca-certificates \
        curl \
        openssl \
        tzdata

RUN [ "cross-build-end" ]

VOLUME /data
EXPOSE 80
EXPOSE 3012

# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
WORKDIR /
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/armv7-unknown-linux-musleabihf/release/vaultwarden .

COPY docker/healthcheck.sh /healthcheck.sh
COPY docker/start.sh /start.sh

HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

CMD ["/start.sh"]
|
|
15
docker/bake.sh
Executable file
15
docker/bake.sh
Executable file
|
@ -0,0 +1,15 @@
|
||||||
|
#!/usr/bin/env bash

# Wrapper around `docker buildx bake`: loads the build environment, then runs
# bake with this directory's docker-bake.hcl and the repository root as the
# build context. All CLI arguments are forwarded to bake.

# Determine the basedir of this script.
# It should be located in the same directory as the docker-bake.hcl
# This ensures you can run this script from both inside and outside of the docker directory
# How it works: RL resolves a symlinked invocation ($0) if any; SP falls back
# to $0 itself; the cd+pwd pair canonicalizes the directory to an absolute path.
# NOTE(review): `readlink -n` resolves only one level of symlink — presumably
# sufficient here; confirm if the script is ever invoked via nested symlinks.
BASEDIR=$(RL=$(readlink -n "$0"); SP="${RL:-$0}"; dirname "$(cd "$(dirname "${SP}")" || exit; pwd)/$(basename "${SP}")")

# Load build env's (sets and exports SOURCE_COMMIT / SOURCE_VERSION)
source "${BASEDIR}/bake_env.sh"

# Be verbose on what is being executed
set -x

# Make sure we set the context to `..` so it will go up one directory
docker buildx bake --progress plain --set "*.context=${BASEDIR}/.." -f "${BASEDIR}/docker-bake.hcl" "$@"
|
33
docker/bake_env.sh
Normal file
33
docker/bake_env.sh
Normal file
|
@ -0,0 +1,33 @@
|
||||||
|
#!/usr/bin/env bash

# Derive SOURCE_COMMIT and SOURCE_VERSION for the bake scripts and export them
# so `docker buildx bake` picks them up. Values already provided via the
# environment take precedence over anything derived from git.

# Keep a caller-provided SOURCE_COMMIT untouched (even an empty one);
# otherwise fall back to the current git HEAD. `${VAR=word}` assigns only
# when VAR is unset, matching the original set-ness check.
: "${SOURCE_COMMIT=$(git rev-parse HEAD)}"

if [[ -n "${VW_VERSION}" ]]; then
    # An explicitly supplied VW_VERSION always wins.
    SOURCE_VERSION="${VW_VERSION}"
elif GIT_EXACT_TAG="$(git describe --tags --abbrev=0 --exact-match 2>/dev/null)"; [[ -n "${GIT_EXACT_TAG}" ]]; then
    # HEAD sits exactly on a tag: use that tag verbatim.
    SOURCE_VERSION="${GIT_EXACT_TAG}"
else
    # Otherwise: last reachable tag plus the first 8 characters of the commit.
    GIT_LAST_TAG="$(git describe --tags --abbrev=0)"
    SOURCE_VERSION="${GIT_LAST_TAG}-${SOURCE_COMMIT:0:8}"
    GIT_BRANCH="$(git rev-parse --abbrev-ref HEAD)"
    # Append the branch name, except for mainline (or detached-HEAD) checkouts.
    if [[ "${GIT_BRANCH}" != "main" && "${GIT_BRANCH}" != "master" && "${GIT_BRANCH}" != "HEAD" ]]; then
        SOURCE_VERSION="${SOURCE_VERSION} (${GIT_BRANCH})"
    fi
fi

# Export the rendered variables above so bake will use them
export SOURCE_COMMIT
export SOURCE_VERSION
|
269
docker/docker-bake.hcl
Normal file
269
docker/docker-bake.hcl
Normal file
|
@ -0,0 +1,269 @@
|
||||||
|
// ==== Baking Variables ====
|
||||||
|
|
||||||
|
// Set which cargo profile to use, dev or release for example
|
||||||
|
// Use the value provided in the Dockerfile as default
|
||||||
|
variable "CARGO_PROFILE" {
|
||||||
|
default = null
|
||||||
|
}
|
||||||
|
|
||||||
|
// Set which DB's (features) to enable
|
||||||
|
// Use the value provided in the Dockerfile as default
|
||||||
|
variable "DB" {
|
||||||
|
default = null
|
||||||
|
}
|
||||||
|
|
||||||
|
// The repository this build was triggered from
|
||||||
|
variable "SOURCE_REPOSITORY_URL" {
|
||||||
|
default = null
|
||||||
|
}
|
||||||
|
|
||||||
|
// The commit hash of of the current commit this build was triggered on
|
||||||
|
variable "SOURCE_COMMIT" {
|
||||||
|
default = null
|
||||||
|
}
|
||||||
|
|
||||||
|
// The version of this build
|
||||||
|
// Typically the current exact tag of this commit,
|
||||||
|
// else the last tag and the first 8 characters of the source commit
|
||||||
|
variable "SOURCE_VERSION" {
|
||||||
|
default = null
|
||||||
|
}
|
||||||
|
|
||||||
|
// This can be used to overwrite SOURCE_VERSION
|
||||||
|
// It will be used during the build.rs building stage
|
||||||
|
variable "VW_VERSION" {
|
||||||
|
default = null
|
||||||
|
}
|
||||||
|
|
||||||
|
// The base tag(s) to use
|
||||||
|
// This can be a comma separated value like "testing,1.29.2"
|
||||||
|
variable "BASE_TAGS" {
|
||||||
|
default = "testing"
|
||||||
|
}
|
||||||
|
|
||||||
|
// Which container registries should be used for the tagging
|
||||||
|
// This can be a comma separated value
|
||||||
|
// Use a full URI like `ghcr.io/dani-garcia/vaultwarden,docker.io/vaultwarden/server`
|
||||||
|
variable "CONTAINER_REGISTRIES" {
|
||||||
|
default = "vaultwarden/server"
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
// ==== Baking Groups ====
|
||||||
|
|
||||||
|
group "default" {
|
||||||
|
targets = ["debian"]
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
// ==== Shared Baking ====
|
||||||
|
function "labels" {
|
||||||
|
params = []
|
||||||
|
result = {
|
||||||
|
"org.opencontainers.image.description" = "Unofficial Bitwarden compatible server written in Rust - ${SOURCE_VERSION}"
|
||||||
|
"org.opencontainers.image.licenses" = "AGPL-3.0-only"
|
||||||
|
"org.opencontainers.image.documentation" = "https://github.com/dani-garcia/vaultwarden/wiki"
|
||||||
|
"org.opencontainers.image.url" = "https://github.com/dani-garcia/vaultwarden"
|
||||||
|
"org.opencontainers.image.created" = "${formatdate("YYYY-MM-DD'T'hh:mm:ssZZZZZ", timestamp())}"
|
||||||
|
"org.opencontainers.image.source" = "${SOURCE_REPOSITORY_URL}"
|
||||||
|
"org.opencontainers.image.revision" = "${SOURCE_COMMIT}"
|
||||||
|
"org.opencontainers.image.version" = "${SOURCE_VERSION}"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
target "_default_attributes" {
|
||||||
|
labels = labels()
|
||||||
|
args = {
|
||||||
|
DB = "${DB}"
|
||||||
|
CARGO_PROFILE = "${CARGO_PROFILE}"
|
||||||
|
VW_VERSION = "${VW_VERSION}"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
// ==== Debian Baking ====
|
||||||
|
|
||||||
|
// Default Debian target, will build a container using the hosts platform architecture
|
||||||
|
target "debian" {
|
||||||
|
inherits = ["_default_attributes"]
|
||||||
|
dockerfile = "docker/Dockerfile.debian"
|
||||||
|
tags = generate_tags("", platform_tag())
|
||||||
|
output = ["type=docker"]
|
||||||
|
}
|
||||||
|
|
||||||
|
// Multi Platform target, will build one tagged manifest with all supported architectures
|
||||||
|
// This is mainly used by GitHub Actions to build and push new containers
|
||||||
|
target "debian-multi" {
|
||||||
|
inherits = ["debian"]
|
||||||
|
platforms = ["linux/amd64", "linux/arm64", "linux/arm/v7", "linux/arm/v6"]
|
||||||
|
tags = generate_tags("", "")
|
||||||
|
output = [join(",", flatten([["type=registry"], image_index_annotations()]))]
|
||||||
|
}
|
||||||
|
|
||||||
|
// Per platform targets, to individually test building per platform locally
|
||||||
|
target "debian-amd64" {
|
||||||
|
inherits = ["debian"]
|
||||||
|
platforms = ["linux/amd64"]
|
||||||
|
tags = generate_tags("", "-amd64")
|
||||||
|
}
|
||||||
|
|
||||||
|
target "debian-arm64" {
|
||||||
|
inherits = ["debian"]
|
||||||
|
platforms = ["linux/arm64"]
|
||||||
|
tags = generate_tags("", "-arm64")
|
||||||
|
}
|
||||||
|
|
||||||
|
target "debian-armv7" {
|
||||||
|
inherits = ["debian"]
|
||||||
|
platforms = ["linux/arm/v7"]
|
||||||
|
tags = generate_tags("", "-armv7")
|
||||||
|
}
|
||||||
|
|
||||||
|
target "debian-armv6" {
|
||||||
|
inherits = ["debian"]
|
||||||
|
platforms = ["linux/arm/v6"]
|
||||||
|
tags = generate_tags("", "-armv6")
|
||||||
|
}
|
||||||
|
|
||||||
|
// ==== Start of unsupported Debian architecture targets ===
|
||||||
|
// These are provided just to help users build for these rare platforms
|
||||||
|
// They will not be built by default
|
||||||
|
target "debian-386" {
|
||||||
|
inherits = ["debian"]
|
||||||
|
platforms = ["linux/386"]
|
||||||
|
tags = generate_tags("", "-386")
|
||||||
|
args = {
|
||||||
|
ARCH_OPENSSL_LIB_DIR = "/usr/lib/i386-linux-gnu"
|
||||||
|
ARCH_OPENSSL_INCLUDE_DIR = "/usr/include/i386-linux-gnu"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
target "debian-ppc64le" {
|
||||||
|
inherits = ["debian"]
|
||||||
|
platforms = ["linux/ppc64le"]
|
||||||
|
tags = generate_tags("", "-ppc64le")
|
||||||
|
args = {
|
||||||
|
ARCH_OPENSSL_LIB_DIR = "/usr/lib/powerpc64le-linux-gnu"
|
||||||
|
ARCH_OPENSSL_INCLUDE_DIR = "/usr/include/powerpc64le-linux-gnu"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
target "debian-s390x" {
|
||||||
|
inherits = ["debian"]
|
||||||
|
platforms = ["linux/s390x"]
|
||||||
|
tags = generate_tags("", "-s390x")
|
||||||
|
args = {
|
||||||
|
ARCH_OPENSSL_LIB_DIR = "/usr/lib/s390x-linux-gnu"
|
||||||
|
ARCH_OPENSSL_INCLUDE_DIR = "/usr/include/s390x-linux-gnu"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// ==== End of unsupported Debian architecture targets ===
|
||||||
|
|
||||||
|
// A Group to build all platforms individually for local testing
|
||||||
|
group "debian-all" {
|
||||||
|
targets = ["debian-amd64", "debian-arm64", "debian-armv7", "debian-armv6"]
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
// ==== Alpine Baking ====
|
||||||
|
|
||||||
|
// Default Alpine target, will build a container using the hosts platform architecture
|
||||||
|
target "alpine" {
|
||||||
|
inherits = ["_default_attributes"]
|
||||||
|
dockerfile = "docker/Dockerfile.alpine"
|
||||||
|
tags = generate_tags("-alpine", platform_tag())
|
||||||
|
output = ["type=docker"]
|
||||||
|
}
|
||||||
|
|
||||||
|
// Multi Platform target, will build one tagged manifest with all supported architectures
|
||||||
|
// This is mainly used by GitHub Actions to build and push new containers
|
||||||
|
target "alpine-multi" {
|
||||||
|
inherits = ["alpine"]
|
||||||
|
platforms = ["linux/amd64", "linux/arm64", "linux/arm/v7", "linux/arm/v6"]
|
||||||
|
tags = generate_tags("-alpine", "")
|
||||||
|
output = [join(",", flatten([["type=registry"], image_index_annotations()]))]
|
||||||
|
}
|
||||||
|
|
||||||
|
// Per platform targets, to individually test building per platform locally
|
||||||
|
target "alpine-amd64" {
|
||||||
|
inherits = ["alpine"]
|
||||||
|
platforms = ["linux/amd64"]
|
||||||
|
tags = generate_tags("-alpine", "-amd64")
|
||||||
|
}
|
||||||
|
|
||||||
|
target "alpine-arm64" {
|
||||||
|
inherits = ["alpine"]
|
||||||
|
platforms = ["linux/arm64"]
|
||||||
|
tags = generate_tags("-alpine", "-arm64")
|
||||||
|
}
|
||||||
|
|
||||||
|
target "alpine-armv7" {
|
||||||
|
inherits = ["alpine"]
|
||||||
|
platforms = ["linux/arm/v7"]
|
||||||
|
tags = generate_tags("-alpine", "-armv7")
|
||||||
|
}
|
||||||
|
|
||||||
|
target "alpine-armv6" {
|
||||||
|
inherits = ["alpine"]
|
||||||
|
platforms = ["linux/arm/v6"]
|
||||||
|
tags = generate_tags("-alpine", "-armv6")
|
||||||
|
}
|
||||||
|
|
||||||
|
// A Group to build all platforms individually for local testing
|
||||||
|
group "alpine-all" {
|
||||||
|
targets = ["alpine-amd64", "alpine-arm64", "alpine-armv7", "alpine-armv6"]
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
// ==== Bake everything locally ====
|
||||||
|
|
||||||
|
group "all" {
|
||||||
|
targets = ["debian-all", "alpine-all"]
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
// ==== Baking functions ====
|
||||||
|
|
||||||
|
// This will return the local platform as amd64, arm64 or armv7 for example
|
||||||
|
// It can be used for creating a local image tag
|
||||||
|
function "platform_tag" {
|
||||||
|
params = []
|
||||||
|
result = "-${replace(replace(BAKE_LOCAL_PLATFORM, "linux/", ""), "/", "")}"
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
function "get_container_registries" {
|
||||||
|
params = []
|
||||||
|
result = flatten(split(",", CONTAINER_REGISTRIES))
|
||||||
|
}
|
||||||
|
|
||||||
|
function "get_base_tags" {
|
||||||
|
params = []
|
||||||
|
result = flatten(split(",", BASE_TAGS))
|
||||||
|
}
|
||||||
|
|
||||||
|
function "generate_tags" {
|
||||||
|
params = [
|
||||||
|
suffix, // What to append to the BASE_TAG when needed, like `-alpine` for example
|
||||||
|
platform // the platform we are building for if needed
|
||||||
|
]
|
||||||
|
result = flatten([
|
||||||
|
for registry in get_container_registries() :
|
||||||
|
[for base_tag in get_base_tags() :
|
||||||
|
concat(
|
||||||
|
# If the base_tag contains latest, and the suffix contains `-alpine` add a `:alpine` tag too
|
||||||
|
base_tag == "latest" ? suffix == "-alpine" ? ["${registry}:alpine${platform}"] : [] : [],
|
||||||
|
# The default tagging strategy
|
||||||
|
["${registry}:${base_tag}${suffix}${platform}"]
|
||||||
|
)
|
||||||
|
]
|
||||||
|
])
|
||||||
|
}
|
||||||
|
|
||||||
|
function "image_index_annotations" {
|
||||||
|
params = []
|
||||||
|
result = flatten([
|
||||||
|
for key, value in labels() :
|
||||||
|
value != null ? formatlist("annotation-index.%s=%s", "${key}", "${value}") : []
|
||||||
|
])
|
||||||
|
}
|
|
@ -1,16 +1,24 @@
|
||||||
#!/bin/sh
|
#!/usr/bin/env sh
|
||||||
|
|
||||||
# Use the value of the corresponding env var (if present),
|
# Use the value of the corresponding env var (if present),
|
||||||
# or a default value otherwise.
|
# or a default value otherwise.
|
||||||
: "${DATA_FOLDER:="data"}"
|
: "${DATA_FOLDER:="/data"}"
|
||||||
: "${ROCKET_PORT:="80"}"
|
: "${ROCKET_PORT:="80"}"
|
||||||
|
: "${ENV_FILE:="/.env"}"
|
||||||
|
|
||||||
CONFIG_FILE="${DATA_FOLDER}"/config.json
|
CONFIG_FILE="${DATA_FOLDER}"/config.json
|
||||||
|
|
||||||
|
# Check if the $ENV_FILE file exist and is readable
|
||||||
|
# If that is the case, load it into the environment before running any check
|
||||||
|
if [ -r "${ENV_FILE}" ]; then
|
||||||
|
# shellcheck disable=SC1090
|
||||||
|
. "${ENV_FILE}"
|
||||||
|
fi
|
||||||
|
|
||||||
# Given a config key, return the corresponding config value from the
|
# Given a config key, return the corresponding config value from the
|
||||||
# config file. If the key doesn't exist, return an empty string.
|
# config file. If the key doesn't exist, return an empty string.
|
||||||
get_config_val() {
|
get_config_val() {
|
||||||
local key="$1"
|
key="$1"
|
||||||
# Extract a line of the form:
|
# Extract a line of the form:
|
||||||
# "domain": "https://bw.example.com/path",
|
# "domain": "https://bw.example.com/path",
|
||||||
grep "\"${key}\":" "${CONFIG_FILE}" |
|
grep "\"${key}\":" "${CONFIG_FILE}" |
|
||||||
|
|
105
docker/podman-bake.sh
Executable file
105
docker/podman-bake.sh
Executable file
|
@ -0,0 +1,105 @@
|
||||||
|
#!/usr/bin/env bash
|
||||||
|
|
||||||
|
# Determine the basedir of this script.
|
||||||
|
# It should be located in the same directory as the docker-bake.hcl
|
||||||
|
# This ensures you can run this script from both inside and outside of the docker directory
|
||||||
|
BASEDIR=$(RL=$(readlink -n "$0"); SP="${RL:-$0}"; dirname "$(cd "$(dirname "${SP}")" || exit; pwd)/$(basename "${SP}")")
|
||||||
|
|
||||||
|
# Load build env's
|
||||||
|
source "${BASEDIR}/bake_env.sh"
|
||||||
|
|
||||||
|
# Check if a target is given as first argument
|
||||||
|
# If not we assume the defaults and pass the given arguments to the podman command
|
||||||
|
case "${1}" in
|
||||||
|
alpine*|debian*)
|
||||||
|
TARGET="${1}"
|
||||||
|
# Now shift the $@ array so we only have the rest of the arguments
|
||||||
|
# This allows us too append these as extra arguments too the podman buildx build command
|
||||||
|
shift
|
||||||
|
;;
|
||||||
|
esac
|
||||||
|
|
||||||
|
LABEL_ARGS=(
|
||||||
|
--label org.opencontainers.image.description="Unofficial Bitwarden compatible server written in Rust"
|
||||||
|
--label org.opencontainers.image.licenses="AGPL-3.0-only"
|
||||||
|
--label org.opencontainers.image.documentation="https://github.com/dani-garcia/vaultwarden/wiki"
|
||||||
|
--label org.opencontainers.image.url="https://github.com/dani-garcia/vaultwarden"
|
||||||
|
--label org.opencontainers.image.created="$(date --utc --iso-8601=seconds)"
|
||||||
|
)
|
||||||
|
if [[ -n "${SOURCE_REPOSITORY_URL}" ]]; then
|
||||||
|
LABEL_ARGS+=(--label org.opencontainers.image.source="${SOURCE_REPOSITORY_URL}")
|
||||||
|
fi
|
||||||
|
if [[ -n "${SOURCE_COMMIT}" ]]; then
|
||||||
|
LABEL_ARGS+=(--label org.opencontainers.image.revision="${SOURCE_COMMIT}")
|
||||||
|
fi
|
||||||
|
if [[ -n "${SOURCE_VERSION}" ]]; then
|
||||||
|
LABEL_ARGS+=(--label org.opencontainers.image.version="${SOURCE_VERSION}")
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Check if and which --build-arg arguments we need to configure
|
||||||
|
BUILD_ARGS=()
|
||||||
|
if [[ -n "${DB}" ]]; then
|
||||||
|
BUILD_ARGS+=(--build-arg DB="${DB}")
|
||||||
|
fi
|
||||||
|
if [[ -n "${CARGO_PROFILE}" ]]; then
|
||||||
|
BUILD_ARGS+=(--build-arg CARGO_PROFILE="${CARGO_PROFILE}")
|
||||||
|
fi
|
||||||
|
if [[ -n "${VW_VERSION}" ]]; then
|
||||||
|
BUILD_ARGS+=(--build-arg VW_VERSION="${VW_VERSION}")
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Set the default BASE_TAGS if non are provided
|
||||||
|
if [[ -z "${BASE_TAGS}" ]]; then
|
||||||
|
BASE_TAGS="testing"
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Set the default CONTAINER_REGISTRIES if non are provided
|
||||||
|
if [[ -z "${CONTAINER_REGISTRIES}" ]]; then
|
||||||
|
CONTAINER_REGISTRIES="vaultwarden/server"
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Check which Dockerfile we need to use, default is debian
|
||||||
|
case "${TARGET}" in
|
||||||
|
alpine*)
|
||||||
|
BASE_TAGS="${BASE_TAGS}-alpine"
|
||||||
|
DOCKERFILE="Dockerfile.alpine"
|
||||||
|
;;
|
||||||
|
*)
|
||||||
|
DOCKERFILE="Dockerfile.debian"
|
||||||
|
;;
|
||||||
|
esac
|
||||||
|
|
||||||
|
# Check which platform we need to build and append the BASE_TAGS with the architecture
|
||||||
|
case "${TARGET}" in
|
||||||
|
*-arm64)
|
||||||
|
BASE_TAGS="${BASE_TAGS}-arm64"
|
||||||
|
PLATFORM="linux/arm64"
|
||||||
|
;;
|
||||||
|
*-armv7)
|
||||||
|
BASE_TAGS="${BASE_TAGS}-armv7"
|
||||||
|
PLATFORM="linux/arm/v7"
|
||||||
|
;;
|
||||||
|
*-armv6)
|
||||||
|
BASE_TAGS="${BASE_TAGS}-armv6"
|
||||||
|
PLATFORM="linux/arm/v6"
|
||||||
|
;;
|
||||||
|
*)
|
||||||
|
BASE_TAGS="${BASE_TAGS}-amd64"
|
||||||
|
PLATFORM="linux/amd64"
|
||||||
|
;;
|
||||||
|
esac
|
||||||
|
|
||||||
|
# Be verbose on what is being executed
|
||||||
|
set -x
|
||||||
|
|
||||||
|
# Build the image with podman
|
||||||
|
# We use the docker format here since we are using `SHELL`, which is not supported by OCI
|
||||||
|
# shellcheck disable=SC2086
|
||||||
|
podman buildx build \
|
||||||
|
--platform="${PLATFORM}" \
|
||||||
|
--tag="${CONTAINER_REGISTRIES}:${BASE_TAGS}" \
|
||||||
|
--format=docker \
|
||||||
|
"${LABEL_ARGS[@]}" \
|
||||||
|
"${BUILD_ARGS[@]}" \
|
||||||
|
--file="${BASEDIR}/${DOCKERFILE}" "$@" \
|
||||||
|
"${BASEDIR}/.."
|
|
@ -1,17 +1,31 @@
|
||||||
#!/usr/bin/env python3
|
#!/usr/bin/env python3
|
||||||
|
|
||||||
import os, argparse, json
|
import os
|
||||||
|
import argparse
|
||||||
|
import json
|
||||||
|
import yaml
|
||||||
import jinja2
|
import jinja2
|
||||||
|
|
||||||
|
# Load settings file
|
||||||
|
with open("DockerSettings.yaml", 'r') as yaml_file:
|
||||||
|
yaml_data = yaml.safe_load(yaml_file)
|
||||||
|
|
||||||
|
settings_env = jinja2.Environment(
|
||||||
|
loader=jinja2.FileSystemLoader(os.getcwd()),
|
||||||
|
)
|
||||||
|
settings_yaml = yaml.safe_load(settings_env.get_template("DockerSettings.yaml").render(yaml_data))
|
||||||
|
|
||||||
args_parser = argparse.ArgumentParser()
|
args_parser = argparse.ArgumentParser()
|
||||||
args_parser.add_argument('template_file', help='Jinja2 template file to render.')
|
args_parser.add_argument('template_file', help='Jinja2 template file to render.')
|
||||||
args_parser.add_argument('render_vars', help='JSON-encoded data to pass to the templating engine.')
|
args_parser.add_argument('render_vars', help='JSON-encoded data to pass to the templating engine.')
|
||||||
cli_args = args_parser.parse_args()
|
cli_args = args_parser.parse_args()
|
||||||
|
|
||||||
|
# Merge the default config yaml with the json arguments given.
|
||||||
render_vars = json.loads(cli_args.render_vars)
|
render_vars = json.loads(cli_args.render_vars)
|
||||||
|
settings_yaml.update(render_vars)
|
||||||
|
|
||||||
environment = jinja2.Environment(
|
environment = jinja2.Environment(
|
||||||
loader=jinja2.FileSystemLoader(os.getcwd()),
|
loader=jinja2.FileSystemLoader(os.getcwd()),
|
||||||
trim_blocks=True,
|
trim_blocks=True,
|
||||||
)
|
)
|
||||||
print(environment.get_template(cli_args.template_file).render(render_vars))
|
print(environment.get_template(cli_args.template_file).render(settings_yaml))
|
||||||
|
|
|
@ -1,20 +0,0 @@
|
||||||
The hooks in this directory are used to create multi-arch images using Docker Hub automated builds.
|
|
||||||
|
|
||||||
Docker Hub hooks provide these predefined [environment variables](https://docs.docker.com/docker-hub/builds/advanced/#environment-variables-for-building-and-testing):
|
|
||||||
|
|
||||||
* `SOURCE_BRANCH`: the name of the branch or the tag that is currently being tested.
|
|
||||||
* `SOURCE_COMMIT`: the SHA1 hash of the commit being tested.
|
|
||||||
* `COMMIT_MSG`: the message from the commit being tested and built.
|
|
||||||
* `DOCKER_REPO`: the name of the Docker repository being built.
|
|
||||||
* `DOCKERFILE_PATH`: the dockerfile currently being built.
|
|
||||||
* `DOCKER_TAG`: the Docker repository tag being built.
|
|
||||||
* `IMAGE_NAME`: the name and tag of the Docker repository being built. (This variable is a combination of `DOCKER_REPO:DOCKER_TAG`.)
|
|
||||||
|
|
||||||
The current multi-arch image build relies on the original vaultwarden Dockerfiles, which use cross-compilation for architectures other than `amd64`, and don't yet support all arch/distro combinations. However, cross-compilation is much faster than QEMU-based builds (e.g., using `docker buildx`). This situation may need to be revisited at some point.
|
|
||||||
|
|
||||||
## References
|
|
||||||
|
|
||||||
* https://docs.docker.com/docker-hub/builds/advanced/
|
|
||||||
* https://docs.docker.com/engine/reference/commandline/manifest/
|
|
||||||
* https://www.docker.com/blog/multi-arch-build-and-images-the-simple-way/
|
|
||||||
* https://success.docker.com/article/how-do-i-authenticate-with-the-v2-api
|
|
|
@ -1,15 +0,0 @@
|
||||||
#!/usr/bin/env bash
|
|
||||||
|
|
||||||
# The default Debian-based images support these arches for all database backends.
|
|
||||||
arches=(
|
|
||||||
amd64
|
|
||||||
armv6
|
|
||||||
armv7
|
|
||||||
arm64
|
|
||||||
)
|
|
||||||
export arches
|
|
||||||
|
|
||||||
if [[ "${DOCKER_TAG}" == *alpine ]]; then
|
|
||||||
distro_suffix=.alpine
|
|
||||||
fi
|
|
||||||
export distro_suffix
|
|
51
hooks/build
51
hooks/build
|
@ -1,51 +0,0 @@
|
||||||
#!/usr/bin/env bash
|
|
||||||
|
|
||||||
echo ">>> Building images..."
|
|
||||||
|
|
||||||
# shellcheck source=arches.sh
|
|
||||||
source ./hooks/arches.sh
|
|
||||||
|
|
||||||
if [[ -z "${SOURCE_COMMIT}" ]]; then
|
|
||||||
# This var is typically predefined by Docker Hub, but it won't be
|
|
||||||
# when testing locally.
|
|
||||||
SOURCE_COMMIT="$(git rev-parse HEAD)"
|
|
||||||
fi
|
|
||||||
|
|
||||||
# Construct a version string in the style of `build.rs`.
|
|
||||||
GIT_EXACT_TAG="$(git describe --tags --abbrev=0 --exact-match 2>/dev/null)"
|
|
||||||
if [[ -n "${GIT_EXACT_TAG}" ]]; then
|
|
||||||
SOURCE_VERSION="${GIT_EXACT_TAG}"
|
|
||||||
else
|
|
||||||
GIT_LAST_TAG="$(git describe --tags --abbrev=0)"
|
|
||||||
SOURCE_VERSION="${GIT_LAST_TAG}-${SOURCE_COMMIT:0:8}"
|
|
||||||
fi
|
|
||||||
|
|
||||||
LABELS=(
|
|
||||||
# https://github.com/opencontainers/image-spec/blob/master/annotations.md
|
|
||||||
org.opencontainers.image.created="$(date --utc --iso-8601=seconds)"
|
|
||||||
org.opencontainers.image.documentation="https://github.com/dani-garcia/vaultwarden/wiki"
|
|
||||||
org.opencontainers.image.licenses="AGPL-3.0-only"
|
|
||||||
org.opencontainers.image.revision="${SOURCE_COMMIT}"
|
|
||||||
org.opencontainers.image.source="${SOURCE_REPOSITORY_URL}"
|
|
||||||
org.opencontainers.image.url="https://github.com/dani-garcia/vaultwarden"
|
|
||||||
org.opencontainers.image.version="${SOURCE_VERSION}"
|
|
||||||
)
|
|
||||||
LABEL_ARGS=()
|
|
||||||
for label in "${LABELS[@]}"; do
|
|
||||||
LABEL_ARGS+=(--label "${label}")
|
|
||||||
done
|
|
||||||
|
|
||||||
# Check if DOCKER_BUILDKIT is set, if so, use the Dockerfile.buildkit as template
|
|
||||||
if [[ -n "${DOCKER_BUILDKIT}" ]]; then
|
|
||||||
buildkit_suffix=.buildkit
|
|
||||||
fi
|
|
||||||
|
|
||||||
set -ex
|
|
||||||
|
|
||||||
for arch in "${arches[@]}"; do
|
|
||||||
docker build \
|
|
||||||
"${LABEL_ARGS[@]}" \
|
|
||||||
-t "${DOCKER_REPO}:${DOCKER_TAG}-${arch}" \
|
|
||||||
-f "docker/${arch}/Dockerfile${buildkit_suffix}${distro_suffix}" \
|
|
||||||
.
|
|
||||||
done
|
|
|
@ -1,28 +0,0 @@
|
||||||
#!/usr/bin/env bash
|
|
||||||
|
|
||||||
set -ex
|
|
||||||
|
|
||||||
# If requested, print some environment info for troubleshooting.
|
|
||||||
if [[ -n "${DOCKER_HUB_DEBUG}" ]]; then
|
|
||||||
id
|
|
||||||
pwd
|
|
||||||
df -h
|
|
||||||
env
|
|
||||||
docker info
|
|
||||||
docker version
|
|
||||||
fi
|
|
||||||
|
|
||||||
# Install build dependencies.
|
|
||||||
deps=(
|
|
||||||
jq
|
|
||||||
)
|
|
||||||
apt-get update
|
|
||||||
apt-get install -y "${deps[@]}"
|
|
||||||
|
|
||||||
# Docker Hub uses a shallow clone and doesn't fetch tags, which breaks some
|
|
||||||
# Git operations that we perform later, so fetch the complete history and
|
|
||||||
# tags first. Note that if the build is cached, the clone may have been
|
|
||||||
# unshallowed already; if so, unshallowing will fail, so skip it.
|
|
||||||
if [[ -f .git/shallow ]]; then
|
|
||||||
git fetch --unshallow --tags
|
|
||||||
fi
|
|
111
hooks/push
111
hooks/push
|
@ -1,111 +0,0 @@
|
||||||
#!/usr/bin/env bash
|
|
||||||
|
|
||||||
# shellcheck source=arches.sh
|
|
||||||
source ./hooks/arches.sh
|
|
||||||
|
|
||||||
export DOCKER_CLI_EXPERIMENTAL=enabled
|
|
||||||
|
|
||||||
# Join a list of args with a single char.
|
|
||||||
# Ref: https://stackoverflow.com/a/17841619
|
|
||||||
join() { local IFS="$1"; shift; echo "$*"; }
|
|
||||||
|
|
||||||
set -ex
|
|
||||||
|
|
||||||
echo ">>> Starting local Docker registry when needed..."
|
|
||||||
|
|
||||||
# Docker Buildx's `docker-container` driver is needed for multi-platform
|
|
||||||
# builds, but it can't access existing images on the Docker host (like the
|
|
||||||
# cross-compiled ones we just built). Those images first need to be pushed to
|
|
||||||
# a registry -- Docker Hub could be used, but since it's not trivial to clean
|
|
||||||
# up those intermediate images on Docker Hub, it's easier to just run a local
|
|
||||||
# Docker registry, which gets cleaned up automatically once the build job ends.
|
|
||||||
#
|
|
||||||
# https://docs.docker.com/registry/deploying/
|
|
||||||
# https://hub.docker.com/_/registry
|
|
||||||
#
|
|
||||||
# Use host networking so the buildx container can access the registry via
|
|
||||||
# localhost.
|
|
||||||
#
|
|
||||||
# First check if there already is a registry container running, else skip it.
|
|
||||||
# This will only happen either locally or running it via Github Actions
|
|
||||||
#
|
|
||||||
if ! timeout 5 bash -c 'cat < /dev/null > /dev/tcp/localhost/5000'; then
|
|
||||||
# defaults to port 5000
|
|
||||||
docker run -d --name registry --network host registry:2
|
|
||||||
fi
|
|
||||||
|
|
||||||
# Docker Hub sets a `DOCKER_REPO` env var with the format `index.docker.io/user/repo`.
|
|
||||||
# Strip the registry portion to construct a local repo path for use in `Dockerfile.buildx`.
|
|
||||||
LOCAL_REGISTRY="localhost:5000"
|
|
||||||
REPO="${DOCKER_REPO#*/}"
|
|
||||||
LOCAL_REPO="${LOCAL_REGISTRY}/${REPO}"
|
|
||||||
|
|
||||||
echo ">>> Pushing images to local registry..."
|
|
||||||
|
|
||||||
for arch in "${arches[@]}"; do
|
|
||||||
docker_image="${DOCKER_REPO}:${DOCKER_TAG}-${arch}"
|
|
||||||
local_image="${LOCAL_REPO}:${DOCKER_TAG}-${arch}"
|
|
||||||
docker tag "${docker_image}" "${local_image}"
|
|
||||||
docker push "${local_image}"
|
|
||||||
done
|
|
||||||
|
|
||||||
echo ">>> Setting up Docker Buildx..."
|
|
||||||
|
|
||||||
# Same as earlier, use host networking so the buildx container can access the
|
|
||||||
# registry via localhost.
|
|
||||||
#
|
|
||||||
# Ref: https://github.com/docker/buildx/issues/94#issuecomment-534367714
|
|
||||||
#
|
|
||||||
# Check if there already is a builder running, else skip this and use the existing.
|
|
||||||
# This will only happen either locally or running it via Github Actions
|
|
||||||
#
|
|
||||||
if ! docker buildx inspect builder > /dev/null 2>&1 ; then
|
|
||||||
docker buildx create --name builder --use --driver-opt network=host
|
|
||||||
fi
|
|
||||||
|
|
||||||
echo ">>> Running Docker Buildx..."
|
|
||||||
|
|
||||||
tags=("${DOCKER_REPO}:${DOCKER_TAG}")
|
|
||||||
|
|
||||||
# If the Docker tag starts with a version number, assume the latest release
|
|
||||||
# is being pushed. Add an extra tag (`latest` or `alpine`, as appropriate)
|
|
||||||
# to make it easier for users to track the latest release.
|
|
||||||
if [[ "${DOCKER_TAG}" =~ ^[0-9]+\.[0-9]+\.[0-9]+ ]]; then
|
|
||||||
if [[ "${DOCKER_TAG}" == *alpine ]]; then
|
|
||||||
tags+=("${DOCKER_REPO}:alpine")
|
|
||||||
else
|
|
||||||
tags+=("${DOCKER_REPO}:latest")
|
|
||||||
fi
|
|
||||||
fi
|
|
||||||
|
|
||||||
tag_args=()
|
|
||||||
for tag in "${tags[@]}"; do
|
|
||||||
tag_args+=(--tag "${tag}")
|
|
||||||
done
|
|
||||||
|
|
||||||
# Docker Buildx takes a list of target platforms (OS/arch/variant), so map
|
|
||||||
# the arch list to a platform list (assuming the OS is always `linux`).
|
|
||||||
declare -A arch_to_platform=(
|
|
||||||
[amd64]="linux/amd64"
|
|
||||||
[armv6]="linux/arm/v6"
|
|
||||||
[armv7]="linux/arm/v7"
|
|
||||||
[arm64]="linux/arm64"
|
|
||||||
)
|
|
||||||
platforms=()
|
|
||||||
for arch in "${arches[@]}"; do
|
|
||||||
platforms+=("${arch_to_platform[$arch]}")
|
|
||||||
done
|
|
||||||
platform="$(join "," "${platforms[@]}")"
|
|
||||||
|
|
||||||
# Run the build, pushing the resulting images and multi-arch manifest list to
|
|
||||||
# Docker Hub. The Dockerfile is read from stdin to avoid sending any build
|
|
||||||
# context, which isn't needed here since the actual cross-compiled images
|
|
||||||
# have already been built.
|
|
||||||
docker buildx build \
|
|
||||||
--network host \
|
|
||||||
--build-arg LOCAL_REPO="${LOCAL_REPO}" \
|
|
||||||
--build-arg DOCKER_TAG="${DOCKER_TAG}" \
|
|
||||||
--platform "${platform}" \
|
|
||||||
"${tag_args[@]}" \
|
|
||||||
--push \
|
|
||||||
- < ./docker/Dockerfile.buildx
|
|
|
@ -0,0 +1,19 @@
|
||||||
|
CREATE TABLE auth_requests (
|
||||||
|
uuid CHAR(36) NOT NULL PRIMARY KEY,
|
||||||
|
user_uuid CHAR(36) NOT NULL,
|
||||||
|
organization_uuid CHAR(36),
|
||||||
|
request_device_identifier CHAR(36) NOT NULL,
|
||||||
|
device_type INTEGER NOT NULL,
|
||||||
|
request_ip TEXT NOT NULL,
|
||||||
|
response_device_id CHAR(36),
|
||||||
|
access_code TEXT NOT NULL,
|
||||||
|
public_key TEXT NOT NULL,
|
||||||
|
enc_key TEXT NOT NULL,
|
||||||
|
master_password_hash TEXT NOT NULL,
|
||||||
|
approved BOOLEAN,
|
||||||
|
creation_date DATETIME NOT NULL,
|
||||||
|
response_date DATETIME,
|
||||||
|
authentication_date DATETIME,
|
||||||
|
FOREIGN KEY(user_uuid) REFERENCES users(uuid),
|
||||||
|
FOREIGN KEY(organization_uuid) REFERENCES organizations(uuid)
|
||||||
|
);
|
|
@ -0,0 +1,5 @@
|
||||||
|
ALTER TABLE auth_requests
|
||||||
|
MODIFY master_password_hash TEXT;
|
||||||
|
|
||||||
|
ALTER TABLE auth_requests
|
||||||
|
MODIFY enc_key TEXT;
|
|
@ -0,0 +1,2 @@
|
||||||
|
ALTER TABLE users_organizations
|
||||||
|
ADD COLUMN external_id TEXT;
|
2
migrations/mysql/2023-10-21-221242_add_cipher_key/up.sql
Normal file
2
migrations/mysql/2023-10-21-221242_add_cipher_key/up.sql
Normal file
|
@ -0,0 +1,2 @@
|
||||||
|
ALTER TABLE ciphers
|
||||||
|
ADD COLUMN `key` TEXT;
|
|
@ -0,0 +1 @@
|
||||||
|
ALTER TABLE attachments MODIFY file_size BIGINT NOT NULL;
|
|
@ -0,0 +1 @@
|
||||||
|
ALTER TABLE twofactor MODIFY last_used BIGINT NOT NULL;
|
|
@ -0,0 +1,19 @@
|
||||||
|
CREATE TABLE auth_requests (
|
||||||
|
uuid CHAR(36) NOT NULL PRIMARY KEY,
|
||||||
|
user_uuid CHAR(36) NOT NULL,
|
||||||
|
organization_uuid CHAR(36),
|
||||||
|
request_device_identifier CHAR(36) NOT NULL,
|
||||||
|
device_type INTEGER NOT NULL,
|
||||||
|
request_ip TEXT NOT NULL,
|
||||||
|
response_device_id CHAR(36),
|
||||||
|
access_code TEXT NOT NULL,
|
||||||
|
public_key TEXT NOT NULL,
|
||||||
|
enc_key TEXT NOT NULL,
|
||||||
|
master_password_hash TEXT NOT NULL,
|
||||||
|
approved BOOLEAN,
|
||||||
|
creation_date TIMESTAMP NOT NULL,
|
||||||
|
response_date TIMESTAMP,
|
||||||
|
authentication_date TIMESTAMP,
|
||||||
|
FOREIGN KEY(user_uuid) REFERENCES users(uuid),
|
||||||
|
FOREIGN KEY(organization_uuid) REFERENCES organizations(uuid)
|
||||||
|
);
|
|
@ -0,0 +1,5 @@
|
||||||
|
ALTER TABLE auth_requests
|
||||||
|
ALTER COLUMN master_password_hash DROP NOT NULL;
|
||||||
|
|
||||||
|
ALTER TABLE auth_requests
|
||||||
|
ALTER COLUMN enc_key DROP NOT NULL;
|
|
@ -0,0 +1,2 @@
|
||||||
|
ALTER TABLE users_organizations
|
||||||
|
ADD COLUMN external_id TEXT;
|
|
@ -0,0 +1,2 @@
|
||||||
|
ALTER TABLE ciphers
|
||||||
|
ADD COLUMN "key" TEXT;
|
|
@ -0,0 +1,3 @@
|
||||||
|
ALTER TABLE attachments
|
||||||
|
ALTER COLUMN file_size TYPE BIGINT,
|
||||||
|
ALTER COLUMN file_size SET NOT NULL;
|
|
@ -0,0 +1,3 @@
|
||||||
|
ALTER TABLE twofactor
|
||||||
|
ALTER COLUMN last_used TYPE BIGINT,
|
||||||
|
ALTER COLUMN last_used SET NOT NULL;
|
|
@ -0,0 +1,19 @@
|
||||||
|
CREATE TABLE auth_requests (
|
||||||
|
uuid TEXT NOT NULL PRIMARY KEY,
|
||||||
|
user_uuid TEXT NOT NULL,
|
||||||
|
organization_uuid TEXT,
|
||||||
|
request_device_identifier TEXT NOT NULL,
|
||||||
|
device_type INTEGER NOT NULL,
|
||||||
|
request_ip TEXT NOT NULL,
|
||||||
|
response_device_id TEXT,
|
||||||
|
access_code TEXT NOT NULL,
|
||||||
|
public_key TEXT NOT NULL,
|
||||||
|
enc_key TEXT NOT NULL,
|
||||||
|
master_password_hash TEXT NOT NULL,
|
||||||
|
approved BOOLEAN,
|
||||||
|
creation_date DATETIME NOT NULL,
|
||||||
|
response_date DATETIME,
|
||||||
|
authentication_date DATETIME,
|
||||||
|
FOREIGN KEY(user_uuid) REFERENCES users(uuid),
|
||||||
|
FOREIGN KEY(organization_uuid) REFERENCES organizations(uuid)
|
||||||
|
);
|
|
@ -0,0 +1,29 @@
|
||||||
|
-- Create new auth_requests table with master_password_hash as nullable column
|
||||||
|
CREATE TABLE auth_requests_new (
|
||||||
|
uuid TEXT NOT NULL PRIMARY KEY,
|
||||||
|
user_uuid TEXT NOT NULL,
|
||||||
|
organization_uuid TEXT,
|
||||||
|
request_device_identifier TEXT NOT NULL,
|
||||||
|
device_type INTEGER NOT NULL,
|
||||||
|
request_ip TEXT NOT NULL,
|
||||||
|
response_device_id TEXT,
|
||||||
|
access_code TEXT NOT NULL,
|
||||||
|
public_key TEXT NOT NULL,
|
||||||
|
enc_key TEXT,
|
||||||
|
master_password_hash TEXT,
|
||||||
|
approved BOOLEAN,
|
||||||
|
creation_date DATETIME NOT NULL,
|
||||||
|
response_date DATETIME,
|
||||||
|
authentication_date DATETIME,
|
||||||
|
FOREIGN KEY (user_uuid) REFERENCES users (uuid),
|
||||||
|
FOREIGN KEY (organization_uuid) REFERENCES organizations (uuid)
|
||||||
|
);
|
||||||
|
|
||||||
|
-- Transfer current data to new table
|
||||||
|
INSERT INTO auth_requests_new SELECT * FROM auth_requests;
|
||||||
|
|
||||||
|
-- Drop the old table
|
||||||
|
DROP TABLE auth_requests;
|
||||||
|
|
||||||
|
-- Rename the new table to the original name
|
||||||
|
ALTER TABLE auth_requests_new RENAME TO auth_requests;
|
|
@ -0,0 +1,2 @@
|
||||||
|
-- Add the external_id to the users_organizations table
|
||||||
|
ALTER TABLE "users_organizations" ADD COLUMN "external_id" TEXT;
|
|
@ -0,0 +1,2 @@
|
||||||
|
ALTER TABLE ciphers
|
||||||
|
ADD COLUMN "key" TEXT;
|
|
@ -0,0 +1 @@
|
||||||
|
-- Integer size in SQLite is already i64, so we don't need to do anything
|
|
@ -0,0 +1 @@
|
||||||
|
-- Integer size in SQLite is already i64, so we don't need to do anything
|
|
@ -1 +0,0 @@
|
||||||
1.70.0
|
|
4
rust-toolchain.toml
Normal file
4
rust-toolchain.toml
Normal file
|
@ -0,0 +1,4 @@
|
||||||
|
[toolchain]
|
||||||
|
channel = "1.79.0"
|
||||||
|
components = [ "rustfmt", "clippy" ]
|
||||||
|
profile = "minimal"
|
|
@ -13,14 +13,18 @@ use rocket::{
|
||||||
};
|
};
|
||||||
|
|
||||||
use crate::{
|
use crate::{
|
||||||
api::{core::log_event, unregister_push_device, ApiResult, EmptyResult, JsonResult, Notify, NumberOrString},
|
api::{
|
||||||
|
core::{log_event, two_factor},
|
||||||
|
unregister_push_device, ApiResult, EmptyResult, JsonResult, Notify,
|
||||||
|
},
|
||||||
auth::{decode_admin, encode_jwt, generate_admin_claims, ClientIp},
|
auth::{decode_admin, encode_jwt, generate_admin_claims, ClientIp},
|
||||||
config::ConfigBuilder,
|
config::ConfigBuilder,
|
||||||
db::{backup_database, get_sql_server_version, models::*, DbConn, DbConnType},
|
db::{backup_database, get_sql_server_version, models::*, DbConn, DbConnType},
|
||||||
error::{Error, MapResult},
|
error::{Error, MapResult},
|
||||||
mail,
|
mail,
|
||||||
util::{
|
util::{
|
||||||
docker_base_image, format_naive_datetime_local, get_display_size, get_reqwest_client, is_running_in_docker,
|
container_base_image, format_naive_datetime_local, get_display_size, get_reqwest_client,
|
||||||
|
is_running_in_container, NumberOrString,
|
||||||
},
|
},
|
||||||
CONFIG, VERSION,
|
CONFIG, VERSION,
|
||||||
};
|
};
|
||||||
|
@ -184,12 +188,11 @@ fn post_admin_login(data: Form<LoginForm>, cookies: &CookieJar<'_>, ip: ClientIp
|
||||||
let claims = generate_admin_claims();
|
let claims = generate_admin_claims();
|
||||||
let jwt = encode_jwt(&claims);
|
let jwt = encode_jwt(&claims);
|
||||||
|
|
||||||
let cookie = Cookie::build(COOKIE_NAME, jwt)
|
let cookie = Cookie::build((COOKIE_NAME, jwt))
|
||||||
.path(admin_path())
|
.path(admin_path())
|
||||||
.max_age(rocket::time::Duration::minutes(CONFIG.admin_session_lifetime()))
|
.max_age(rocket::time::Duration::minutes(CONFIG.admin_session_lifetime()))
|
||||||
.same_site(SameSite::Strict)
|
.same_site(SameSite::Strict)
|
||||||
.http_only(true)
|
.http_only(true);
|
||||||
.finish();
|
|
||||||
|
|
||||||
cookies.add(cookie);
|
cookies.add(cookie);
|
||||||
if let Some(redirect) = redirect {
|
if let Some(redirect) = redirect {
|
||||||
|
@ -279,12 +282,11 @@ async fn get_user_or_404(uuid: &str, conn: &mut DbConn) -> ApiResult<User> {
|
||||||
#[post("/invite", data = "<data>")]
|
#[post("/invite", data = "<data>")]
|
||||||
async fn invite_user(data: Json<InviteData>, _token: AdminToken, mut conn: DbConn) -> JsonResult {
|
async fn invite_user(data: Json<InviteData>, _token: AdminToken, mut conn: DbConn) -> JsonResult {
|
||||||
let data: InviteData = data.into_inner();
|
let data: InviteData = data.into_inner();
|
||||||
let email = data.email.clone();
|
|
||||||
if User::find_by_mail(&data.email, &mut conn).await.is_some() {
|
if User::find_by_mail(&data.email, &mut conn).await.is_some() {
|
||||||
err_code!("User already exists", Status::Conflict.code)
|
err_code!("User already exists", Status::Conflict.code)
|
||||||
}
|
}
|
||||||
|
|
||||||
let mut user = User::new(email);
|
let mut user = User::new(data.email);
|
||||||
|
|
||||||
async fn _generate_invite(user: &User, conn: &mut DbConn) -> EmptyResult {
|
async fn _generate_invite(user: &User, conn: &mut DbConn) -> EmptyResult {
|
||||||
if CONFIG.mail_enabled() {
|
if CONFIG.mail_enabled() {
|
||||||
|
@ -314,7 +316,7 @@ async fn test_smtp(data: Json<InviteData>, _token: AdminToken) -> EmptyResult {
|
||||||
|
|
||||||
#[get("/logout")]
|
#[get("/logout")]
|
||||||
fn logout(cookies: &CookieJar<'_>) -> Redirect {
|
fn logout(cookies: &CookieJar<'_>) -> Redirect {
|
||||||
cookies.remove(Cookie::build(COOKIE_NAME, "").path(admin_path()).finish());
|
cookies.remove(Cookie::build(COOKIE_NAME).path(admin_path()));
|
||||||
Redirect::to(admin_path())
|
Redirect::to(admin_path())
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -326,6 +328,10 @@ async fn get_users_json(_token: AdminToken, mut conn: DbConn) -> Json<Value> {
|
||||||
let mut usr = u.to_json(&mut conn).await;
|
let mut usr = u.to_json(&mut conn).await;
|
||||||
usr["UserEnabled"] = json!(u.enabled);
|
usr["UserEnabled"] = json!(u.enabled);
|
||||||
usr["CreatedAt"] = json!(format_naive_datetime_local(&u.created_at, DT_FMT));
|
usr["CreatedAt"] = json!(format_naive_datetime_local(&u.created_at, DT_FMT));
|
||||||
|
usr["LastActive"] = match u.last_active(&mut conn).await {
|
||||||
|
Some(dt) => json!(format_naive_datetime_local(&dt, DT_FMT)),
|
||||||
|
None => json!(None::<String>),
|
||||||
|
};
|
||||||
users_json.push(usr);
|
users_json.push(usr);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -340,7 +346,7 @@ async fn users_overview(_token: AdminToken, mut conn: DbConn) -> ApiResult<Html<
|
||||||
let mut usr = u.to_json(&mut conn).await;
|
let mut usr = u.to_json(&mut conn).await;
|
||||||
usr["cipher_count"] = json!(Cipher::count_owned_by_user(&u.uuid, &mut conn).await);
|
usr["cipher_count"] = json!(Cipher::count_owned_by_user(&u.uuid, &mut conn).await);
|
||||||
usr["attachment_count"] = json!(Attachment::count_by_user(&u.uuid, &mut conn).await);
|
usr["attachment_count"] = json!(Attachment::count_by_user(&u.uuid, &mut conn).await);
|
||||||
usr["attachment_size"] = json!(get_display_size(Attachment::size_by_user(&u.uuid, &mut conn).await as i32));
|
usr["attachment_size"] = json!(get_display_size(Attachment::size_by_user(&u.uuid, &mut conn).await));
|
||||||
usr["user_enabled"] = json!(u.enabled);
|
usr["user_enabled"] = json!(u.enabled);
|
||||||
usr["created_at"] = json!(format_naive_datetime_local(&u.created_at, DT_FMT));
|
usr["created_at"] = json!(format_naive_datetime_local(&u.created_at, DT_FMT));
|
||||||
usr["last_active"] = match u.last_active(&mut conn).await {
|
usr["last_active"] = match u.last_active(&mut conn).await {
|
||||||
|
@ -388,7 +394,7 @@ async fn delete_user(uuid: &str, token: AdminToken, mut conn: DbConn) -> EmptyRe
|
||||||
EventType::OrganizationUserRemoved as i32,
|
EventType::OrganizationUserRemoved as i32,
|
||||||
&user_org.uuid,
|
&user_org.uuid,
|
||||||
&user_org.org_uuid,
|
&user_org.org_uuid,
|
||||||
String::from(ACTING_ADMIN_USER),
|
ACTING_ADMIN_USER,
|
||||||
14, // Use UnknownBrowser type
|
14, // Use UnknownBrowser type
|
||||||
&token.ip.ip,
|
&token.ip.ip,
|
||||||
&mut conn,
|
&mut conn,
|
||||||
|
@ -407,7 +413,7 @@ async fn deauth_user(uuid: &str, _token: AdminToken, mut conn: DbConn, nt: Notif
|
||||||
|
|
||||||
if CONFIG.push_enabled() {
|
if CONFIG.push_enabled() {
|
||||||
for device in Device::find_push_devices_by_user(&user.uuid, &mut conn).await {
|
for device in Device::find_push_devices_by_user(&user.uuid, &mut conn).await {
|
||||||
match unregister_push_device(device.uuid).await {
|
match unregister_push_device(device.push_uuid).await {
|
||||||
Ok(r) => r,
|
Ok(r) => r,
|
||||||
Err(e) => error!("Unable to unregister devices from Bitwarden server: {}", e),
|
Err(e) => error!("Unable to unregister devices from Bitwarden server: {}", e),
|
||||||
};
|
};
|
||||||
|
@ -443,9 +449,10 @@ async fn enable_user(uuid: &str, _token: AdminToken, mut conn: DbConn) -> EmptyR
|
||||||
}
|
}
|
||||||
|
|
||||||
#[post("/users/<uuid>/remove-2fa")]
|
#[post("/users/<uuid>/remove-2fa")]
|
||||||
async fn remove_2fa(uuid: &str, _token: AdminToken, mut conn: DbConn) -> EmptyResult {
|
async fn remove_2fa(uuid: &str, token: AdminToken, mut conn: DbConn) -> EmptyResult {
|
||||||
let mut user = get_user_or_404(uuid, &mut conn).await?;
|
let mut user = get_user_or_404(uuid, &mut conn).await?;
|
||||||
TwoFactor::delete_all_by_user(&user.uuid, &mut conn).await?;
|
TwoFactor::delete_all_by_user(&user.uuid, &mut conn).await?;
|
||||||
|
two_factor::enforce_2fa_policy(&user, ACTING_ADMIN_USER, 14, &token.ip.ip, &mut conn).await?;
|
||||||
user.totp_recover = None;
|
user.totp_recover = None;
|
||||||
user.save(&mut conn).await
|
user.save(&mut conn).await
|
||||||
}
|
}
|
||||||
|
@ -503,7 +510,11 @@ async fn update_user_org_type(data: Json<UserOrgTypeData>, token: AdminToken, mu
|
||||||
match OrgPolicy::is_user_allowed(&user_to_edit.user_uuid, &user_to_edit.org_uuid, true, &mut conn).await {
|
match OrgPolicy::is_user_allowed(&user_to_edit.user_uuid, &user_to_edit.org_uuid, true, &mut conn).await {
|
||||||
Ok(_) => {}
|
Ok(_) => {}
|
||||||
Err(OrgPolicyErr::TwoFactorMissing) => {
|
Err(OrgPolicyErr::TwoFactorMissing) => {
|
||||||
err!("You cannot modify this user to this type because it has no two-step login method activated");
|
if CONFIG.email_2fa_auto_fallback() {
|
||||||
|
two_factor::email::find_and_activate_email_2fa(&user_to_edit.user_uuid, &mut conn).await?;
|
||||||
|
} else {
|
||||||
|
err!("You cannot modify this user to this type because they have not setup 2FA");
|
||||||
|
}
|
||||||
}
|
}
|
||||||
Err(OrgPolicyErr::SingleOrgEnforced) => {
|
Err(OrgPolicyErr::SingleOrgEnforced) => {
|
||||||
err!("You cannot modify this user to this type because it is a member of an organization which forbids it");
|
err!("You cannot modify this user to this type because it is a member of an organization which forbids it");
|
||||||
|
@ -515,7 +526,7 @@ async fn update_user_org_type(data: Json<UserOrgTypeData>, token: AdminToken, mu
|
||||||
EventType::OrganizationUserUpdated as i32,
|
EventType::OrganizationUserUpdated as i32,
|
||||||
&user_to_edit.uuid,
|
&user_to_edit.uuid,
|
||||||
&data.org_uuid,
|
&data.org_uuid,
|
||||||
String::from(ACTING_ADMIN_USER),
|
ACTING_ADMIN_USER,
|
||||||
14, // Use UnknownBrowser type
|
14, // Use UnknownBrowser type
|
||||||
&token.ip.ip,
|
&token.ip.ip,
|
||||||
&mut conn,
|
&mut conn,
|
||||||
|
@ -543,7 +554,7 @@ async fn organizations_overview(_token: AdminToken, mut conn: DbConn) -> ApiResu
|
||||||
org["group_count"] = json!(Group::count_by_org(&o.uuid, &mut conn).await);
|
org["group_count"] = json!(Group::count_by_org(&o.uuid, &mut conn).await);
|
||||||
org["event_count"] = json!(Event::count_by_org(&o.uuid, &mut conn).await);
|
org["event_count"] = json!(Event::count_by_org(&o.uuid, &mut conn).await);
|
||||||
org["attachment_count"] = json!(Attachment::count_by_org(&o.uuid, &mut conn).await);
|
org["attachment_count"] = json!(Attachment::count_by_org(&o.uuid, &mut conn).await);
|
||||||
org["attachment_size"] = json!(get_display_size(Attachment::size_by_org(&o.uuid, &mut conn).await as i32));
|
org["attachment_size"] = json!(get_display_size(Attachment::size_by_org(&o.uuid, &mut conn).await));
|
||||||
organizations_json.push(org);
|
organizations_json.push(org);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -601,7 +612,7 @@ use cached::proc_macro::cached;
|
||||||
/// Cache this function to prevent API call rate limit. Github only allows 60 requests per hour, and we use 3 here already.
|
/// Cache this function to prevent API call rate limit. Github only allows 60 requests per hour, and we use 3 here already.
|
||||||
/// It will cache this function for 300 seconds (5 minutes) which should prevent the exhaustion of the rate limit.
|
/// It will cache this function for 300 seconds (5 minutes) which should prevent the exhaustion of the rate limit.
|
||||||
#[cached(time = 300, sync_writes = true)]
|
#[cached(time = 300, sync_writes = true)]
|
||||||
async fn get_release_info(has_http_access: bool, running_within_docker: bool) -> (String, String, String) {
|
async fn get_release_info(has_http_access: bool, running_within_container: bool) -> (String, String, String) {
|
||||||
// If the HTTP Check failed, do not even attempt to check for new versions since we were not able to connect with github.com anyway.
|
// If the HTTP Check failed, do not even attempt to check for new versions since we were not able to connect with github.com anyway.
|
||||||
if has_http_access {
|
if has_http_access {
|
||||||
(
|
(
|
||||||
|
@ -618,9 +629,9 @@ async fn get_release_info(has_http_access: bool, running_within_docker: bool) ->
|
||||||
}
|
}
|
||||||
_ => "-".to_string(),
|
_ => "-".to_string(),
|
||||||
},
|
},
|
||||||
// Do not fetch the web-vault version when running within Docker.
|
// Do not fetch the web-vault version when running within a container.
|
||||||
// The web-vault version is embedded within the container it self, and should not be updated manually
|
// The web-vault version is embedded within the container it self, and should not be updated manually
|
||||||
if running_within_docker {
|
if running_within_container {
|
||||||
"-".to_string()
|
"-".to_string()
|
||||||
} else {
|
} else {
|
||||||
match get_json_api::<GitRelease>(
|
match get_json_api::<GitRelease>(
|
||||||
|
@ -674,7 +685,7 @@ async fn diagnostics(_token: AdminToken, ip_header: IpHeader, mut conn: DbConn)
|
||||||
};
|
};
|
||||||
|
|
||||||
// Execute some environment checks
|
// Execute some environment checks
|
||||||
let running_within_docker = is_running_in_docker();
|
let running_within_container = is_running_in_container();
|
||||||
let has_http_access = has_http_access().await;
|
let has_http_access = has_http_access().await;
|
||||||
let uses_proxy = env::var_os("HTTP_PROXY").is_some()
|
let uses_proxy = env::var_os("HTTP_PROXY").is_some()
|
||||||
|| env::var_os("http_proxy").is_some()
|
|| env::var_os("http_proxy").is_some()
|
||||||
|
@ -688,12 +699,9 @@ async fn diagnostics(_token: AdminToken, ip_header: IpHeader, mut conn: DbConn)
|
||||||
};
|
};
|
||||||
|
|
||||||
let (latest_release, latest_commit, latest_web_build) =
|
let (latest_release, latest_commit, latest_web_build) =
|
||||||
get_release_info(has_http_access, running_within_docker).await;
|
get_release_info(has_http_access, running_within_container).await;
|
||||||
|
|
||||||
let ip_header_name = match &ip_header.0 {
|
let ip_header_name = &ip_header.0.unwrap_or_default();
|
||||||
Some(h) => h,
|
|
||||||
_ => "",
|
|
||||||
};
|
|
||||||
|
|
||||||
let diagnostics_json = json!({
|
let diagnostics_json = json!({
|
||||||
"dns_resolved": dns_resolved,
|
"dns_resolved": dns_resolved,
|
||||||
|
@ -703,11 +711,11 @@ async fn diagnostics(_token: AdminToken, ip_header: IpHeader, mut conn: DbConn)
|
||||||
"web_vault_enabled": &CONFIG.web_vault_enabled(),
|
"web_vault_enabled": &CONFIG.web_vault_enabled(),
|
||||||
"web_vault_version": web_vault_version.version.trim_start_matches('v'),
|
"web_vault_version": web_vault_version.version.trim_start_matches('v'),
|
||||||
"latest_web_build": latest_web_build,
|
"latest_web_build": latest_web_build,
|
||||||
"running_within_docker": running_within_docker,
|
"running_within_container": running_within_container,
|
||||||
"docker_base_image": if running_within_docker { docker_base_image() } else { "Not applicable" },
|
"container_base_image": if running_within_container { container_base_image() } else { "Not applicable" },
|
||||||
"has_http_access": has_http_access,
|
"has_http_access": has_http_access,
|
||||||
"ip_header_exists": &ip_header.0.is_some(),
|
"ip_header_exists": !ip_header_name.is_empty(),
|
||||||
"ip_header_match": ip_header_name == CONFIG.ip_header(),
|
"ip_header_match": ip_header_name.eq(&CONFIG.ip_header()),
|
||||||
"ip_header_name": ip_header_name,
|
"ip_header_name": ip_header_name,
|
||||||
"ip_header_config": &CONFIG.ip_header(),
|
"ip_header_config": &CONFIG.ip_header(),
|
||||||
"uses_proxy": uses_proxy,
|
"uses_proxy": uses_proxy,
|
||||||
|
@ -783,16 +791,16 @@ impl<'r> FromRequest<'r> for AdminToken {
|
||||||
if requested_page.is_empty() {
|
if requested_page.is_empty() {
|
||||||
return Outcome::Forward(Status::Unauthorized);
|
return Outcome::Forward(Status::Unauthorized);
|
||||||
} else {
|
} else {
|
||||||
return Outcome::Failure((Status::Unauthorized, "Unauthorized"));
|
return Outcome::Error((Status::Unauthorized, "Unauthorized"));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
if decode_admin(access_token).is_err() {
|
if decode_admin(access_token).is_err() {
|
||||||
// Remove admin cookie
|
// Remove admin cookie
|
||||||
cookies.remove(Cookie::build(COOKIE_NAME, "").path(admin_path()).finish());
|
cookies.remove(Cookie::build(COOKIE_NAME).path(admin_path()));
|
||||||
error!("Invalid or expired admin JWT. IP: {}.", &ip.ip);
|
error!("Invalid or expired admin JWT. IP: {}.", &ip.ip);
|
||||||
return Outcome::Failure((Status::Unauthorized, "Session expired"));
|
return Outcome::Error((Status::Unauthorized, "Session expired"));
|
||||||
}
|
}
|
||||||
|
|
||||||
Outcome::Success(Self {
|
Outcome::Success(Self {
|
||||||
|
|
|
@ -1,16 +1,20 @@
|
||||||
|
use crate::db::DbPool;
|
||||||
use chrono::Utc;
|
use chrono::Utc;
|
||||||
use rocket::serde::json::Json;
|
use rocket::serde::json::Json;
|
||||||
use serde_json::Value;
|
use serde_json::Value;
|
||||||
|
|
||||||
use crate::{
|
use crate::{
|
||||||
api::{
|
api::{
|
||||||
core::log_user_event, register_push_device, unregister_push_device, EmptyResult, JsonResult, JsonUpcase,
|
core::{log_user_event, two_factor::email},
|
||||||
Notify, NumberOrString, PasswordData, UpdateType,
|
register_push_device, unregister_push_device, AnonymousNotify, EmptyResult, JsonResult, JsonUpcase, Notify,
|
||||||
|
PasswordOrOtpData, UpdateType,
|
||||||
},
|
},
|
||||||
auth::{decode_delete, decode_invite, decode_verify_email, Headers},
|
auth::{decode_delete, decode_invite, decode_verify_email, ClientHeaders, Headers},
|
||||||
crypto,
|
crypto,
|
||||||
db::{models::*, DbConn},
|
db::{models::*, DbConn},
|
||||||
mail, CONFIG,
|
mail,
|
||||||
|
util::NumberOrString,
|
||||||
|
CONFIG,
|
||||||
};
|
};
|
||||||
|
|
||||||
use rocket::{
|
use rocket::{
|
||||||
|
@ -46,11 +50,15 @@ pub fn routes() -> Vec<rocket::Route> {
|
||||||
api_key,
|
api_key,
|
||||||
rotate_api_key,
|
rotate_api_key,
|
||||||
get_known_device,
|
get_known_device,
|
||||||
get_known_device_from_path,
|
|
||||||
put_avatar,
|
put_avatar,
|
||||||
put_device_token,
|
put_device_token,
|
||||||
put_clear_device_token,
|
put_clear_device_token,
|
||||||
post_clear_device_token,
|
post_clear_device_token,
|
||||||
|
post_auth_request,
|
||||||
|
get_auth_request,
|
||||||
|
put_auth_request,
|
||||||
|
get_auth_request_response,
|
||||||
|
get_auth_requests,
|
||||||
]
|
]
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -96,6 +104,19 @@ fn enforce_password_hint_setting(password_hint: &Option<String>) -> EmptyResult
|
||||||
}
|
}
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
async fn is_email_2fa_required(org_user_uuid: Option<String>, conn: &mut DbConn) -> bool {
|
||||||
|
if !CONFIG._enable_email_2fa() {
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
if CONFIG.email_2fa_enforce_on_verified_invite() {
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
if org_user_uuid.is_some() {
|
||||||
|
return OrgPolicy::is_enabled_by_org(&org_user_uuid.unwrap(), OrgPolicyType::TwoFactorAuthentication, conn)
|
||||||
|
.await;
|
||||||
|
}
|
||||||
|
false
|
||||||
|
}
|
||||||
|
|
||||||
#[post("/accounts/register", data = "<data>")]
|
#[post("/accounts/register", data = "<data>")]
|
||||||
async fn register(data: JsonUpcase<RegisterData>, conn: DbConn) -> JsonResult {
|
async fn register(data: JsonUpcase<RegisterData>, conn: DbConn) -> JsonResult {
|
||||||
|
@ -144,7 +165,8 @@ pub async fn _register(data: JsonUpcase<RegisterData>, mut conn: DbConn) -> Json
|
||||||
}
|
}
|
||||||
user
|
user
|
||||||
} else if CONFIG.is_signup_allowed(&email)
|
} else if CONFIG.is_signup_allowed(&email)
|
||||||
|| EmergencyAccess::find_invited_by_grantee_email(&email, &mut conn).await.is_some()
|
|| (CONFIG.emergency_access_allowed()
|
||||||
|
&& EmergencyAccess::find_invited_by_grantee_email(&email, &mut conn).await.is_some())
|
||||||
{
|
{
|
||||||
user
|
user
|
||||||
} else {
|
} else {
|
||||||
|
@ -195,14 +217,25 @@ pub async fn _register(data: JsonUpcase<RegisterData>, mut conn: DbConn) -> Json
|
||||||
if let Err(e) = mail::send_welcome_must_verify(&user.email, &user.uuid).await {
|
if let Err(e) = mail::send_welcome_must_verify(&user.email, &user.uuid).await {
|
||||||
error!("Error sending welcome email: {:#?}", e);
|
error!("Error sending welcome email: {:#?}", e);
|
||||||
}
|
}
|
||||||
|
|
||||||
user.last_verifying_at = Some(user.created_at);
|
user.last_verifying_at = Some(user.created_at);
|
||||||
} else if let Err(e) = mail::send_welcome(&user.email).await {
|
} else if let Err(e) = mail::send_welcome(&user.email).await {
|
||||||
error!("Error sending welcome email: {:#?}", e);
|
error!("Error sending welcome email: {:#?}", e);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if verified_by_invite && is_email_2fa_required(data.OrganizationUserId, &mut conn).await {
|
||||||
|
let _ = email::activate_email_2fa(&user, &mut conn).await;
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
user.save(&mut conn).await?;
|
user.save(&mut conn).await?;
|
||||||
|
|
||||||
|
// accept any open emergency access invitations
|
||||||
|
if !CONFIG.mail_enabled() && CONFIG.emergency_access_allowed() {
|
||||||
|
for mut emergency_invite in EmergencyAccess::find_all_invited_by_grantee_email(&user.email, &mut conn).await {
|
||||||
|
let _ = emergency_invite.accept_invite(&user.uuid, &user.email, &mut conn).await;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
Ok(Json(json!({
|
Ok(Json(json!({
|
||||||
"Object": "register",
|
"Object": "register",
|
||||||
"CaptchaBypassToken": "",
|
"CaptchaBypassToken": "",
|
||||||
|
@ -273,8 +306,9 @@ async fn put_avatar(data: JsonUpcase<AvatarData>, headers: Headers, mut conn: Db
|
||||||
#[get("/users/<uuid>/public-key")]
|
#[get("/users/<uuid>/public-key")]
|
||||||
async fn get_public_keys(uuid: &str, _headers: Headers, mut conn: DbConn) -> JsonResult {
|
async fn get_public_keys(uuid: &str, _headers: Headers, mut conn: DbConn) -> JsonResult {
|
||||||
let user = match User::find_by_uuid(uuid, &mut conn).await {
|
let user = match User::find_by_uuid(uuid, &mut conn).await {
|
||||||
Some(user) => user,
|
Some(user) if user.public_key.is_some() => user,
|
||||||
None => err!("User doesn't exist"),
|
Some(_) => err_code!("User has no public_key", Status::NotFound.code),
|
||||||
|
None => err_code!("User doesn't exist", Status::NotFound.code),
|
||||||
};
|
};
|
||||||
|
|
||||||
Ok(Json(json!({
|
Ok(Json(json!({
|
||||||
|
@ -340,7 +374,7 @@ async fn post_password(
|
||||||
|
|
||||||
let save_result = user.save(&mut conn).await;
|
let save_result = user.save(&mut conn).await;
|
||||||
|
|
||||||
// Prevent loging out the client where the user requested this endpoint from.
|
// Prevent logging out the client where the user requested this endpoint from.
|
||||||
// If you do logout the user it will causes issues at the client side.
|
// If you do logout the user it will causes issues at the client side.
|
||||||
// Adding the device uuid will prevent this.
|
// Adding the device uuid will prevent this.
|
||||||
nt.send_logout(&user, Some(headers.device.uuid)).await;
|
nt.send_logout(&user, Some(headers.device.uuid)).await;
|
||||||
|
@ -411,24 +445,46 @@ async fn post_kdf(data: JsonUpcase<ChangeKdfData>, headers: Headers, mut conn: D
|
||||||
#[derive(Deserialize)]
|
#[derive(Deserialize)]
|
||||||
#[allow(non_snake_case)]
|
#[allow(non_snake_case)]
|
||||||
struct UpdateFolderData {
|
struct UpdateFolderData {
|
||||||
Id: String,
|
// There is a bug in 2024.3.x which adds a `null` item.
|
||||||
|
// To bypass this we allow a Option here, but skip it during the updates
|
||||||
|
// See: https://github.com/bitwarden/clients/issues/8453
|
||||||
|
Id: Option<String>,
|
||||||
Name: String,
|
Name: String,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[derive(Deserialize)]
|
||||||
|
#[allow(non_snake_case)]
|
||||||
|
struct UpdateEmergencyAccessData {
|
||||||
|
Id: String,
|
||||||
|
KeyEncrypted: String,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Deserialize)]
|
||||||
|
#[allow(non_snake_case)]
|
||||||
|
struct UpdateResetPasswordData {
|
||||||
|
OrganizationId: String,
|
||||||
|
ResetPasswordKey: String,
|
||||||
|
}
|
||||||
|
|
||||||
use super::ciphers::CipherData;
|
use super::ciphers::CipherData;
|
||||||
|
use super::sends::{update_send_from_data, SendData};
|
||||||
|
|
||||||
#[derive(Deserialize)]
|
#[derive(Deserialize)]
|
||||||
#[allow(non_snake_case)]
|
#[allow(non_snake_case)]
|
||||||
struct KeyData {
|
struct KeyData {
|
||||||
Ciphers: Vec<CipherData>,
|
Ciphers: Vec<CipherData>,
|
||||||
Folders: Vec<UpdateFolderData>,
|
Folders: Vec<UpdateFolderData>,
|
||||||
|
Sends: Vec<SendData>,
|
||||||
|
EmergencyAccessKeys: Vec<UpdateEmergencyAccessData>,
|
||||||
|
ResetPasswordKeys: Vec<UpdateResetPasswordData>,
|
||||||
Key: String,
|
Key: String,
|
||||||
PrivateKey: String,
|
|
||||||
MasterPasswordHash: String,
|
MasterPasswordHash: String,
|
||||||
|
PrivateKey: String,
|
||||||
}
|
}
|
||||||
|
|
||||||
#[post("/accounts/key", data = "<data>")]
|
#[post("/accounts/key", data = "<data>")]
|
||||||
async fn post_rotatekey(data: JsonUpcase<KeyData>, headers: Headers, mut conn: DbConn, nt: Notify<'_>) -> EmptyResult {
|
async fn post_rotatekey(data: JsonUpcase<KeyData>, headers: Headers, mut conn: DbConn, nt: Notify<'_>) -> EmptyResult {
|
||||||
|
// TODO: See if we can wrap everything within a SQL Transaction. If something fails it should revert everything.
|
||||||
let data: KeyData = data.into_inner().data;
|
let data: KeyData = data.into_inner().data;
|
||||||
|
|
||||||
if !headers.user.check_valid_password(&data.MasterPasswordHash) {
|
if !headers.user.check_valid_password(&data.MasterPasswordHash) {
|
||||||
|
@ -445,37 +501,83 @@ async fn post_rotatekey(data: JsonUpcase<KeyData>, headers: Headers, mut conn: D
|
||||||
|
|
||||||
// Update folder data
|
// Update folder data
|
||||||
for folder_data in data.Folders {
|
for folder_data in data.Folders {
|
||||||
let mut saved_folder = match Folder::find_by_uuid(&folder_data.Id, &mut conn).await {
|
// Skip `null` folder id entries.
|
||||||
Some(folder) => folder,
|
// See: https://github.com/bitwarden/clients/issues/8453
|
||||||
None => err!("Folder doesn't exist"),
|
if let Some(folder_id) = folder_data.Id {
|
||||||
|
let mut saved_folder = match Folder::find_by_uuid(&folder_id, &mut conn).await {
|
||||||
|
Some(folder) => folder,
|
||||||
|
None => err!("Folder doesn't exist"),
|
||||||
|
};
|
||||||
|
|
||||||
|
if &saved_folder.user_uuid != user_uuid {
|
||||||
|
err!("The folder is not owned by the user")
|
||||||
|
}
|
||||||
|
|
||||||
|
saved_folder.name = folder_data.Name;
|
||||||
|
saved_folder.save(&mut conn).await?
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Update emergency access data
|
||||||
|
for emergency_access_data in data.EmergencyAccessKeys {
|
||||||
|
let mut saved_emergency_access = match EmergencyAccess::find_by_uuid(&emergency_access_data.Id, &mut conn).await
|
||||||
|
{
|
||||||
|
Some(emergency_access) => emergency_access,
|
||||||
|
None => err!("Emergency access doesn't exist"),
|
||||||
};
|
};
|
||||||
|
|
||||||
if &saved_folder.user_uuid != user_uuid {
|
if &saved_emergency_access.grantor_uuid != user_uuid {
|
||||||
err!("The folder is not owned by the user")
|
err!("The emergency access is not owned by the user")
|
||||||
}
|
}
|
||||||
|
|
||||||
saved_folder.name = folder_data.Name;
|
saved_emergency_access.key_encrypted = Some(emergency_access_data.KeyEncrypted);
|
||||||
saved_folder.save(&mut conn).await?
|
saved_emergency_access.save(&mut conn).await?
|
||||||
|
}
|
||||||
|
|
||||||
|
// Update reset password data
|
||||||
|
for reset_password_data in data.ResetPasswordKeys {
|
||||||
|
let mut user_org =
|
||||||
|
match UserOrganization::find_by_user_and_org(user_uuid, &reset_password_data.OrganizationId, &mut conn)
|
||||||
|
.await
|
||||||
|
{
|
||||||
|
Some(reset_password) => reset_password,
|
||||||
|
None => err!("Reset password doesn't exist"),
|
||||||
|
};
|
||||||
|
|
||||||
|
user_org.reset_password_key = Some(reset_password_data.ResetPasswordKey);
|
||||||
|
user_org.save(&mut conn).await?
|
||||||
|
}
|
||||||
|
|
||||||
|
// Update send data
|
||||||
|
for send_data in data.Sends {
|
||||||
|
let mut send = match Send::find_by_uuid(send_data.Id.as_ref().unwrap(), &mut conn).await {
|
||||||
|
Some(send) => send,
|
||||||
|
None => err!("Send doesn't exist"),
|
||||||
|
};
|
||||||
|
|
||||||
|
update_send_from_data(&mut send, send_data, &headers, &mut conn, &nt, UpdateType::None).await?;
|
||||||
}
|
}
|
||||||
|
|
||||||
// Update cipher data
|
// Update cipher data
|
||||||
use super::ciphers::update_cipher_from_data;
|
use super::ciphers::update_cipher_from_data;
|
||||||
|
|
||||||
for cipher_data in data.Ciphers {
|
for cipher_data in data.Ciphers {
|
||||||
let mut saved_cipher = match Cipher::find_by_uuid(cipher_data.Id.as_ref().unwrap(), &mut conn).await {
|
if cipher_data.OrganizationId.is_none() {
|
||||||
Some(cipher) => cipher,
|
let mut saved_cipher = match Cipher::find_by_uuid(cipher_data.Id.as_ref().unwrap(), &mut conn).await {
|
||||||
None => err!("Cipher doesn't exist"),
|
Some(cipher) => cipher,
|
||||||
};
|
None => err!("Cipher doesn't exist"),
|
||||||
|
};
|
||||||
|
|
||||||
if saved_cipher.user_uuid.as_ref().unwrap() != user_uuid {
|
if saved_cipher.user_uuid.as_ref().unwrap() != user_uuid {
|
||||||
err!("The cipher is not owned by the user")
|
err!("The cipher is not owned by the user")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Prevent triggering cipher updates via WebSockets by settings UpdateType::None
|
||||||
|
// The user sessions are invalidated because all the ciphers were re-encrypted and thus triggering an update could cause issues.
|
||||||
|
// We force the users to logout after the user has been saved to try and prevent these issues.
|
||||||
|
update_cipher_from_data(&mut saved_cipher, cipher_data, &headers, None, &mut conn, &nt, UpdateType::None)
|
||||||
|
.await?
|
||||||
}
|
}
|
||||||
|
|
||||||
// Prevent triggering cipher updates via WebSockets by settings UpdateType::None
|
|
||||||
// The user sessions are invalidated because all the ciphers were re-encrypted and thus triggering an update could cause issues.
|
|
||||||
// We force the users to logout after the user has been saved to try and prevent these issues.
|
|
||||||
update_cipher_from_data(&mut saved_cipher, cipher_data, &headers, false, &mut conn, &nt, UpdateType::None)
|
|
||||||
.await?
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Update user data
|
// Update user data
|
||||||
|
@ -487,7 +589,7 @@ async fn post_rotatekey(data: JsonUpcase<KeyData>, headers: Headers, mut conn: D
|
||||||
|
|
||||||
let save_result = user.save(&mut conn).await;
|
let save_result = user.save(&mut conn).await;
|
||||||
|
|
||||||
// Prevent loging out the client where the user requested this endpoint from.
|
// Prevent logging out the client where the user requested this endpoint from.
|
||||||
// If you do logout the user it will causes issues at the client side.
|
// If you do logout the user it will causes issues at the client side.
|
||||||
// Adding the device uuid will prevent this.
|
// Adding the device uuid will prevent this.
|
||||||
nt.send_logout(&user, Some(headers.device.uuid)).await;
|
nt.send_logout(&user, Some(headers.device.uuid)).await;
|
||||||
|
@ -497,17 +599,15 @@ async fn post_rotatekey(data: JsonUpcase<KeyData>, headers: Headers, mut conn: D
|
||||||
|
|
||||||
#[post("/accounts/security-stamp", data = "<data>")]
|
#[post("/accounts/security-stamp", data = "<data>")]
|
||||||
async fn post_sstamp(
|
async fn post_sstamp(
|
||||||
data: JsonUpcase<PasswordData>,
|
data: JsonUpcase<PasswordOrOtpData>,
|
||||||
headers: Headers,
|
headers: Headers,
|
||||||
mut conn: DbConn,
|
mut conn: DbConn,
|
||||||
nt: Notify<'_>,
|
nt: Notify<'_>,
|
||||||
) -> EmptyResult {
|
) -> EmptyResult {
|
||||||
let data: PasswordData = data.into_inner().data;
|
let data: PasswordOrOtpData = data.into_inner().data;
|
||||||
let mut user = headers.user;
|
let mut user = headers.user;
|
||||||
|
|
||||||
if !user.check_valid_password(&data.MasterPasswordHash) {
|
data.validate(&user, true, &mut conn).await?;
|
||||||
err!("Invalid password")
|
|
||||||
}
|
|
||||||
|
|
||||||
Device::delete_all_by_user(&user.uuid, &mut conn).await?;
|
Device::delete_all_by_user(&user.uuid, &mut conn).await?;
|
||||||
user.reset_security_stamp();
|
user.reset_security_stamp();
|
||||||
|
@ -527,6 +627,10 @@ struct EmailTokenData {
|
||||||
|
|
||||||
#[post("/accounts/email-token", data = "<data>")]
|
#[post("/accounts/email-token", data = "<data>")]
|
||||||
async fn post_email_token(data: JsonUpcase<EmailTokenData>, headers: Headers, mut conn: DbConn) -> EmptyResult {
|
async fn post_email_token(data: JsonUpcase<EmailTokenData>, headers: Headers, mut conn: DbConn) -> EmptyResult {
|
||||||
|
if !CONFIG.email_change_allowed() {
|
||||||
|
err!("Email change is not allowed.");
|
||||||
|
}
|
||||||
|
|
||||||
let data: EmailTokenData = data.into_inner().data;
|
let data: EmailTokenData = data.into_inner().data;
|
||||||
let mut user = headers.user;
|
let mut user = headers.user;
|
||||||
|
|
||||||
|
@ -548,6 +652,8 @@ async fn post_email_token(data: JsonUpcase<EmailTokenData>, headers: Headers, mu
|
||||||
if let Err(e) = mail::send_change_email(&data.NewEmail, &token).await {
|
if let Err(e) = mail::send_change_email(&data.NewEmail, &token).await {
|
||||||
error!("Error sending change-email email: {:#?}", e);
|
error!("Error sending change-email email: {:#?}", e);
|
||||||
}
|
}
|
||||||
|
} else {
|
||||||
|
debug!("Email change request for user ({}) to email ({}) with token ({})", user.uuid, data.NewEmail, token);
|
||||||
}
|
}
|
||||||
|
|
||||||
user.email_new = Some(data.NewEmail);
|
user.email_new = Some(data.NewEmail);
|
||||||
|
@ -573,6 +679,10 @@ async fn post_email(
|
||||||
mut conn: DbConn,
|
mut conn: DbConn,
|
||||||
nt: Notify<'_>,
|
nt: Notify<'_>,
|
||||||
) -> EmptyResult {
|
) -> EmptyResult {
|
||||||
|
if !CONFIG.email_change_allowed() {
|
||||||
|
err!("Email change is not allowed.");
|
||||||
|
}
|
||||||
|
|
||||||
let data: ChangeEmailData = data.into_inner().data;
|
let data: ChangeEmailData = data.into_inner().data;
|
||||||
let mut user = headers.user;
|
let mut user = headers.user;
|
||||||
|
|
||||||
|
@ -722,25 +832,23 @@ async fn post_delete_recover_token(data: JsonUpcase<DeleteRecoverTokenData>, mut
|
||||||
}
|
}
|
||||||
|
|
||||||
#[post("/accounts/delete", data = "<data>")]
|
#[post("/accounts/delete", data = "<data>")]
|
||||||
async fn post_delete_account(data: JsonUpcase<PasswordData>, headers: Headers, conn: DbConn) -> EmptyResult {
|
async fn post_delete_account(data: JsonUpcase<PasswordOrOtpData>, headers: Headers, conn: DbConn) -> EmptyResult {
|
||||||
delete_account(data, headers, conn).await
|
delete_account(data, headers, conn).await
|
||||||
}
|
}
|
||||||
|
|
||||||
#[delete("/accounts", data = "<data>")]
|
#[delete("/accounts", data = "<data>")]
|
||||||
async fn delete_account(data: JsonUpcase<PasswordData>, headers: Headers, mut conn: DbConn) -> EmptyResult {
|
async fn delete_account(data: JsonUpcase<PasswordOrOtpData>, headers: Headers, mut conn: DbConn) -> EmptyResult {
|
||||||
let data: PasswordData = data.into_inner().data;
|
let data: PasswordOrOtpData = data.into_inner().data;
|
||||||
let user = headers.user;
|
let user = headers.user;
|
||||||
|
|
||||||
if !user.check_valid_password(&data.MasterPasswordHash) {
|
data.validate(&user, true, &mut conn).await?;
|
||||||
err!("Invalid password")
|
|
||||||
}
|
|
||||||
|
|
||||||
user.delete(&mut conn).await
|
user.delete(&mut conn).await
|
||||||
}
|
}
|
||||||
|
|
||||||
#[get("/accounts/revision-date")]
|
#[get("/accounts/revision-date")]
|
||||||
fn revision_date(headers: Headers) -> JsonResult {
|
fn revision_date(headers: Headers) -> JsonResult {
|
||||||
let revision_date = headers.user.updated_at.timestamp_millis();
|
let revision_date = headers.user.updated_at.and_utc().timestamp_millis();
|
||||||
Ok(Json(json!(revision_date)))
|
Ok(Json(json!(revision_date)))
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -840,20 +948,13 @@ fn verify_password(data: JsonUpcase<SecretVerificationRequest>, headers: Headers
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
async fn _api_key(
|
async fn _api_key(data: JsonUpcase<PasswordOrOtpData>, rotate: bool, headers: Headers, mut conn: DbConn) -> JsonResult {
|
||||||
data: JsonUpcase<SecretVerificationRequest>,
|
|
||||||
rotate: bool,
|
|
||||||
headers: Headers,
|
|
||||||
mut conn: DbConn,
|
|
||||||
) -> JsonResult {
|
|
||||||
use crate::util::format_date;
|
use crate::util::format_date;
|
||||||
|
|
||||||
let data: SecretVerificationRequest = data.into_inner().data;
|
let data: PasswordOrOtpData = data.into_inner().data;
|
||||||
let mut user = headers.user;
|
let mut user = headers.user;
|
||||||
|
|
||||||
if !user.check_valid_password(&data.MasterPasswordHash) {
|
data.validate(&user, true, &mut conn).await?;
|
||||||
err!("Invalid password")
|
|
||||||
}
|
|
||||||
|
|
||||||
if rotate || user.api_key.is_none() {
|
if rotate || user.api_key.is_none() {
|
||||||
user.api_key = Some(crypto::generate_api_key());
|
user.api_key = Some(crypto::generate_api_key());
|
||||||
|
@ -868,31 +969,24 @@ async fn _api_key(
|
||||||
}
|
}
|
||||||
|
|
||||||
#[post("/accounts/api-key", data = "<data>")]
|
#[post("/accounts/api-key", data = "<data>")]
|
||||||
async fn api_key(data: JsonUpcase<SecretVerificationRequest>, headers: Headers, conn: DbConn) -> JsonResult {
|
async fn api_key(data: JsonUpcase<PasswordOrOtpData>, headers: Headers, conn: DbConn) -> JsonResult {
|
||||||
_api_key(data, false, headers, conn).await
|
_api_key(data, false, headers, conn).await
|
||||||
}
|
}
|
||||||
|
|
||||||
#[post("/accounts/rotate-api-key", data = "<data>")]
|
#[post("/accounts/rotate-api-key", data = "<data>")]
|
||||||
async fn rotate_api_key(data: JsonUpcase<SecretVerificationRequest>, headers: Headers, conn: DbConn) -> JsonResult {
|
async fn rotate_api_key(data: JsonUpcase<PasswordOrOtpData>, headers: Headers, conn: DbConn) -> JsonResult {
|
||||||
_api_key(data, true, headers, conn).await
|
_api_key(data, true, headers, conn).await
|
||||||
}
|
}
|
||||||
|
|
||||||
// This variant is deprecated: https://github.com/bitwarden/server/pull/2682
|
#[get("/devices/knowndevice")]
|
||||||
#[get("/devices/knowndevice/<email>/<uuid>")]
|
async fn get_known_device(device: KnownDevice, mut conn: DbConn) -> JsonResult {
|
||||||
async fn get_known_device_from_path(email: &str, uuid: &str, mut conn: DbConn) -> JsonResult {
|
|
||||||
// This endpoint doesn't have auth header
|
|
||||||
let mut result = false;
|
let mut result = false;
|
||||||
if let Some(user) = User::find_by_mail(email, &mut conn).await {
|
if let Some(user) = User::find_by_mail(&device.email, &mut conn).await {
|
||||||
result = Device::find_by_uuid_and_user(uuid, &user.uuid, &mut conn).await.is_some();
|
result = Device::find_by_uuid_and_user(&device.uuid, &user.uuid, &mut conn).await.is_some();
|
||||||
}
|
}
|
||||||
Ok(Json(json!(result)))
|
Ok(Json(json!(result)))
|
||||||
}
|
}
|
||||||
|
|
||||||
#[get("/devices/knowndevice")]
|
|
||||||
async fn get_known_device(device: KnownDevice, conn: DbConn) -> JsonResult {
|
|
||||||
get_known_device_from_path(&device.email, &device.uuid, conn).await
|
|
||||||
}
|
|
||||||
|
|
||||||
struct KnownDevice {
|
struct KnownDevice {
|
||||||
email: String,
|
email: String,
|
||||||
uuid: String,
|
uuid: String,
|
||||||
|
@ -907,26 +1001,23 @@ impl<'r> FromRequest<'r> for KnownDevice {
|
||||||
let email_bytes = match data_encoding::BASE64URL_NOPAD.decode(email_b64.as_bytes()) {
|
let email_bytes = match data_encoding::BASE64URL_NOPAD.decode(email_b64.as_bytes()) {
|
||||||
Ok(bytes) => bytes,
|
Ok(bytes) => bytes,
|
||||||
Err(_) => {
|
Err(_) => {
|
||||||
return Outcome::Failure((
|
return Outcome::Error((Status::BadRequest, "X-Request-Email value failed to decode as base64url"));
|
||||||
Status::BadRequest,
|
|
||||||
"X-Request-Email value failed to decode as base64url",
|
|
||||||
));
|
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
match String::from_utf8(email_bytes) {
|
match String::from_utf8(email_bytes) {
|
||||||
Ok(email) => email,
|
Ok(email) => email,
|
||||||
Err(_) => {
|
Err(_) => {
|
||||||
return Outcome::Failure((Status::BadRequest, "X-Request-Email value failed to decode as UTF-8"));
|
return Outcome::Error((Status::BadRequest, "X-Request-Email value failed to decode as UTF-8"));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
return Outcome::Failure((Status::BadRequest, "X-Request-Email value is required"));
|
return Outcome::Error((Status::BadRequest, "X-Request-Email value is required"));
|
||||||
};
|
};
|
||||||
|
|
||||||
let uuid = if let Some(uuid) = req.headers().get_one("X-Device-Identifier") {
|
let uuid = if let Some(uuid) = req.headers().get_one("X-Device-Identifier") {
|
||||||
uuid.to_string()
|
uuid.to_string()
|
||||||
} else {
|
} else {
|
||||||
return Outcome::Failure((Status::BadRequest, "X-Device-Identifier value is required"));
|
return Outcome::Error((Status::BadRequest, "X-Device-Identifier value is required"));
|
||||||
};
|
};
|
||||||
|
|
||||||
Outcome::Success(KnownDevice {
|
Outcome::Success(KnownDevice {
|
||||||
|
@ -949,27 +1040,34 @@ async fn post_device_token(uuid: &str, data: JsonUpcase<PushToken>, headers: Hea
|
||||||
|
|
||||||
#[put("/devices/identifier/<uuid>/token", data = "<data>")]
|
#[put("/devices/identifier/<uuid>/token", data = "<data>")]
|
||||||
async fn put_device_token(uuid: &str, data: JsonUpcase<PushToken>, headers: Headers, mut conn: DbConn) -> EmptyResult {
|
async fn put_device_token(uuid: &str, data: JsonUpcase<PushToken>, headers: Headers, mut conn: DbConn) -> EmptyResult {
|
||||||
if !CONFIG.push_enabled() {
|
|
||||||
return Ok(());
|
|
||||||
}
|
|
||||||
|
|
||||||
let data = data.into_inner().data;
|
let data = data.into_inner().data;
|
||||||
let token = data.PushToken;
|
let token = data.PushToken;
|
||||||
|
|
||||||
let mut device = match Device::find_by_uuid_and_user(&headers.device.uuid, &headers.user.uuid, &mut conn).await {
|
let mut device = match Device::find_by_uuid_and_user(&headers.device.uuid, &headers.user.uuid, &mut conn).await {
|
||||||
Some(device) => device,
|
Some(device) => device,
|
||||||
None => err!(format!("Error: device {uuid} should be present before a token can be assigned")),
|
None => err!(format!("Error: device {uuid} should be present before a token can be assigned")),
|
||||||
};
|
};
|
||||||
|
|
||||||
|
// if the device already has been registered
|
||||||
|
if device.is_registered() {
|
||||||
|
// check if the new token is the same as the registered token
|
||||||
|
if device.push_token.is_some() && device.push_token.unwrap() == token.clone() {
|
||||||
|
debug!("Device {} is already registered and token is the same", uuid);
|
||||||
|
return Ok(());
|
||||||
|
} else {
|
||||||
|
// Try to unregister already registered device
|
||||||
|
let _ = unregister_push_device(device.push_uuid).await;
|
||||||
|
}
|
||||||
|
// clear the push_uuid
|
||||||
|
device.push_uuid = None;
|
||||||
|
}
|
||||||
device.push_token = Some(token);
|
device.push_token = Some(token);
|
||||||
if device.push_uuid.is_none() {
|
|
||||||
device.push_uuid = Some(uuid::Uuid::new_v4().to_string());
|
|
||||||
}
|
|
||||||
if let Err(e) = device.save(&mut conn).await {
|
if let Err(e) = device.save(&mut conn).await {
|
||||||
err!(format!("An error occured while trying to save the device push token: {e}"));
|
err!(format!("An error occurred while trying to save the device push token: {e}"));
|
||||||
}
|
|
||||||
if let Err(e) = register_push_device(headers.user.uuid, device).await {
|
|
||||||
err!(format!("An error occured while proceeding registration of a device: {e}"));
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
register_push_device(&mut device, &mut conn).await?;
|
||||||
|
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -985,7 +1083,7 @@ async fn put_clear_device_token(uuid: &str, mut conn: DbConn) -> EmptyResult {
|
||||||
|
|
||||||
if let Some(device) = Device::find_by_uuid(uuid, &mut conn).await {
|
if let Some(device) = Device::find_by_uuid(uuid, &mut conn).await {
|
||||||
Device::clear_push_token_by_uuid(uuid, &mut conn).await?;
|
Device::clear_push_token_by_uuid(uuid, &mut conn).await?;
|
||||||
unregister_push_device(device.uuid).await?;
|
unregister_push_device(device.push_uuid).await?;
|
||||||
}
|
}
|
||||||
|
|
||||||
Ok(())
|
Ok(())
|
||||||
|
@ -996,3 +1094,211 @@ async fn put_clear_device_token(uuid: &str, mut conn: DbConn) -> EmptyResult {
|
||||||
async fn post_clear_device_token(uuid: &str, conn: DbConn) -> EmptyResult {
|
async fn post_clear_device_token(uuid: &str, conn: DbConn) -> EmptyResult {
|
||||||
put_clear_device_token(uuid, conn).await
|
put_clear_device_token(uuid, conn).await
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[derive(Debug, Deserialize)]
|
||||||
|
#[allow(non_snake_case)]
|
||||||
|
struct AuthRequestRequest {
|
||||||
|
accessCode: String,
|
||||||
|
deviceIdentifier: String,
|
||||||
|
email: String,
|
||||||
|
publicKey: String,
|
||||||
|
#[serde(alias = "type")]
|
||||||
|
_type: i32,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[post("/auth-requests", data = "<data>")]
|
||||||
|
async fn post_auth_request(
|
||||||
|
data: Json<AuthRequestRequest>,
|
||||||
|
headers: ClientHeaders,
|
||||||
|
mut conn: DbConn,
|
||||||
|
nt: Notify<'_>,
|
||||||
|
) -> JsonResult {
|
||||||
|
let data = data.into_inner();
|
||||||
|
|
||||||
|
let user = match User::find_by_mail(&data.email, &mut conn).await {
|
||||||
|
Some(user) => user,
|
||||||
|
None => {
|
||||||
|
err!("AuthRequest doesn't exist")
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
let mut auth_request = AuthRequest::new(
|
||||||
|
user.uuid.clone(),
|
||||||
|
data.deviceIdentifier.clone(),
|
||||||
|
headers.device_type,
|
||||||
|
headers.ip.ip.to_string(),
|
||||||
|
data.accessCode,
|
||||||
|
data.publicKey,
|
||||||
|
);
|
||||||
|
auth_request.save(&mut conn).await?;
|
||||||
|
|
||||||
|
nt.send_auth_request(&user.uuid, &auth_request.uuid, &data.deviceIdentifier, &mut conn).await;
|
||||||
|
|
||||||
|
Ok(Json(json!({
|
||||||
|
"id": auth_request.uuid,
|
||||||
|
"publicKey": auth_request.public_key,
|
||||||
|
"requestDeviceType": DeviceType::from_i32(auth_request.device_type).to_string(),
|
||||||
|
"requestIpAddress": auth_request.request_ip,
|
||||||
|
"key": null,
|
||||||
|
"masterPasswordHash": null,
|
||||||
|
"creationDate": auth_request.creation_date.and_utc(),
|
||||||
|
"responseDate": null,
|
||||||
|
"requestApproved": false,
|
||||||
|
"origin": CONFIG.domain_origin(),
|
||||||
|
"object": "auth-request"
|
||||||
|
})))
|
||||||
|
}
|
||||||
|
|
||||||
|
#[get("/auth-requests/<uuid>")]
|
||||||
|
async fn get_auth_request(uuid: &str, mut conn: DbConn) -> JsonResult {
|
||||||
|
let auth_request = match AuthRequest::find_by_uuid(uuid, &mut conn).await {
|
||||||
|
Some(auth_request) => auth_request,
|
||||||
|
None => {
|
||||||
|
err!("AuthRequest doesn't exist")
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
let response_date_utc = auth_request.response_date.map(|response_date| response_date.and_utc());
|
||||||
|
|
||||||
|
Ok(Json(json!(
|
||||||
|
{
|
||||||
|
"id": uuid,
|
||||||
|
"publicKey": auth_request.public_key,
|
||||||
|
"requestDeviceType": DeviceType::from_i32(auth_request.device_type).to_string(),
|
||||||
|
"requestIpAddress": auth_request.request_ip,
|
||||||
|
"key": auth_request.enc_key,
|
||||||
|
"masterPasswordHash": auth_request.master_password_hash,
|
||||||
|
"creationDate": auth_request.creation_date.and_utc(),
|
||||||
|
"responseDate": response_date_utc,
|
||||||
|
"requestApproved": auth_request.approved,
|
||||||
|
"origin": CONFIG.domain_origin(),
|
||||||
|
"object":"auth-request"
|
||||||
|
}
|
||||||
|
)))
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Debug, Deserialize)]
|
||||||
|
#[allow(non_snake_case)]
|
||||||
|
struct AuthResponseRequest {
|
||||||
|
deviceIdentifier: String,
|
||||||
|
key: String,
|
||||||
|
masterPasswordHash: Option<String>,
|
||||||
|
requestApproved: bool,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[put("/auth-requests/<uuid>", data = "<data>")]
|
||||||
|
async fn put_auth_request(
|
||||||
|
uuid: &str,
|
||||||
|
data: Json<AuthResponseRequest>,
|
||||||
|
mut conn: DbConn,
|
||||||
|
ant: AnonymousNotify<'_>,
|
||||||
|
nt: Notify<'_>,
|
||||||
|
) -> JsonResult {
|
||||||
|
let data = data.into_inner();
|
||||||
|
let mut auth_request: AuthRequest = match AuthRequest::find_by_uuid(uuid, &mut conn).await {
|
||||||
|
Some(auth_request) => auth_request,
|
||||||
|
None => {
|
||||||
|
err!("AuthRequest doesn't exist")
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
auth_request.approved = Some(data.requestApproved);
|
||||||
|
auth_request.enc_key = Some(data.key);
|
||||||
|
auth_request.master_password_hash = data.masterPasswordHash;
|
||||||
|
auth_request.response_device_id = Some(data.deviceIdentifier.clone());
|
||||||
|
auth_request.save(&mut conn).await?;
|
||||||
|
|
||||||
|
if auth_request.approved.unwrap_or(false) {
|
||||||
|
ant.send_auth_response(&auth_request.user_uuid, &auth_request.uuid).await;
|
||||||
|
nt.send_auth_response(&auth_request.user_uuid, &auth_request.uuid, data.deviceIdentifier, &mut conn).await;
|
||||||
|
}
|
||||||
|
|
||||||
|
let response_date_utc = auth_request.response_date.map(|response_date| response_date.and_utc());
|
||||||
|
|
||||||
|
Ok(Json(json!(
|
||||||
|
{
|
||||||
|
"id": uuid,
|
||||||
|
"publicKey": auth_request.public_key,
|
||||||
|
"requestDeviceType": DeviceType::from_i32(auth_request.device_type).to_string(),
|
||||||
|
"requestIpAddress": auth_request.request_ip,
|
||||||
|
"key": auth_request.enc_key,
|
||||||
|
"masterPasswordHash": auth_request.master_password_hash,
|
||||||
|
"creationDate": auth_request.creation_date.and_utc(),
|
||||||
|
"responseDate": response_date_utc,
|
||||||
|
"requestApproved": auth_request.approved,
|
||||||
|
"origin": CONFIG.domain_origin(),
|
||||||
|
"object":"auth-request"
|
||||||
|
}
|
||||||
|
)))
|
||||||
|
}
|
||||||
|
|
||||||
|
#[get("/auth-requests/<uuid>/response?<code>")]
|
||||||
|
async fn get_auth_request_response(uuid: &str, code: &str, mut conn: DbConn) -> JsonResult {
|
||||||
|
let auth_request = match AuthRequest::find_by_uuid(uuid, &mut conn).await {
|
||||||
|
Some(auth_request) => auth_request,
|
||||||
|
None => {
|
||||||
|
err!("AuthRequest doesn't exist")
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
if !auth_request.check_access_code(code) {
|
||||||
|
err!("Access code invalid doesn't exist")
|
||||||
|
}
|
||||||
|
|
||||||
|
let response_date_utc = auth_request.response_date.map(|response_date| response_date.and_utc());
|
||||||
|
|
||||||
|
Ok(Json(json!(
|
||||||
|
{
|
||||||
|
"id": uuid,
|
||||||
|
"publicKey": auth_request.public_key,
|
||||||
|
"requestDeviceType": DeviceType::from_i32(auth_request.device_type).to_string(),
|
||||||
|
"requestIpAddress": auth_request.request_ip,
|
||||||
|
"key": auth_request.enc_key,
|
||||||
|
"masterPasswordHash": auth_request.master_password_hash,
|
||||||
|
"creationDate": auth_request.creation_date.and_utc(),
|
||||||
|
"responseDate": response_date_utc,
|
||||||
|
"requestApproved": auth_request.approved,
|
||||||
|
"origin": CONFIG.domain_origin(),
|
||||||
|
"object":"auth-request"
|
||||||
|
}
|
||||||
|
)))
|
||||||
|
}
|
||||||
|
|
||||||
|
#[get("/auth-requests")]
|
||||||
|
async fn get_auth_requests(headers: Headers, mut conn: DbConn) -> JsonResult {
|
||||||
|
let auth_requests = AuthRequest::find_by_user(&headers.user.uuid, &mut conn).await;
|
||||||
|
|
||||||
|
Ok(Json(json!({
|
||||||
|
"data": auth_requests
|
||||||
|
.iter()
|
||||||
|
.filter(|request| request.approved.is_none())
|
||||||
|
.map(|request| {
|
||||||
|
let response_date_utc = request.response_date.map(|response_date| response_date.and_utc());
|
||||||
|
|
||||||
|
json!({
|
||||||
|
"id": request.uuid,
|
||||||
|
"publicKey": request.public_key,
|
||||||
|
"requestDeviceType": DeviceType::from_i32(request.device_type).to_string(),
|
||||||
|
"requestIpAddress": request.request_ip,
|
||||||
|
"key": request.enc_key,
|
||||||
|
"masterPasswordHash": request.master_password_hash,
|
||||||
|
"creationDate": request.creation_date.and_utc(),
|
||||||
|
"responseDate": response_date_utc,
|
||||||
|
"requestApproved": request.approved,
|
||||||
|
"origin": CONFIG.domain_origin(),
|
||||||
|
"object":"auth-request"
|
||||||
|
})
|
||||||
|
}).collect::<Vec<Value>>(),
|
||||||
|
"continuationToken": null,
|
||||||
|
"object": "list"
|
||||||
|
})))
|
||||||
|
}
|
||||||
|
|
||||||
|
pub async fn purge_auth_requests(pool: DbPool) {
|
||||||
|
debug!("Purging auth requests");
|
||||||
|
if let Ok(mut conn) = pool.get().await {
|
||||||
|
AuthRequest::purge_expired_auth_requests(&mut conn).await;
|
||||||
|
} else {
|
||||||
|
error!("Failed to get DB connection while purging trashed ciphers")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
|
@ -1,6 +1,7 @@
|
||||||
use std::collections::{HashMap, HashSet};
|
use std::collections::{HashMap, HashSet};
|
||||||
|
|
||||||
use chrono::{NaiveDateTime, Utc};
|
use chrono::{NaiveDateTime, Utc};
|
||||||
|
use num_traits::ToPrimitive;
|
||||||
use rocket::fs::TempFile;
|
use rocket::fs::TempFile;
|
||||||
use rocket::serde::json::Json;
|
use rocket::serde::json::Json;
|
||||||
use rocket::{
|
use rocket::{
|
||||||
|
@ -9,8 +10,9 @@ use rocket::{
|
||||||
};
|
};
|
||||||
use serde_json::Value;
|
use serde_json::Value;
|
||||||
|
|
||||||
|
use crate::util::NumberOrString;
|
||||||
use crate::{
|
use crate::{
|
||||||
api::{self, core::log_event, EmptyResult, JsonResult, JsonUpcase, Notify, PasswordData, UpdateType},
|
api::{self, core::log_event, EmptyResult, JsonResult, JsonUpcase, Notify, PasswordOrOtpData, UpdateType},
|
||||||
auth::Headers,
|
auth::Headers,
|
||||||
crypto,
|
crypto,
|
||||||
db::{models::*, DbConn, DbPool},
|
db::{models::*, DbConn, DbPool},
|
||||||
|
@ -204,14 +206,15 @@ pub struct CipherData {
|
||||||
// Folder id is not included in import
|
// Folder id is not included in import
|
||||||
FolderId: Option<String>,
|
FolderId: Option<String>,
|
||||||
// TODO: Some of these might appear all the time, no need for Option
|
// TODO: Some of these might appear all the time, no need for Option
|
||||||
OrganizationId: Option<String>,
|
pub OrganizationId: Option<String>,
|
||||||
|
|
||||||
|
Key: Option<String>,
|
||||||
|
|
||||||
/*
|
/*
|
||||||
Login = 1,
|
Login = 1,
|
||||||
SecureNote = 2,
|
SecureNote = 2,
|
||||||
Card = 3,
|
Card = 3,
|
||||||
Identity = 4,
|
Identity = 4
|
||||||
Fido2Key = 5
|
|
||||||
*/
|
*/
|
||||||
pub Type: i32,
|
pub Type: i32,
|
||||||
pub Name: String,
|
pub Name: String,
|
||||||
|
@ -223,7 +226,6 @@ pub struct CipherData {
|
||||||
SecureNote: Option<Value>,
|
SecureNote: Option<Value>,
|
||||||
Card: Option<Value>,
|
Card: Option<Value>,
|
||||||
Identity: Option<Value>,
|
Identity: Option<Value>,
|
||||||
Fido2Key: Option<Value>,
|
|
||||||
|
|
||||||
Favorite: Option<bool>,
|
Favorite: Option<bool>,
|
||||||
Reprompt: Option<i32>,
|
Reprompt: Option<i32>,
|
||||||
|
@ -320,7 +322,7 @@ async fn post_ciphers(data: JsonUpcase<CipherData>, headers: Headers, mut conn:
|
||||||
data.LastKnownRevisionDate = None;
|
data.LastKnownRevisionDate = None;
|
||||||
|
|
||||||
let mut cipher = Cipher::new(data.Type, data.Name.clone());
|
let mut cipher = Cipher::new(data.Type, data.Name.clone());
|
||||||
update_cipher_from_data(&mut cipher, data, &headers, false, &mut conn, &nt, UpdateType::SyncCipherCreate).await?;
|
update_cipher_from_data(&mut cipher, data, &headers, None, &mut conn, &nt, UpdateType::SyncCipherCreate).await?;
|
||||||
|
|
||||||
Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, None, CipherSyncType::User, &mut conn).await))
|
Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, None, CipherSyncType::User, &mut conn).await))
|
||||||
}
|
}
|
||||||
|
@ -351,7 +353,7 @@ pub async fn update_cipher_from_data(
|
||||||
cipher: &mut Cipher,
|
cipher: &mut Cipher,
|
||||||
data: CipherData,
|
data: CipherData,
|
||||||
headers: &Headers,
|
headers: &Headers,
|
||||||
shared_to_collection: bool,
|
shared_to_collections: Option<Vec<String>>,
|
||||||
conn: &mut DbConn,
|
conn: &mut DbConn,
|
||||||
nt: &Notify<'_>,
|
nt: &Notify<'_>,
|
||||||
ut: UpdateType,
|
ut: UpdateType,
|
||||||
|
@ -359,14 +361,17 @@ pub async fn update_cipher_from_data(
|
||||||
enforce_personal_ownership_policy(Some(&data), headers, conn).await?;
|
enforce_personal_ownership_policy(Some(&data), headers, conn).await?;
|
||||||
|
|
||||||
// Check that the client isn't updating an existing cipher with stale data.
|
// Check that the client isn't updating an existing cipher with stale data.
|
||||||
if let Some(dt) = data.LastKnownRevisionDate {
|
// And only perform this check when not importing ciphers, else the date/time check will fail.
|
||||||
match NaiveDateTime::parse_from_str(&dt, "%+") {
|
if ut != UpdateType::None {
|
||||||
// ISO 8601 format
|
if let Some(dt) = data.LastKnownRevisionDate {
|
||||||
Err(err) => warn!("Error parsing LastKnownRevisionDate '{}': {}", dt, err),
|
match NaiveDateTime::parse_from_str(&dt, "%+") {
|
||||||
Ok(dt) if cipher.updated_at.signed_duration_since(dt).num_seconds() > 1 => {
|
// ISO 8601 format
|
||||||
err!("The client copy of this cipher is out of date. Resync the client and try again.")
|
Err(err) => warn!("Error parsing LastKnownRevisionDate '{}': {}", dt, err),
|
||||||
|
Ok(dt) if cipher.updated_at.signed_duration_since(dt).num_seconds() > 1 => {
|
||||||
|
err!("The client copy of this cipher is out of date. Resync the client and try again.")
|
||||||
|
}
|
||||||
|
Ok(_) => (),
|
||||||
}
|
}
|
||||||
Ok(_) => (),
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -387,7 +392,7 @@ pub async fn update_cipher_from_data(
|
||||||
match UserOrganization::find_by_user_and_org(&headers.user.uuid, &org_id, conn).await {
|
match UserOrganization::find_by_user_and_org(&headers.user.uuid, &org_id, conn).await {
|
||||||
None => err!("You don't have permission to add item to organization"),
|
None => err!("You don't have permission to add item to organization"),
|
||||||
Some(org_user) => {
|
Some(org_user) => {
|
||||||
if shared_to_collection
|
if shared_to_collections.is_some()
|
||||||
|| org_user.has_full_access()
|
|| org_user.has_full_access()
|
||||||
|| cipher.is_write_accessible_to_user(&headers.user.uuid, conn).await
|
|| cipher.is_write_accessible_to_user(&headers.user.uuid, conn).await
|
||||||
{
|
{
|
||||||
|
@ -466,7 +471,6 @@ pub async fn update_cipher_from_data(
|
||||||
2 => data.SecureNote,
|
2 => data.SecureNote,
|
||||||
3 => data.Card,
|
3 => data.Card,
|
||||||
4 => data.Identity,
|
4 => data.Identity,
|
||||||
5 => data.Fido2Key,
|
|
||||||
_ => err!("Invalid type"),
|
_ => err!("Invalid type"),
|
||||||
};
|
};
|
||||||
|
|
||||||
|
@ -483,6 +487,7 @@ pub async fn update_cipher_from_data(
|
||||||
None => err!("Data missing"),
|
None => err!("Data missing"),
|
||||||
};
|
};
|
||||||
|
|
||||||
|
cipher.key = data.Key;
|
||||||
cipher.name = data.Name;
|
cipher.name = data.Name;
|
||||||
cipher.notes = data.Notes;
|
cipher.notes = data.Notes;
|
||||||
cipher.fields = data.Fields.map(|f| _clean_cipher_data(f).to_string());
|
cipher.fields = data.Fields.map(|f| _clean_cipher_data(f).to_string());
|
||||||
|
@ -507,15 +512,22 @@ pub async fn update_cipher_from_data(
|
||||||
event_type as i32,
|
event_type as i32,
|
||||||
&cipher.uuid,
|
&cipher.uuid,
|
||||||
org_uuid,
|
org_uuid,
|
||||||
headers.user.uuid.clone(),
|
&headers.user.uuid,
|
||||||
headers.device.atype,
|
headers.device.atype,
|
||||||
&headers.ip.ip,
|
&headers.ip.ip,
|
||||||
conn,
|
conn,
|
||||||
)
|
)
|
||||||
.await;
|
.await;
|
||||||
}
|
}
|
||||||
nt.send_cipher_update(ut, cipher, &cipher.update_users_revision(conn).await, &headers.device.uuid, None, conn)
|
nt.send_cipher_update(
|
||||||
.await;
|
ut,
|
||||||
|
cipher,
|
||||||
|
&cipher.update_users_revision(conn).await,
|
||||||
|
&headers.device.uuid,
|
||||||
|
shared_to_collections,
|
||||||
|
conn,
|
||||||
|
)
|
||||||
|
.await;
|
||||||
}
|
}
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
@ -576,7 +588,7 @@ async fn post_ciphers_import(
|
||||||
cipher_data.FolderId = folder_uuid;
|
cipher_data.FolderId = folder_uuid;
|
||||||
|
|
||||||
let mut cipher = Cipher::new(cipher_data.Type, cipher_data.Name.clone());
|
let mut cipher = Cipher::new(cipher_data.Type, cipher_data.Name.clone());
|
||||||
update_cipher_from_data(&mut cipher, cipher_data, &headers, false, &mut conn, &nt, UpdateType::None).await?;
|
update_cipher_from_data(&mut cipher, cipher_data, &headers, None, &mut conn, &nt, UpdateType::None).await?;
|
||||||
}
|
}
|
||||||
|
|
||||||
let mut user = headers.user;
|
let mut user = headers.user;
|
||||||
|
@ -644,7 +656,7 @@ async fn put_cipher(
|
||||||
err!("Cipher is not write accessible")
|
err!("Cipher is not write accessible")
|
||||||
}
|
}
|
||||||
|
|
||||||
update_cipher_from_data(&mut cipher, data, &headers, false, &mut conn, &nt, UpdateType::SyncCipherUpdate).await?;
|
update_cipher_from_data(&mut cipher, data, &headers, None, &mut conn, &nt, UpdateType::SyncCipherUpdate).await?;
|
||||||
|
|
||||||
Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, None, CipherSyncType::User, &mut conn).await))
|
Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, None, CipherSyncType::User, &mut conn).await))
|
||||||
}
|
}
|
||||||
|
@ -788,7 +800,7 @@ async fn post_collections_admin(
|
||||||
EventType::CipherUpdatedCollections as i32,
|
EventType::CipherUpdatedCollections as i32,
|
||||||
&cipher.uuid,
|
&cipher.uuid,
|
||||||
&cipher.organization_uuid.unwrap(),
|
&cipher.organization_uuid.unwrap(),
|
||||||
headers.user.uuid.clone(),
|
&headers.user.uuid,
|
||||||
headers.device.atype,
|
headers.device.atype,
|
||||||
&headers.ip.ip,
|
&headers.ip.ip,
|
||||||
&mut conn,
|
&mut conn,
|
||||||
|
@ -846,7 +858,6 @@ async fn put_cipher_share_selected(
|
||||||
nt: Notify<'_>,
|
nt: Notify<'_>,
|
||||||
) -> EmptyResult {
|
) -> EmptyResult {
|
||||||
let mut data: ShareSelectedCipherData = data.into_inner().data;
|
let mut data: ShareSelectedCipherData = data.into_inner().data;
|
||||||
let mut cipher_ids: Vec<String> = Vec::new();
|
|
||||||
|
|
||||||
if data.Ciphers.is_empty() {
|
if data.Ciphers.is_empty() {
|
||||||
err!("You must select at least one cipher.")
|
err!("You must select at least one cipher.")
|
||||||
|
@ -857,10 +868,9 @@ async fn put_cipher_share_selected(
|
||||||
}
|
}
|
||||||
|
|
||||||
for cipher in data.Ciphers.iter() {
|
for cipher in data.Ciphers.iter() {
|
||||||
match cipher.Id {
|
if cipher.Id.is_none() {
|
||||||
Some(ref id) => cipher_ids.push(id.to_string()),
|
err!("Request missing ids field")
|
||||||
None => err!("Request missing ids field"),
|
}
|
||||||
};
|
|
||||||
}
|
}
|
||||||
|
|
||||||
while let Some(cipher) = data.Ciphers.pop() {
|
while let Some(cipher) = data.Ciphers.pop() {
|
||||||
|
@ -896,7 +906,7 @@ async fn share_cipher_by_uuid(
|
||||||
None => err!("Cipher doesn't exist"),
|
None => err!("Cipher doesn't exist"),
|
||||||
};
|
};
|
||||||
|
|
||||||
let mut shared_to_collection = false;
|
let mut shared_to_collections = vec![];
|
||||||
|
|
||||||
if let Some(organization_uuid) = &data.Cipher.OrganizationId {
|
if let Some(organization_uuid) = &data.Cipher.OrganizationId {
|
||||||
for uuid in &data.CollectionIds {
|
for uuid in &data.CollectionIds {
|
||||||
|
@ -905,7 +915,7 @@ async fn share_cipher_by_uuid(
|
||||||
Some(collection) => {
|
Some(collection) => {
|
||||||
if collection.is_writable_by_user(&headers.user.uuid, conn).await {
|
if collection.is_writable_by_user(&headers.user.uuid, conn).await {
|
||||||
CollectionCipher::save(&cipher.uuid, &collection.uuid, conn).await?;
|
CollectionCipher::save(&cipher.uuid, &collection.uuid, conn).await?;
|
||||||
shared_to_collection = true;
|
shared_to_collections.push(collection.uuid);
|
||||||
} else {
|
} else {
|
||||||
err!("No rights to modify the collection")
|
err!("No rights to modify the collection")
|
||||||
}
|
}
|
||||||
|
@ -921,7 +931,7 @@ async fn share_cipher_by_uuid(
|
||||||
UpdateType::SyncCipherCreate
|
UpdateType::SyncCipherCreate
|
||||||
};
|
};
|
||||||
|
|
||||||
update_cipher_from_data(&mut cipher, data.Cipher, headers, shared_to_collection, conn, nt, ut).await?;
|
update_cipher_from_data(&mut cipher, data.Cipher, headers, Some(shared_to_collections), conn, nt, ut).await?;
|
||||||
|
|
||||||
Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, None, CipherSyncType::User, conn).await))
|
Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, None, CipherSyncType::User, conn).await))
|
||||||
}
|
}
|
||||||
|
@ -955,7 +965,7 @@ async fn get_attachment(uuid: &str, attachment_id: &str, headers: Headers, mut c
|
||||||
struct AttachmentRequestData {
|
struct AttachmentRequestData {
|
||||||
Key: String,
|
Key: String,
|
||||||
FileName: String,
|
FileName: String,
|
||||||
FileSize: i32,
|
FileSize: NumberOrString,
|
||||||
AdminRequest: Option<bool>, // true when attaching from an org vault view
|
AdminRequest: Option<bool>, // true when attaching from an org vault view
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -984,10 +994,15 @@ async fn post_attachment_v2(
|
||||||
err!("Cipher is not write accessible")
|
err!("Cipher is not write accessible")
|
||||||
}
|
}
|
||||||
|
|
||||||
let attachment_id = crypto::generate_attachment_id();
|
|
||||||
let data: AttachmentRequestData = data.into_inner().data;
|
let data: AttachmentRequestData = data.into_inner().data;
|
||||||
|
let file_size = data.FileSize.into_i64()?;
|
||||||
|
|
||||||
|
if file_size < 0 {
|
||||||
|
err!("Attachment size can't be negative")
|
||||||
|
}
|
||||||
|
let attachment_id = crypto::generate_attachment_id();
|
||||||
let attachment =
|
let attachment =
|
||||||
Attachment::new(attachment_id.clone(), cipher.uuid.clone(), data.FileName, data.FileSize, Some(data.Key));
|
Attachment::new(attachment_id.clone(), cipher.uuid.clone(), data.FileName, file_size, Some(data.Key));
|
||||||
attachment.save(&mut conn).await.expect("Error saving attachment");
|
attachment.save(&mut conn).await.expect("Error saving attachment");
|
||||||
|
|
||||||
let url = format!("/ciphers/{}/attachment/{}", cipher.uuid, attachment_id);
|
let url = format!("/ciphers/{}/attachment/{}", cipher.uuid, attachment_id);
|
||||||
|
@ -1027,6 +1042,15 @@ async fn save_attachment(
|
||||||
mut conn: DbConn,
|
mut conn: DbConn,
|
||||||
nt: Notify<'_>,
|
nt: Notify<'_>,
|
||||||
) -> Result<(Cipher, DbConn), crate::error::Error> {
|
) -> Result<(Cipher, DbConn), crate::error::Error> {
|
||||||
|
let mut data = data.into_inner();
|
||||||
|
|
||||||
|
let Some(size) = data.data.len().to_i64() else {
|
||||||
|
err!("Attachment data size overflow");
|
||||||
|
};
|
||||||
|
if size < 0 {
|
||||||
|
err!("Attachment size can't be negative")
|
||||||
|
}
|
||||||
|
|
||||||
let cipher = match Cipher::find_by_uuid(cipher_uuid, &mut conn).await {
|
let cipher = match Cipher::find_by_uuid(cipher_uuid, &mut conn).await {
|
||||||
Some(cipher) => cipher,
|
Some(cipher) => cipher,
|
||||||
None => err!("Cipher doesn't exist"),
|
None => err!("Cipher doesn't exist"),
|
||||||
|
@ -1039,19 +1063,29 @@ async fn save_attachment(
|
||||||
// In the v2 API, the attachment record has already been created,
|
// In the v2 API, the attachment record has already been created,
|
||||||
// so the size limit needs to be adjusted to account for that.
|
// so the size limit needs to be adjusted to account for that.
|
||||||
let size_adjust = match &attachment {
|
let size_adjust = match &attachment {
|
||||||
None => 0, // Legacy API
|
None => 0, // Legacy API
|
||||||
Some(a) => i64::from(a.file_size), // v2 API
|
Some(a) => a.file_size, // v2 API
|
||||||
};
|
};
|
||||||
|
|
||||||
let size_limit = if let Some(ref user_uuid) = cipher.user_uuid {
|
let size_limit = if let Some(ref user_uuid) = cipher.user_uuid {
|
||||||
match CONFIG.user_attachment_limit() {
|
match CONFIG.user_attachment_limit() {
|
||||||
Some(0) => err!("Attachments are disabled"),
|
Some(0) => err!("Attachments are disabled"),
|
||||||
Some(limit_kb) => {
|
Some(limit_kb) => {
|
||||||
let left = (limit_kb * 1024) - Attachment::size_by_user(user_uuid, &mut conn).await + size_adjust;
|
let already_used = Attachment::size_by_user(user_uuid, &mut conn).await;
|
||||||
|
let left = limit_kb
|
||||||
|
.checked_mul(1024)
|
||||||
|
.and_then(|l| l.checked_sub(already_used))
|
||||||
|
.and_then(|l| l.checked_add(size_adjust));
|
||||||
|
|
||||||
|
let Some(left) = left else {
|
||||||
|
err!("Attachment size overflow");
|
||||||
|
};
|
||||||
|
|
||||||
if left <= 0 {
|
if left <= 0 {
|
||||||
err!("Attachment storage limit reached! Delete some attachments to free up space")
|
err!("Attachment storage limit reached! Delete some attachments to free up space")
|
||||||
}
|
}
|
||||||
Some(left as u64)
|
|
||||||
|
Some(left)
|
||||||
}
|
}
|
||||||
None => None,
|
None => None,
|
||||||
}
|
}
|
||||||
|
@ -1059,11 +1093,21 @@ async fn save_attachment(
|
||||||
match CONFIG.org_attachment_limit() {
|
match CONFIG.org_attachment_limit() {
|
||||||
Some(0) => err!("Attachments are disabled"),
|
Some(0) => err!("Attachments are disabled"),
|
||||||
Some(limit_kb) => {
|
Some(limit_kb) => {
|
||||||
let left = (limit_kb * 1024) - Attachment::size_by_org(org_uuid, &mut conn).await + size_adjust;
|
let already_used = Attachment::size_by_org(org_uuid, &mut conn).await;
|
||||||
|
let left = limit_kb
|
||||||
|
.checked_mul(1024)
|
||||||
|
.and_then(|l| l.checked_sub(already_used))
|
||||||
|
.and_then(|l| l.checked_add(size_adjust));
|
||||||
|
|
||||||
|
let Some(left) = left else {
|
||||||
|
err!("Attachment size overflow");
|
||||||
|
};
|
||||||
|
|
||||||
if left <= 0 {
|
if left <= 0 {
|
||||||
err!("Attachment storage limit reached! Delete some attachments to free up space")
|
err!("Attachment storage limit reached! Delete some attachments to free up space")
|
||||||
}
|
}
|
||||||
Some(left as u64)
|
|
||||||
|
Some(left)
|
||||||
}
|
}
|
||||||
None => None,
|
None => None,
|
||||||
}
|
}
|
||||||
|
@ -1071,10 +1115,8 @@ async fn save_attachment(
|
||||||
err!("Cipher is neither owned by a user nor an organization");
|
err!("Cipher is neither owned by a user nor an organization");
|
||||||
};
|
};
|
||||||
|
|
||||||
let mut data = data.into_inner();
|
|
||||||
|
|
||||||
if let Some(size_limit) = size_limit {
|
if let Some(size_limit) = size_limit {
|
||||||
if data.data.len() > size_limit {
|
if size > size_limit {
|
||||||
err!("Attachment storage limit exceeded with this file");
|
err!("Attachment storage limit exceeded with this file");
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -1084,20 +1126,19 @@ async fn save_attachment(
|
||||||
None => crypto::generate_attachment_id(), // Legacy API
|
None => crypto::generate_attachment_id(), // Legacy API
|
||||||
};
|
};
|
||||||
|
|
||||||
let folder_path = tokio::fs::canonicalize(&CONFIG.attachments_folder()).await?.join(cipher_uuid);
|
|
||||||
let file_path = folder_path.join(&file_id);
|
|
||||||
tokio::fs::create_dir_all(&folder_path).await?;
|
|
||||||
|
|
||||||
let size = data.data.len() as i32;
|
|
||||||
if let Some(attachment) = &mut attachment {
|
if let Some(attachment) = &mut attachment {
|
||||||
// v2 API
|
// v2 API
|
||||||
|
|
||||||
// Check the actual size against the size initially provided by
|
// Check the actual size against the size initially provided by
|
||||||
// the client. Upstream allows +/- 1 MiB deviation from this
|
// the client. Upstream allows +/- 1 MiB deviation from this
|
||||||
// size, but it's not clear when or why this is needed.
|
// size, but it's not clear when or why this is needed.
|
||||||
const LEEWAY: i32 = 1024 * 1024; // 1 MiB
|
const LEEWAY: i64 = 1024 * 1024; // 1 MiB
|
||||||
let min_size = attachment.file_size - LEEWAY;
|
let Some(max_size) = attachment.file_size.checked_add(LEEWAY) else {
|
||||||
let max_size = attachment.file_size + LEEWAY;
|
err!("Invalid attachment size max")
|
||||||
|
};
|
||||||
|
let Some(min_size) = attachment.file_size.checked_sub(LEEWAY) else {
|
||||||
|
err!("Invalid attachment size min")
|
||||||
|
};
|
||||||
|
|
||||||
if min_size <= size && size <= max_size {
|
if min_size <= size && size <= max_size {
|
||||||
if size != attachment.file_size {
|
if size != attachment.file_size {
|
||||||
|
@ -1112,6 +1153,10 @@ async fn save_attachment(
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
// Legacy API
|
// Legacy API
|
||||||
|
|
||||||
|
// SAFETY: This value is only stored in the database and is not used to access the file system.
|
||||||
|
// As a result, the conditions specified by Rocket [0] are met and this is safe to use.
|
||||||
|
// [0]: https://docs.rs/rocket/latest/rocket/fs/struct.FileName.html#-danger-
|
||||||
let encrypted_filename = data.data.raw_name().map(|s| s.dangerous_unsafe_unsanitized_raw().to_string());
|
let encrypted_filename = data.data.raw_name().map(|s| s.dangerous_unsafe_unsanitized_raw().to_string());
|
||||||
|
|
||||||
if encrypted_filename.is_none() {
|
if encrypted_filename.is_none() {
|
||||||
|
@ -1121,10 +1166,14 @@ async fn save_attachment(
|
||||||
err!("No attachment key provided")
|
err!("No attachment key provided")
|
||||||
}
|
}
|
||||||
let attachment =
|
let attachment =
|
||||||
Attachment::new(file_id, String::from(cipher_uuid), encrypted_filename.unwrap(), size, data.key);
|
Attachment::new(file_id.clone(), String::from(cipher_uuid), encrypted_filename.unwrap(), size, data.key);
|
||||||
attachment.save(&mut conn).await.expect("Error saving attachment");
|
attachment.save(&mut conn).await.expect("Error saving attachment");
|
||||||
}
|
}
|
||||||
|
|
||||||
|
let folder_path = tokio::fs::canonicalize(&CONFIG.attachments_folder()).await?.join(cipher_uuid);
|
||||||
|
let file_path = folder_path.join(&file_id);
|
||||||
|
tokio::fs::create_dir_all(&folder_path).await?;
|
||||||
|
|
||||||
if let Err(_err) = data.data.persist_to(&file_path).await {
|
if let Err(_err) = data.data.persist_to(&file_path).await {
|
||||||
data.data.move_copy_to(file_path).await?
|
data.data.move_copy_to(file_path).await?
|
||||||
}
|
}
|
||||||
|
@ -1144,7 +1193,7 @@ async fn save_attachment(
|
||||||
EventType::CipherAttachmentCreated as i32,
|
EventType::CipherAttachmentCreated as i32,
|
||||||
&cipher.uuid,
|
&cipher.uuid,
|
||||||
org_uuid,
|
org_uuid,
|
||||||
headers.user.uuid.clone(),
|
&headers.user.uuid,
|
||||||
headers.device.atype,
|
headers.device.atype,
|
||||||
&headers.ip.ip,
|
&headers.ip.ip,
|
||||||
&mut conn,
|
&mut conn,
|
||||||
|
@ -1454,19 +1503,15 @@ struct OrganizationId {
|
||||||
#[post("/ciphers/purge?<organization..>", data = "<data>")]
|
#[post("/ciphers/purge?<organization..>", data = "<data>")]
|
||||||
async fn delete_all(
|
async fn delete_all(
|
||||||
organization: Option<OrganizationId>,
|
organization: Option<OrganizationId>,
|
||||||
data: JsonUpcase<PasswordData>,
|
data: JsonUpcase<PasswordOrOtpData>,
|
||||||
headers: Headers,
|
headers: Headers,
|
||||||
mut conn: DbConn,
|
mut conn: DbConn,
|
||||||
nt: Notify<'_>,
|
nt: Notify<'_>,
|
||||||
) -> EmptyResult {
|
) -> EmptyResult {
|
||||||
let data: PasswordData = data.into_inner().data;
|
let data: PasswordOrOtpData = data.into_inner().data;
|
||||||
let password_hash = data.MasterPasswordHash;
|
|
||||||
|
|
||||||
let mut user = headers.user;
|
let mut user = headers.user;
|
||||||
|
|
||||||
if !user.check_valid_password(&password_hash) {
|
data.validate(&user, true, &mut conn).await?;
|
||||||
err!("Invalid password")
|
|
||||||
}
|
|
||||||
|
|
||||||
match organization {
|
match organization {
|
||||||
Some(org_data) => {
|
Some(org_data) => {
|
||||||
|
@ -1482,7 +1527,7 @@ async fn delete_all(
|
||||||
EventType::OrganizationPurgedVault as i32,
|
EventType::OrganizationPurgedVault as i32,
|
||||||
&org_data.org_id,
|
&org_data.org_id,
|
||||||
&org_data.org_id,
|
&org_data.org_id,
|
||||||
user.uuid,
|
&user.uuid,
|
||||||
headers.device.atype,
|
headers.device.atype,
|
||||||
&headers.ip.ip,
|
&headers.ip.ip,
|
||||||
&mut conn,
|
&mut conn,
|
||||||
|
@ -1563,16 +1608,8 @@ async fn _delete_cipher_by_uuid(
|
||||||
false => EventType::CipherDeleted as i32,
|
false => EventType::CipherDeleted as i32,
|
||||||
};
|
};
|
||||||
|
|
||||||
log_event(
|
log_event(event_type, &cipher.uuid, &org_uuid, &headers.user.uuid, headers.device.atype, &headers.ip.ip, conn)
|
||||||
event_type,
|
.await;
|
||||||
&cipher.uuid,
|
|
||||||
&org_uuid,
|
|
||||||
headers.user.uuid.clone(),
|
|
||||||
headers.device.atype,
|
|
||||||
&headers.ip.ip,
|
|
||||||
conn,
|
|
||||||
)
|
|
||||||
.await;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
Ok(())
|
Ok(())
|
||||||
|
@ -1632,7 +1669,7 @@ async fn _restore_cipher_by_uuid(uuid: &str, headers: &Headers, conn: &mut DbCon
|
||||||
EventType::CipherRestored as i32,
|
EventType::CipherRestored as i32,
|
||||||
&cipher.uuid.clone(),
|
&cipher.uuid.clone(),
|
||||||
org_uuid,
|
org_uuid,
|
||||||
headers.user.uuid.clone(),
|
&headers.user.uuid,
|
||||||
headers.device.atype,
|
headers.device.atype,
|
||||||
&headers.ip.ip,
|
&headers.ip.ip,
|
||||||
conn,
|
conn,
|
||||||
|
@ -1716,7 +1753,7 @@ async fn _delete_cipher_attachment_by_id(
|
||||||
EventType::CipherAttachmentDeleted as i32,
|
EventType::CipherAttachmentDeleted as i32,
|
||||||
&cipher.uuid,
|
&cipher.uuid,
|
||||||
&org_uuid,
|
&org_uuid,
|
||||||
headers.user.uuid.clone(),
|
&headers.user.uuid,
|
||||||
headers.device.atype,
|
headers.device.atype,
|
||||||
&headers.ip.ip,
|
&headers.ip.ip,
|
||||||
conn,
|
conn,
|
||||||
|
@ -1752,7 +1789,7 @@ impl CipherSyncData {
|
||||||
let cipher_folders: HashMap<String, String>;
|
let cipher_folders: HashMap<String, String>;
|
||||||
let cipher_favorites: HashSet<String>;
|
let cipher_favorites: HashSet<String>;
|
||||||
match sync_type {
|
match sync_type {
|
||||||
// User Sync supports Folders and Favorits
|
// User Sync supports Folders and Favorites
|
||||||
CipherSyncType::User => {
|
CipherSyncType::User => {
|
||||||
// Generate a HashMap with the Cipher UUID as key and the Folder UUID as value
|
// Generate a HashMap with the Cipher UUID as key and the Folder UUID as value
|
||||||
cipher_folders = FolderCipher::find_by_user(user_uuid, conn).await.into_iter().collect();
|
cipher_folders = FolderCipher::find_by_user(user_uuid, conn).await.into_iter().collect();
|
||||||
|
@ -1760,7 +1797,7 @@ impl CipherSyncData {
|
||||||
// Generate a HashSet of all the Cipher UUID's which are marked as favorite
|
// Generate a HashSet of all the Cipher UUID's which are marked as favorite
|
||||||
cipher_favorites = Favorite::get_all_cipher_uuid_by_user(user_uuid, conn).await.into_iter().collect();
|
cipher_favorites = Favorite::get_all_cipher_uuid_by_user(user_uuid, conn).await.into_iter().collect();
|
||||||
}
|
}
|
||||||
// Organization Sync does not support Folders and Favorits.
|
// Organization Sync does not support Folders and Favorites.
|
||||||
// If these are set, it will cause issues in the web-vault.
|
// If these are set, it will cause issues in the web-vault.
|
||||||
CipherSyncType::Organization => {
|
CipherSyncType::Organization => {
|
||||||
cipher_folders = HashMap::with_capacity(0);
|
cipher_folders = HashMap::with_capacity(0);
|
||||||
|
@ -1799,15 +1836,22 @@ impl CipherSyncData {
|
||||||
.collect();
|
.collect();
|
||||||
|
|
||||||
// Generate a HashMap with the collections_uuid as key and the CollectionGroup record
|
// Generate a HashMap with the collections_uuid as key and the CollectionGroup record
|
||||||
let user_collections_groups: HashMap<String, CollectionGroup> = CollectionGroup::find_by_user(user_uuid, conn)
|
let user_collections_groups: HashMap<String, CollectionGroup> = if CONFIG.org_groups_enabled() {
|
||||||
.await
|
CollectionGroup::find_by_user(user_uuid, conn)
|
||||||
.into_iter()
|
.await
|
||||||
.map(|collection_group| (collection_group.collections_uuid.clone(), collection_group))
|
.into_iter()
|
||||||
.collect();
|
.map(|collection_group| (collection_group.collections_uuid.clone(), collection_group))
|
||||||
|
.collect()
|
||||||
|
} else {
|
||||||
|
HashMap::new()
|
||||||
|
};
|
||||||
|
|
||||||
// Get all organizations that the user has full access to via group assignement
|
// Get all organizations that the user has full access to via group assignment
|
||||||
let user_group_full_access_for_organizations: HashSet<String> =
|
let user_group_full_access_for_organizations: HashSet<String> = if CONFIG.org_groups_enabled() {
|
||||||
Group::gather_user_organizations_full_access(user_uuid, conn).await.into_iter().collect();
|
Group::gather_user_organizations_full_access(user_uuid, conn).await.into_iter().collect()
|
||||||
|
} else {
|
||||||
|
HashSet::new()
|
||||||
|
};
|
||||||
|
|
||||||
Self {
|
Self {
|
||||||
cipher_attachments,
|
cipher_attachments,
|
||||||
|
|
|
@ -1,15 +1,17 @@
|
||||||
use chrono::{Duration, Utc};
|
use chrono::{TimeDelta, Utc};
|
||||||
use rocket::{serde::json::Json, Route};
|
use rocket::{serde::json::Json, Route};
|
||||||
use serde_json::Value;
|
use serde_json::Value;
|
||||||
|
|
||||||
use crate::{
|
use crate::{
|
||||||
api::{
|
api::{
|
||||||
core::{CipherSyncData, CipherSyncType},
|
core::{CipherSyncData, CipherSyncType},
|
||||||
EmptyResult, JsonResult, JsonUpcase, NumberOrString,
|
EmptyResult, JsonResult, JsonUpcase,
|
||||||
},
|
},
|
||||||
auth::{decode_emergency_access_invite, Headers},
|
auth::{decode_emergency_access_invite, Headers},
|
||||||
db::{models::*, DbConn, DbPool},
|
db::{models::*, DbConn, DbPool},
|
||||||
mail, CONFIG,
|
mail,
|
||||||
|
util::NumberOrString,
|
||||||
|
CONFIG,
|
||||||
};
|
};
|
||||||
|
|
||||||
pub fn routes() -> Vec<Route> {
|
pub fn routes() -> Vec<Route> {
|
||||||
|
@ -18,6 +20,7 @@ pub fn routes() -> Vec<Route> {
|
||||||
get_grantees,
|
get_grantees,
|
||||||
get_emergency_access,
|
get_emergency_access,
|
||||||
put_emergency_access,
|
put_emergency_access,
|
||||||
|
post_emergency_access,
|
||||||
delete_emergency_access,
|
delete_emergency_access,
|
||||||
post_delete_emergency_access,
|
post_delete_emergency_access,
|
||||||
send_invite,
|
send_invite,
|
||||||
|
@ -37,45 +40,66 @@ pub fn routes() -> Vec<Route> {
|
||||||
// region get
|
// region get
|
||||||
|
|
||||||
#[get("/emergency-access/trusted")]
|
#[get("/emergency-access/trusted")]
|
||||||
async fn get_contacts(headers: Headers, mut conn: DbConn) -> JsonResult {
|
async fn get_contacts(headers: Headers, mut conn: DbConn) -> Json<Value> {
|
||||||
check_emergency_access_allowed()?;
|
if !CONFIG.emergency_access_allowed() {
|
||||||
|
return Json(json!({
|
||||||
|
"Data": [{
|
||||||
|
"Id": "",
|
||||||
|
"Status": 2,
|
||||||
|
"Type": 0,
|
||||||
|
"WaitTimeDays": 0,
|
||||||
|
"GranteeId": "",
|
||||||
|
"Email": "",
|
||||||
|
"Name": "NOTE: Emergency Access is disabled!",
|
||||||
|
"Object": "emergencyAccessGranteeDetails",
|
||||||
|
|
||||||
|
}],
|
||||||
|
"Object": "list",
|
||||||
|
"ContinuationToken": null
|
||||||
|
}));
|
||||||
|
}
|
||||||
let emergency_access_list = EmergencyAccess::find_all_by_grantor_uuid(&headers.user.uuid, &mut conn).await;
|
let emergency_access_list = EmergencyAccess::find_all_by_grantor_uuid(&headers.user.uuid, &mut conn).await;
|
||||||
let mut emergency_access_list_json = Vec::with_capacity(emergency_access_list.len());
|
let mut emergency_access_list_json = Vec::with_capacity(emergency_access_list.len());
|
||||||
for ea in emergency_access_list {
|
for ea in emergency_access_list {
|
||||||
emergency_access_list_json.push(ea.to_json_grantee_details(&mut conn).await);
|
if let Some(grantee) = ea.to_json_grantee_details(&mut conn).await {
|
||||||
|
emergency_access_list_json.push(grantee)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
Ok(Json(json!({
|
Json(json!({
|
||||||
"Data": emergency_access_list_json,
|
"Data": emergency_access_list_json,
|
||||||
"Object": "list",
|
"Object": "list",
|
||||||
"ContinuationToken": null
|
"ContinuationToken": null
|
||||||
})))
|
}))
|
||||||
}
|
}
|
||||||
|
|
||||||
#[get("/emergency-access/granted")]
|
#[get("/emergency-access/granted")]
|
||||||
async fn get_grantees(headers: Headers, mut conn: DbConn) -> JsonResult {
|
async fn get_grantees(headers: Headers, mut conn: DbConn) -> Json<Value> {
|
||||||
check_emergency_access_allowed()?;
|
let emergency_access_list = if CONFIG.emergency_access_allowed() {
|
||||||
|
EmergencyAccess::find_all_by_grantee_uuid(&headers.user.uuid, &mut conn).await
|
||||||
let emergency_access_list = EmergencyAccess::find_all_by_grantee_uuid(&headers.user.uuid, &mut conn).await;
|
} else {
|
||||||
|
Vec::new()
|
||||||
|
};
|
||||||
let mut emergency_access_list_json = Vec::with_capacity(emergency_access_list.len());
|
let mut emergency_access_list_json = Vec::with_capacity(emergency_access_list.len());
|
||||||
for ea in emergency_access_list {
|
for ea in emergency_access_list {
|
||||||
emergency_access_list_json.push(ea.to_json_grantor_details(&mut conn).await);
|
emergency_access_list_json.push(ea.to_json_grantor_details(&mut conn).await);
|
||||||
}
|
}
|
||||||
|
|
||||||
Ok(Json(json!({
|
Json(json!({
|
||||||
"Data": emergency_access_list_json,
|
"Data": emergency_access_list_json,
|
||||||
"Object": "list",
|
"Object": "list",
|
||||||
"ContinuationToken": null
|
"ContinuationToken": null
|
||||||
})))
|
}))
|
||||||
}
|
}
|
||||||
|
|
||||||
#[get("/emergency-access/<emer_id>")]
|
#[get("/emergency-access/<emer_id>")]
|
||||||
async fn get_emergency_access(emer_id: &str, mut conn: DbConn) -> JsonResult {
|
async fn get_emergency_access(emer_id: &str, mut conn: DbConn) -> JsonResult {
|
||||||
check_emergency_access_allowed()?;
|
check_emergency_access_enabled()?;
|
||||||
|
|
||||||
match EmergencyAccess::find_by_uuid(emer_id, &mut conn).await {
|
match EmergencyAccess::find_by_uuid(emer_id, &mut conn).await {
|
||||||
Some(emergency_access) => Ok(Json(emergency_access.to_json_grantee_details(&mut conn).await)),
|
Some(emergency_access) => Ok(Json(
|
||||||
|
emergency_access.to_json_grantee_details(&mut conn).await.expect("Grantee user should exist but does not!"),
|
||||||
|
)),
|
||||||
None => err!("Emergency access not valid."),
|
None => err!("Emergency access not valid."),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -103,7 +127,7 @@ async fn post_emergency_access(
|
||||||
data: JsonUpcase<EmergencyAccessUpdateData>,
|
data: JsonUpcase<EmergencyAccessUpdateData>,
|
||||||
mut conn: DbConn,
|
mut conn: DbConn,
|
||||||
) -> JsonResult {
|
) -> JsonResult {
|
||||||
check_emergency_access_allowed()?;
|
check_emergency_access_enabled()?;
|
||||||
|
|
||||||
let data: EmergencyAccessUpdateData = data.into_inner().data;
|
let data: EmergencyAccessUpdateData = data.into_inner().data;
|
||||||
|
|
||||||
|
@ -133,7 +157,7 @@ async fn post_emergency_access(
|
||||||
|
|
||||||
#[delete("/emergency-access/<emer_id>")]
|
#[delete("/emergency-access/<emer_id>")]
|
||||||
async fn delete_emergency_access(emer_id: &str, headers: Headers, mut conn: DbConn) -> EmptyResult {
|
async fn delete_emergency_access(emer_id: &str, headers: Headers, mut conn: DbConn) -> EmptyResult {
|
||||||
check_emergency_access_allowed()?;
|
check_emergency_access_enabled()?;
|
||||||
|
|
||||||
let grantor_user = headers.user;
|
let grantor_user = headers.user;
|
||||||
|
|
||||||
|
@ -169,7 +193,7 @@ struct EmergencyAccessInviteData {
|
||||||
|
|
||||||
#[post("/emergency-access/invite", data = "<data>")]
|
#[post("/emergency-access/invite", data = "<data>")]
|
||||||
async fn send_invite(data: JsonUpcase<EmergencyAccessInviteData>, headers: Headers, mut conn: DbConn) -> EmptyResult {
|
async fn send_invite(data: JsonUpcase<EmergencyAccessInviteData>, headers: Headers, mut conn: DbConn) -> EmptyResult {
|
||||||
check_emergency_access_allowed()?;
|
check_emergency_access_enabled()?;
|
||||||
|
|
||||||
let data: EmergencyAccessInviteData = data.into_inner().data;
|
let data: EmergencyAccessInviteData = data.into_inner().data;
|
||||||
let email = data.Email.to_lowercase();
|
let email = data.Email.to_lowercase();
|
||||||
|
@ -189,7 +213,7 @@ async fn send_invite(data: JsonUpcase<EmergencyAccessInviteData>, headers: Heade
|
||||||
err!("You can not set yourself as an emergency contact.")
|
err!("You can not set yourself as an emergency contact.")
|
||||||
}
|
}
|
||||||
|
|
||||||
let grantee_user = match User::find_by_mail(&email, &mut conn).await {
|
let (grantee_user, new_user) = match User::find_by_mail(&email, &mut conn).await {
|
||||||
None => {
|
None => {
|
||||||
if !CONFIG.invitations_allowed() {
|
if !CONFIG.invitations_allowed() {
|
||||||
err!(format!("Grantee user does not exist: {}", &email))
|
err!(format!("Grantee user does not exist: {}", &email))
|
||||||
|
@ -206,9 +230,10 @@ async fn send_invite(data: JsonUpcase<EmergencyAccessInviteData>, headers: Heade
|
||||||
|
|
||||||
let mut user = User::new(email.clone());
|
let mut user = User::new(email.clone());
|
||||||
user.save(&mut conn).await?;
|
user.save(&mut conn).await?;
|
||||||
user
|
(user, true)
|
||||||
}
|
}
|
||||||
Some(user) => user,
|
Some(user) if user.password_hash.is_empty() => (user, true),
|
||||||
|
Some(user) => (user, false),
|
||||||
};
|
};
|
||||||
|
|
||||||
if EmergencyAccess::find_by_grantor_uuid_and_grantee_uuid_or_email(
|
if EmergencyAccess::find_by_grantor_uuid_and_grantee_uuid_or_email(
|
||||||
|
@ -236,15 +261,9 @@ async fn send_invite(data: JsonUpcase<EmergencyAccessInviteData>, headers: Heade
|
||||||
&grantor_user.email,
|
&grantor_user.email,
|
||||||
)
|
)
|
||||||
.await?;
|
.await?;
|
||||||
} else {
|
} else if !new_user {
|
||||||
// Automatically mark user as accepted if no email invites
|
// if mail is not enabled immediately accept the invitation for existing users
|
||||||
match User::find_by_mail(&email, &mut conn).await {
|
new_emergency_access.accept_invite(&grantee_user.uuid, &email, &mut conn).await?;
|
||||||
Some(user) => match accept_invite_process(&user.uuid, &mut new_emergency_access, &email, &mut conn).await {
|
|
||||||
Ok(v) => v,
|
|
||||||
Err(e) => err!(e.to_string()),
|
|
||||||
},
|
|
||||||
None => err!("Grantee user not found."),
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
Ok(())
|
Ok(())
|
||||||
|
@ -252,7 +271,7 @@ async fn send_invite(data: JsonUpcase<EmergencyAccessInviteData>, headers: Heade
|
||||||
|
|
||||||
#[post("/emergency-access/<emer_id>/reinvite")]
|
#[post("/emergency-access/<emer_id>/reinvite")]
|
||||||
async fn resend_invite(emer_id: &str, headers: Headers, mut conn: DbConn) -> EmptyResult {
|
async fn resend_invite(emer_id: &str, headers: Headers, mut conn: DbConn) -> EmptyResult {
|
||||||
check_emergency_access_allowed()?;
|
check_emergency_access_enabled()?;
|
||||||
|
|
||||||
let mut emergency_access = match EmergencyAccess::find_by_uuid(emer_id, &mut conn).await {
|
let mut emergency_access = match EmergencyAccess::find_by_uuid(emer_id, &mut conn).await {
|
||||||
Some(emer) => emer,
|
Some(emer) => emer,
|
||||||
|
@ -288,17 +307,12 @@ async fn resend_invite(emer_id: &str, headers: Headers, mut conn: DbConn) -> Emp
|
||||||
&grantor_user.email,
|
&grantor_user.email,
|
||||||
)
|
)
|
||||||
.await?;
|
.await?;
|
||||||
} else {
|
} else if !grantee_user.password_hash.is_empty() {
|
||||||
if Invitation::find_by_mail(&email, &mut conn).await.is_none() {
|
// accept the invitation for existing user
|
||||||
let invitation = Invitation::new(&email);
|
emergency_access.accept_invite(&grantee_user.uuid, &email, &mut conn).await?;
|
||||||
invitation.save(&mut conn).await?;
|
} else if CONFIG.invitations_allowed() && Invitation::find_by_mail(&email, &mut conn).await.is_none() {
|
||||||
}
|
let invitation = Invitation::new(&email);
|
||||||
|
invitation.save(&mut conn).await?;
|
||||||
// Automatically mark user as accepted if no email invites
|
|
||||||
match accept_invite_process(&grantee_user.uuid, &mut emergency_access, &email, &mut conn).await {
|
|
||||||
Ok(v) => v,
|
|
||||||
Err(e) => err!(e.to_string()),
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
Ok(())
|
Ok(())
|
||||||
|
@ -312,14 +326,14 @@ struct AcceptData {
|
||||||
|
|
||||||
#[post("/emergency-access/<emer_id>/accept", data = "<data>")]
|
#[post("/emergency-access/<emer_id>/accept", data = "<data>")]
|
||||||
async fn accept_invite(emer_id: &str, data: JsonUpcase<AcceptData>, headers: Headers, mut conn: DbConn) -> EmptyResult {
|
async fn accept_invite(emer_id: &str, data: JsonUpcase<AcceptData>, headers: Headers, mut conn: DbConn) -> EmptyResult {
|
||||||
check_emergency_access_allowed()?;
|
check_emergency_access_enabled()?;
|
||||||
|
|
||||||
let data: AcceptData = data.into_inner().data;
|
let data: AcceptData = data.into_inner().data;
|
||||||
let token = &data.Token;
|
let token = &data.Token;
|
||||||
let claims = decode_emergency_access_invite(token)?;
|
let claims = decode_emergency_access_invite(token)?;
|
||||||
|
|
||||||
// This can happen if the user who received the invite used a different email to signup.
|
// This can happen if the user who received the invite used a different email to signup.
|
||||||
// Since we do not know if this is intented, we error out here and do nothing with the invite.
|
// Since we do not know if this is intended, we error out here and do nothing with the invite.
|
||||||
if claims.email != headers.user.email {
|
if claims.email != headers.user.email {
|
||||||
err!("Claim email does not match current users email")
|
err!("Claim email does not match current users email")
|
||||||
}
|
}
|
||||||
|
@ -347,10 +361,7 @@ async fn accept_invite(emer_id: &str, data: JsonUpcase<AcceptData>, headers: Hea
|
||||||
&& grantor_user.name == claims.grantor_name
|
&& grantor_user.name == claims.grantor_name
|
||||||
&& grantor_user.email == claims.grantor_email
|
&& grantor_user.email == claims.grantor_email
|
||||||
{
|
{
|
||||||
match accept_invite_process(&grantee_user.uuid, &mut emergency_access, &grantee_user.email, &mut conn).await {
|
emergency_access.accept_invite(&grantee_user.uuid, &grantee_user.email, &mut conn).await?;
|
||||||
Ok(v) => v,
|
|
||||||
Err(e) => err!(e.to_string()),
|
|
||||||
}
|
|
||||||
|
|
||||||
if CONFIG.mail_enabled() {
|
if CONFIG.mail_enabled() {
|
||||||
mail::send_emergency_access_invite_accepted(&grantor_user.email, &grantee_user.email).await?;
|
mail::send_emergency_access_invite_accepted(&grantor_user.email, &grantee_user.email).await?;
|
||||||
|
@ -362,26 +373,6 @@ async fn accept_invite(emer_id: &str, data: JsonUpcase<AcceptData>, headers: Hea
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
async fn accept_invite_process(
|
|
||||||
grantee_uuid: &str,
|
|
||||||
emergency_access: &mut EmergencyAccess,
|
|
||||||
grantee_email: &str,
|
|
||||||
conn: &mut DbConn,
|
|
||||||
) -> EmptyResult {
|
|
||||||
if emergency_access.email.is_none() || emergency_access.email.as_ref().unwrap() != grantee_email {
|
|
||||||
err!("User email does not match invite.");
|
|
||||||
}
|
|
||||||
|
|
||||||
if emergency_access.status == EmergencyAccessStatus::Accepted as i32 {
|
|
||||||
err!("Emergency contact already accepted.");
|
|
||||||
}
|
|
||||||
|
|
||||||
emergency_access.status = EmergencyAccessStatus::Accepted as i32;
|
|
||||||
emergency_access.grantee_uuid = Some(String::from(grantee_uuid));
|
|
||||||
emergency_access.email = None;
|
|
||||||
emergency_access.save(conn).await
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Deserialize)]
|
#[derive(Deserialize)]
|
||||||
#[allow(non_snake_case)]
|
#[allow(non_snake_case)]
|
||||||
struct ConfirmData {
|
struct ConfirmData {
|
||||||
|
@ -395,7 +386,7 @@ async fn confirm_emergency_access(
|
||||||
headers: Headers,
|
headers: Headers,
|
||||||
mut conn: DbConn,
|
mut conn: DbConn,
|
||||||
) -> JsonResult {
|
) -> JsonResult {
|
||||||
check_emergency_access_allowed()?;
|
check_emergency_access_enabled()?;
|
||||||
|
|
||||||
let confirming_user = headers.user;
|
let confirming_user = headers.user;
|
||||||
let data: ConfirmData = data.into_inner().data;
|
let data: ConfirmData = data.into_inner().data;
|
||||||
|
@ -444,7 +435,7 @@ async fn confirm_emergency_access(
|
||||||
|
|
||||||
#[post("/emergency-access/<emer_id>/initiate")]
|
#[post("/emergency-access/<emer_id>/initiate")]
|
||||||
async fn initiate_emergency_access(emer_id: &str, headers: Headers, mut conn: DbConn) -> JsonResult {
|
async fn initiate_emergency_access(emer_id: &str, headers: Headers, mut conn: DbConn) -> JsonResult {
|
||||||
check_emergency_access_allowed()?;
|
check_emergency_access_enabled()?;
|
||||||
|
|
||||||
let initiating_user = headers.user;
|
let initiating_user = headers.user;
|
||||||
let mut emergency_access = match EmergencyAccess::find_by_uuid(emer_id, &mut conn).await {
|
let mut emergency_access = match EmergencyAccess::find_by_uuid(emer_id, &mut conn).await {
|
||||||
|
@ -484,7 +475,7 @@ async fn initiate_emergency_access(emer_id: &str, headers: Headers, mut conn: Db
|
||||||
|
|
||||||
#[post("/emergency-access/<emer_id>/approve")]
|
#[post("/emergency-access/<emer_id>/approve")]
|
||||||
async fn approve_emergency_access(emer_id: &str, headers: Headers, mut conn: DbConn) -> JsonResult {
|
async fn approve_emergency_access(emer_id: &str, headers: Headers, mut conn: DbConn) -> JsonResult {
|
||||||
check_emergency_access_allowed()?;
|
check_emergency_access_enabled()?;
|
||||||
|
|
||||||
let mut emergency_access = match EmergencyAccess::find_by_uuid(emer_id, &mut conn).await {
|
let mut emergency_access = match EmergencyAccess::find_by_uuid(emer_id, &mut conn).await {
|
||||||
Some(emer) => emer,
|
Some(emer) => emer,
|
||||||
|
@ -522,7 +513,7 @@ async fn approve_emergency_access(emer_id: &str, headers: Headers, mut conn: DbC
|
||||||
|
|
||||||
#[post("/emergency-access/<emer_id>/reject")]
|
#[post("/emergency-access/<emer_id>/reject")]
|
||||||
async fn reject_emergency_access(emer_id: &str, headers: Headers, mut conn: DbConn) -> JsonResult {
|
async fn reject_emergency_access(emer_id: &str, headers: Headers, mut conn: DbConn) -> JsonResult {
|
||||||
check_emergency_access_allowed()?;
|
check_emergency_access_enabled()?;
|
||||||
|
|
||||||
let mut emergency_access = match EmergencyAccess::find_by_uuid(emer_id, &mut conn).await {
|
let mut emergency_access = match EmergencyAccess::find_by_uuid(emer_id, &mut conn).await {
|
||||||
Some(emer) => emer,
|
Some(emer) => emer,
|
||||||
|
@ -565,7 +556,7 @@ async fn reject_emergency_access(emer_id: &str, headers: Headers, mut conn: DbCo
|
||||||
|
|
||||||
#[post("/emergency-access/<emer_id>/view")]
|
#[post("/emergency-access/<emer_id>/view")]
|
||||||
async fn view_emergency_access(emer_id: &str, headers: Headers, mut conn: DbConn) -> JsonResult {
|
async fn view_emergency_access(emer_id: &str, headers: Headers, mut conn: DbConn) -> JsonResult {
|
||||||
check_emergency_access_allowed()?;
|
check_emergency_access_enabled()?;
|
||||||
|
|
||||||
let emergency_access = match EmergencyAccess::find_by_uuid(emer_id, &mut conn).await {
|
let emergency_access = match EmergencyAccess::find_by_uuid(emer_id, &mut conn).await {
|
||||||
Some(emer) => emer,
|
Some(emer) => emer,
|
||||||
|
@ -602,7 +593,7 @@ async fn view_emergency_access(emer_id: &str, headers: Headers, mut conn: DbConn
|
||||||
|
|
||||||
#[post("/emergency-access/<emer_id>/takeover")]
|
#[post("/emergency-access/<emer_id>/takeover")]
|
||||||
async fn takeover_emergency_access(emer_id: &str, headers: Headers, mut conn: DbConn) -> JsonResult {
|
async fn takeover_emergency_access(emer_id: &str, headers: Headers, mut conn: DbConn) -> JsonResult {
|
||||||
check_emergency_access_allowed()?;
|
check_emergency_access_enabled()?;
|
||||||
|
|
||||||
let requesting_user = headers.user;
|
let requesting_user = headers.user;
|
||||||
let emergency_access = match EmergencyAccess::find_by_uuid(emer_id, &mut conn).await {
|
let emergency_access = match EmergencyAccess::find_by_uuid(emer_id, &mut conn).await {
|
||||||
|
@ -645,7 +636,7 @@ async fn password_emergency_access(
|
||||||
headers: Headers,
|
headers: Headers,
|
||||||
mut conn: DbConn,
|
mut conn: DbConn,
|
||||||
) -> EmptyResult {
|
) -> EmptyResult {
|
||||||
check_emergency_access_allowed()?;
|
check_emergency_access_enabled()?;
|
||||||
|
|
||||||
let data: EmergencyAccessPasswordData = data.into_inner().data;
|
let data: EmergencyAccessPasswordData = data.into_inner().data;
|
||||||
let new_master_password_hash = &data.NewMasterPasswordHash;
|
let new_master_password_hash = &data.NewMasterPasswordHash;
|
||||||
|
@ -722,9 +713,9 @@ fn is_valid_request(
|
||||||
&& emergency_access.atype == requested_access_type as i32
|
&& emergency_access.atype == requested_access_type as i32
|
||||||
}
|
}
|
||||||
|
|
||||||
fn check_emergency_access_allowed() -> EmptyResult {
|
fn check_emergency_access_enabled() -> EmptyResult {
|
||||||
if !CONFIG.emergency_access_allowed() {
|
if !CONFIG.emergency_access_allowed() {
|
||||||
err!("Emergency access is not allowed.")
|
err!("Emergency access is not enabled.")
|
||||||
}
|
}
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
@ -746,7 +737,7 @@ pub async fn emergency_request_timeout_job(pool: DbPool) {
|
||||||
for mut emer in emergency_access_list {
|
for mut emer in emergency_access_list {
|
||||||
// The find_all_recoveries_initiated already checks if the recovery_initiated_at is not null (None)
|
// The find_all_recoveries_initiated already checks if the recovery_initiated_at is not null (None)
|
||||||
let recovery_allowed_at =
|
let recovery_allowed_at =
|
||||||
emer.recovery_initiated_at.unwrap() + Duration::days(i64::from(emer.wait_time_days));
|
emer.recovery_initiated_at.unwrap() + TimeDelta::try_days(i64::from(emer.wait_time_days)).unwrap();
|
||||||
if recovery_allowed_at.le(&now) {
|
if recovery_allowed_at.le(&now) {
|
||||||
// Only update the access status
|
// Only update the access status
|
||||||
// Updating the whole record could cause issues when the emergency_notification_reminder_job is also active
|
// Updating the whole record could cause issues when the emergency_notification_reminder_job is also active
|
||||||
|
@ -802,10 +793,10 @@ pub async fn emergency_notification_reminder_job(pool: DbPool) {
|
||||||
// The find_all_recoveries_initiated already checks if the recovery_initiated_at is not null (None)
|
// The find_all_recoveries_initiated already checks if the recovery_initiated_at is not null (None)
|
||||||
// Calculate the day before the recovery will become active
|
// Calculate the day before the recovery will become active
|
||||||
let final_recovery_reminder_at =
|
let final_recovery_reminder_at =
|
||||||
emer.recovery_initiated_at.unwrap() + Duration::days(i64::from(emer.wait_time_days - 1));
|
emer.recovery_initiated_at.unwrap() + TimeDelta::try_days(i64::from(emer.wait_time_days - 1)).unwrap();
|
||||||
// Calculate if a day has passed since the previous notification, else no notification has been sent before
|
// Calculate if a day has passed since the previous notification, else no notification has been sent before
|
||||||
let next_recovery_reminder_at = if let Some(last_notification_at) = emer.last_notification_at {
|
let next_recovery_reminder_at = if let Some(last_notification_at) = emer.last_notification_at {
|
||||||
last_notification_at + Duration::days(1)
|
last_notification_at + TimeDelta::try_days(1).unwrap()
|
||||||
} else {
|
} else {
|
||||||
now
|
now
|
||||||
};
|
};
|
||||||
|
|
|
@ -125,7 +125,7 @@ async fn get_user_events(
|
||||||
})))
|
})))
|
||||||
}
|
}
|
||||||
|
|
||||||
fn get_continuation_token(events_json: &Vec<Value>) -> Option<&str> {
|
fn get_continuation_token(events_json: &[Value]) -> Option<&str> {
|
||||||
// When the length of the vec equals the max page_size there probably is more data
|
// When the length of the vec equals the max page_size there probably is more data
|
||||||
// When it is less, then all events are loaded.
|
// When it is less, then all events are loaded.
|
||||||
if events_json.len() as i64 == Event::PAGE_SIZE {
|
if events_json.len() as i64 == Event::PAGE_SIZE {
|
||||||
|
@ -263,7 +263,7 @@ pub async fn log_event(
|
||||||
event_type: i32,
|
event_type: i32,
|
||||||
source_uuid: &str,
|
source_uuid: &str,
|
||||||
org_uuid: &str,
|
org_uuid: &str,
|
||||||
act_user_uuid: String,
|
act_user_uuid: &str,
|
||||||
device_type: i32,
|
device_type: i32,
|
||||||
ip: &IpAddr,
|
ip: &IpAddr,
|
||||||
conn: &mut DbConn,
|
conn: &mut DbConn,
|
||||||
|
@ -271,7 +271,7 @@ pub async fn log_event(
|
||||||
if !CONFIG.org_events_enabled() {
|
if !CONFIG.org_events_enabled() {
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
_log_event(event_type, source_uuid, org_uuid, &act_user_uuid, device_type, None, ip, conn).await;
|
_log_event(event_type, source_uuid, org_uuid, act_user_uuid, device_type, None, ip, conn).await;
|
||||||
}
|
}
|
||||||
|
|
||||||
#[allow(clippy::too_many_arguments)]
|
#[allow(clippy::too_many_arguments)]
|
||||||
|
@ -289,7 +289,7 @@ async fn _log_event(
|
||||||
let mut event = Event::new(event_type, event_date);
|
let mut event = Event::new(event_type, event_date);
|
||||||
match event_type {
|
match event_type {
|
||||||
// 1000..=1099 Are user events, they need to be logged via log_user_event()
|
// 1000..=1099 Are user events, they need to be logged via log_user_event()
|
||||||
// Collection Events
|
// Cipher Events
|
||||||
1100..=1199 => {
|
1100..=1199 => {
|
||||||
event.cipher_uuid = Some(String::from(source_uuid));
|
event.cipher_uuid = Some(String::from(source_uuid));
|
||||||
}
|
}
|
||||||
|
|
|
@ -8,11 +8,11 @@ mod public;
|
||||||
mod sends;
|
mod sends;
|
||||||
pub mod two_factor;
|
pub mod two_factor;
|
||||||
|
|
||||||
|
pub use accounts::purge_auth_requests;
|
||||||
pub use ciphers::{purge_trashed_ciphers, CipherData, CipherSyncData, CipherSyncType};
|
pub use ciphers::{purge_trashed_ciphers, CipherData, CipherSyncData, CipherSyncType};
|
||||||
pub use emergency_access::{emergency_notification_reminder_job, emergency_request_timeout_job};
|
pub use emergency_access::{emergency_notification_reminder_job, emergency_request_timeout_job};
|
||||||
pub use events::{event_cleanup_job, log_event, log_user_event};
|
pub use events::{event_cleanup_job, log_event, log_user_event};
|
||||||
pub use sends::purge_sends;
|
pub use sends::purge_sends;
|
||||||
pub use two_factor::send_incomplete_2fa_notifications;
|
|
||||||
|
|
||||||
pub fn routes() -> Vec<Route> {
|
pub fn routes() -> Vec<Route> {
|
||||||
let mut eq_domains_routes = routes![get_eq_domains, post_eq_domains, put_eq_domains];
|
let mut eq_domains_routes = routes![get_eq_domains, post_eq_domains, put_eq_domains];
|
||||||
|
@ -46,15 +46,14 @@ pub fn events_routes() -> Vec<Route> {
|
||||||
//
|
//
|
||||||
// Move this somewhere else
|
// Move this somewhere else
|
||||||
//
|
//
|
||||||
use rocket::{serde::json::Json, Catcher, Route};
|
use rocket::{serde::json::Json, serde::json::Value, Catcher, Route};
|
||||||
use serde_json::Value;
|
|
||||||
|
|
||||||
use crate::{
|
use crate::{
|
||||||
api::{JsonResult, JsonUpcase, Notify, UpdateType},
|
api::{JsonResult, JsonUpcase, Notify, UpdateType},
|
||||||
auth::Headers,
|
auth::Headers,
|
||||||
db::DbConn,
|
db::DbConn,
|
||||||
error::Error,
|
error::Error,
|
||||||
util::get_reqwest_client,
|
util::{get_reqwest_client, parse_experimental_client_feature_flags},
|
||||||
};
|
};
|
||||||
|
|
||||||
#[derive(Serialize, Deserialize, Debug)]
|
#[derive(Serialize, Deserialize, Debug)]
|
||||||
|
@ -192,12 +191,22 @@ fn version() -> Json<&'static str> {
|
||||||
#[get("/config")]
|
#[get("/config")]
|
||||||
fn config() -> Json<Value> {
|
fn config() -> Json<Value> {
|
||||||
let domain = crate::CONFIG.domain();
|
let domain = crate::CONFIG.domain();
|
||||||
|
let mut feature_states =
|
||||||
|
parse_experimental_client_feature_flags(&crate::CONFIG.experimental_client_feature_flags());
|
||||||
|
// Force the new key rotation feature
|
||||||
|
feature_states.insert("key-rotation-improvements".to_string(), true);
|
||||||
Json(json!({
|
Json(json!({
|
||||||
"version": crate::VERSION,
|
// Note: The clients use this version to handle backwards compatibility concerns
|
||||||
|
// This means they expect a version that closely matches the Bitwarden server version
|
||||||
|
// We should make sure that we keep this updated when we support the new server features
|
||||||
|
// Version history:
|
||||||
|
// - Individual cipher key encryption: 2023.9.1
|
||||||
|
"version": "2024.2.0",
|
||||||
"gitHash": option_env!("GIT_REV"),
|
"gitHash": option_env!("GIT_REV"),
|
||||||
"server": {
|
"server": {
|
||||||
"name": "Vaultwarden",
|
"name": "Vaultwarden",
|
||||||
"url": "https://github.com/dani-garcia/vaultwarden"
|
"url": "https://github.com/dani-garcia/vaultwarden",
|
||||||
|
"version": crate::VERSION
|
||||||
},
|
},
|
||||||
"environment": {
|
"environment": {
|
||||||
"vault": domain,
|
"vault": domain,
|
||||||
|
@ -206,6 +215,7 @@ fn config() -> Json<Value> {
|
||||||
"notifications": format!("{domain}/notifications"),
|
"notifications": format!("{domain}/notifications"),
|
||||||
"sso": "",
|
"sso": "",
|
||||||
},
|
},
|
||||||
|
"featureStates": feature_states,
|
||||||
"object": "config",
|
"object": "config",
|
||||||
}))
|
}))
|
||||||
}
|
}
|
||||||
|
|
|
@ -5,14 +5,14 @@ use serde_json::Value;
|
||||||
|
|
||||||
use crate::{
|
use crate::{
|
||||||
api::{
|
api::{
|
||||||
core::{log_event, CipherSyncData, CipherSyncType},
|
core::{log_event, two_factor, CipherSyncData, CipherSyncType},
|
||||||
EmptyResult, JsonResult, JsonUpcase, JsonUpcaseVec, JsonVec, Notify, NumberOrString, PasswordData, UpdateType,
|
EmptyResult, JsonResult, JsonUpcase, JsonUpcaseVec, JsonVec, Notify, PasswordOrOtpData, UpdateType,
|
||||||
},
|
},
|
||||||
auth::{decode_invite, AdminHeaders, Headers, ManagerHeaders, ManagerHeadersLoose, OwnerHeaders},
|
auth::{decode_invite, AdminHeaders, Headers, ManagerHeaders, ManagerHeadersLoose, OwnerHeaders},
|
||||||
db::{models::*, DbConn},
|
db::{models::*, DbConn},
|
||||||
error::Error,
|
error::Error,
|
||||||
mail,
|
mail,
|
||||||
util::convert_json_key_lcase_first,
|
util::{convert_json_key_lcase_first, NumberOrString},
|
||||||
CONFIG,
|
CONFIG,
|
||||||
};
|
};
|
||||||
|
|
||||||
|
@ -60,6 +60,7 @@ pub fn routes() -> Vec<Route> {
|
||||||
put_policy,
|
put_policy,
|
||||||
get_organization_tax,
|
get_organization_tax,
|
||||||
get_plans,
|
get_plans,
|
||||||
|
get_plans_all,
|
||||||
get_plans_tax_rates,
|
get_plans_tax_rates,
|
||||||
import,
|
import,
|
||||||
post_org_keys,
|
post_org_keys,
|
||||||
|
@ -185,16 +186,13 @@ async fn create_organization(headers: Headers, data: JsonUpcase<OrgData>, mut co
|
||||||
#[delete("/organizations/<org_id>", data = "<data>")]
|
#[delete("/organizations/<org_id>", data = "<data>")]
|
||||||
async fn delete_organization(
|
async fn delete_organization(
|
||||||
org_id: &str,
|
org_id: &str,
|
||||||
data: JsonUpcase<PasswordData>,
|
data: JsonUpcase<PasswordOrOtpData>,
|
||||||
headers: OwnerHeaders,
|
headers: OwnerHeaders,
|
||||||
mut conn: DbConn,
|
mut conn: DbConn,
|
||||||
) -> EmptyResult {
|
) -> EmptyResult {
|
||||||
let data: PasswordData = data.into_inner().data;
|
let data: PasswordOrOtpData = data.into_inner().data;
|
||||||
let password_hash = data.MasterPasswordHash;
|
|
||||||
|
|
||||||
if !headers.user.check_valid_password(&password_hash) {
|
data.validate(&headers.user, true, &mut conn).await?;
|
||||||
err!("Invalid password")
|
|
||||||
}
|
|
||||||
|
|
||||||
match Organization::find_by_uuid(org_id, &mut conn).await {
|
match Organization::find_by_uuid(org_id, &mut conn).await {
|
||||||
None => err!("Organization not found"),
|
None => err!("Organization not found"),
|
||||||
|
@ -205,7 +203,7 @@ async fn delete_organization(
|
||||||
#[post("/organizations/<org_id>/delete", data = "<data>")]
|
#[post("/organizations/<org_id>/delete", data = "<data>")]
|
||||||
async fn post_delete_organization(
|
async fn post_delete_organization(
|
||||||
org_id: &str,
|
org_id: &str,
|
||||||
data: JsonUpcase<PasswordData>,
|
data: JsonUpcase<PasswordOrOtpData>,
|
||||||
headers: OwnerHeaders,
|
headers: OwnerHeaders,
|
||||||
conn: DbConn,
|
conn: DbConn,
|
||||||
) -> EmptyResult {
|
) -> EmptyResult {
|
||||||
|
@ -227,7 +225,7 @@ async fn leave_organization(org_id: &str, headers: Headers, mut conn: DbConn) ->
|
||||||
EventType::OrganizationUserRemoved as i32,
|
EventType::OrganizationUserRemoved as i32,
|
||||||
&user_org.uuid,
|
&user_org.uuid,
|
||||||
org_id,
|
org_id,
|
||||||
headers.user.uuid.clone(),
|
&headers.user.uuid,
|
||||||
headers.device.atype,
|
headers.device.atype,
|
||||||
&headers.ip.ip,
|
&headers.ip.ip,
|
||||||
&mut conn,
|
&mut conn,
|
||||||
|
@ -280,7 +278,7 @@ async fn post_organization(
|
||||||
EventType::OrganizationUpdated as i32,
|
EventType::OrganizationUpdated as i32,
|
||||||
org_id,
|
org_id,
|
||||||
org_id,
|
org_id,
|
||||||
headers.user.uuid.clone(),
|
&headers.user.uuid,
|
||||||
headers.device.atype,
|
headers.device.atype,
|
||||||
&headers.ip.ip,
|
&headers.ip.ip,
|
||||||
&mut conn,
|
&mut conn,
|
||||||
|
@ -295,7 +293,7 @@ async fn post_organization(
|
||||||
async fn get_user_collections(headers: Headers, mut conn: DbConn) -> Json<Value> {
|
async fn get_user_collections(headers: Headers, mut conn: DbConn) -> Json<Value> {
|
||||||
Json(json!({
|
Json(json!({
|
||||||
"Data":
|
"Data":
|
||||||
Collection::find_by_user_uuid(headers.user.uuid.clone(), &mut conn).await
|
Collection::find_by_user_uuid(headers.user.uuid, &mut conn).await
|
||||||
.iter()
|
.iter()
|
||||||
.map(Collection::to_json)
|
.map(Collection::to_json)
|
||||||
.collect::<Value>(),
|
.collect::<Value>(),
|
||||||
|
@ -322,9 +320,29 @@ async fn get_org_collections_details(org_id: &str, headers: ManagerHeadersLoose,
|
||||||
None => err!("User is not part of organization"),
|
None => err!("User is not part of organization"),
|
||||||
};
|
};
|
||||||
|
|
||||||
|
// get all collection memberships for the current organization
|
||||||
let coll_users = CollectionUser::find_by_organization(org_id, &mut conn).await;
|
let coll_users = CollectionUser::find_by_organization(org_id, &mut conn).await;
|
||||||
|
|
||||||
|
// check if current user has full access to the organization (either directly or via any group)
|
||||||
|
let has_full_access_to_org = user_org.access_all
|
||||||
|
|| (CONFIG.org_groups_enabled()
|
||||||
|
&& GroupUser::has_full_access_by_member(org_id, &user_org.uuid, &mut conn).await);
|
||||||
|
|
||||||
for col in Collection::find_by_organization(org_id, &mut conn).await {
|
for col in Collection::find_by_organization(org_id, &mut conn).await {
|
||||||
|
// check whether the current user has access to the given collection
|
||||||
|
let assigned = has_full_access_to_org
|
||||||
|
|| CollectionUser::has_access_to_collection_by_user(&col.uuid, &user_org.user_uuid, &mut conn).await
|
||||||
|
|| (CONFIG.org_groups_enabled()
|
||||||
|
&& GroupUser::has_access_to_collection_by_member(&col.uuid, &user_org.uuid, &mut conn).await);
|
||||||
|
|
||||||
|
// get the users assigned directly to the given collection
|
||||||
|
let users: Vec<Value> = coll_users
|
||||||
|
.iter()
|
||||||
|
.filter(|collection_user| collection_user.collection_uuid == col.uuid)
|
||||||
|
.map(|collection_user| SelectionReadOnly::to_collection_user_details_read_only(collection_user).to_json())
|
||||||
|
.collect();
|
||||||
|
|
||||||
|
// get the group details for the given collection
|
||||||
let groups: Vec<Value> = if CONFIG.org_groups_enabled() {
|
let groups: Vec<Value> = if CONFIG.org_groups_enabled() {
|
||||||
CollectionGroup::find_by_collection(&col.uuid, &mut conn)
|
CollectionGroup::find_by_collection(&col.uuid, &mut conn)
|
||||||
.await
|
.await
|
||||||
|
@ -334,29 +352,9 @@ async fn get_org_collections_details(org_id: &str, headers: ManagerHeadersLoose,
|
||||||
})
|
})
|
||||||
.collect()
|
.collect()
|
||||||
} else {
|
} else {
|
||||||
// The Bitwarden clients seem to call this API regardless of whether groups are enabled,
|
|
||||||
// so just act as if there are no groups.
|
|
||||||
Vec::with_capacity(0)
|
Vec::with_capacity(0)
|
||||||
};
|
};
|
||||||
|
|
||||||
let mut assigned = false;
|
|
||||||
let users: Vec<Value> = coll_users
|
|
||||||
.iter()
|
|
||||||
.filter(|collection_user| collection_user.collection_uuid == col.uuid)
|
|
||||||
.map(|collection_user| {
|
|
||||||
// Remember `user_uuid` is swapped here with the `user_org.uuid` with a join during the `CollectionUser::find_by_organization` call.
|
|
||||||
// We check here if the current user is assigned to this collection or not.
|
|
||||||
if collection_user.user_uuid == user_org.uuid {
|
|
||||||
assigned = true;
|
|
||||||
}
|
|
||||||
SelectionReadOnly::to_collection_user_details_read_only(collection_user).to_json()
|
|
||||||
})
|
|
||||||
.collect();
|
|
||||||
|
|
||||||
if user_org.access_all {
|
|
||||||
assigned = true;
|
|
||||||
}
|
|
||||||
|
|
||||||
let mut json_object = col.to_json();
|
let mut json_object = col.to_json();
|
||||||
json_object["Assigned"] = json!(assigned);
|
json_object["Assigned"] = json!(assigned);
|
||||||
json_object["Users"] = json!(users);
|
json_object["Users"] = json!(users);
|
||||||
|
@ -397,7 +395,7 @@ async fn post_organization_collections(
|
||||||
EventType::CollectionCreated as i32,
|
EventType::CollectionCreated as i32,
|
||||||
&collection.uuid,
|
&collection.uuid,
|
||||||
org_id,
|
org_id,
|
||||||
headers.user.uuid.clone(),
|
&headers.user.uuid,
|
||||||
headers.device.atype,
|
headers.device.atype,
|
||||||
&headers.ip.ip,
|
&headers.ip.ip,
|
||||||
&mut conn,
|
&mut conn,
|
||||||
|
@ -478,7 +476,7 @@ async fn post_organization_collection_update(
|
||||||
EventType::CollectionUpdated as i32,
|
EventType::CollectionUpdated as i32,
|
||||||
&collection.uuid,
|
&collection.uuid,
|
||||||
org_id,
|
org_id,
|
||||||
headers.user.uuid.clone(),
|
&headers.user.uuid,
|
||||||
headers.device.atype,
|
headers.device.atype,
|
||||||
&headers.ip.ip,
|
&headers.ip.ip,
|
||||||
&mut conn,
|
&mut conn,
|
||||||
|
@ -566,7 +564,7 @@ async fn _delete_organization_collection(
|
||||||
EventType::CollectionDeleted as i32,
|
EventType::CollectionDeleted as i32,
|
||||||
&collection.uuid,
|
&collection.uuid,
|
||||||
org_id,
|
org_id,
|
||||||
headers.user.uuid.clone(),
|
&headers.user.uuid,
|
||||||
headers.device.atype,
|
headers.device.atype,
|
||||||
&headers.ip.ip,
|
&headers.ip.ip,
|
||||||
conn,
|
conn,
|
||||||
|
@ -612,7 +610,6 @@ async fn post_organization_collection_delete(
|
||||||
#[allow(non_snake_case)]
|
#[allow(non_snake_case)]
|
||||||
struct BulkCollectionIds {
|
struct BulkCollectionIds {
|
||||||
Ids: Vec<String>,
|
Ids: Vec<String>,
|
||||||
OrganizationId: String,
|
|
||||||
}
|
}
|
||||||
|
|
||||||
#[delete("/organizations/<org_id>/collections", data = "<data>")]
|
#[delete("/organizations/<org_id>/collections", data = "<data>")]
|
||||||
|
@ -623,9 +620,6 @@ async fn bulk_delete_organization_collections(
|
||||||
mut conn: DbConn,
|
mut conn: DbConn,
|
||||||
) -> EmptyResult {
|
) -> EmptyResult {
|
||||||
let data: BulkCollectionIds = data.into_inner().data;
|
let data: BulkCollectionIds = data.into_inner().data;
|
||||||
if org_id != data.OrganizationId {
|
|
||||||
err!("OrganizationId mismatch");
|
|
||||||
}
|
|
||||||
|
|
||||||
let collections = data.Ids;
|
let collections = data.Ids;
|
||||||
|
|
||||||
|
@ -670,24 +664,16 @@ async fn get_org_collection_detail(
|
||||||
Vec::with_capacity(0)
|
Vec::with_capacity(0)
|
||||||
};
|
};
|
||||||
|
|
||||||
let mut assigned = false;
|
|
||||||
let users: Vec<Value> =
|
let users: Vec<Value> =
|
||||||
CollectionUser::find_by_collection_swap_user_uuid_with_org_user_uuid(&collection.uuid, &mut conn)
|
CollectionUser::find_by_collection_swap_user_uuid_with_org_user_uuid(&collection.uuid, &mut conn)
|
||||||
.await
|
.await
|
||||||
.iter()
|
.iter()
|
||||||
.map(|collection_user| {
|
.map(|collection_user| {
|
||||||
// Remember `user_uuid` is swapped here with the `user_org.uuid` with a join during the `find_by_collection_swap_user_uuid_with_org_user_uuid` call.
|
|
||||||
// We check here if the current user is assigned to this collection or not.
|
|
||||||
if collection_user.user_uuid == user_org.uuid {
|
|
||||||
assigned = true;
|
|
||||||
}
|
|
||||||
SelectionReadOnly::to_collection_user_details_read_only(collection_user).to_json()
|
SelectionReadOnly::to_collection_user_details_read_only(collection_user).to_json()
|
||||||
})
|
})
|
||||||
.collect();
|
.collect();
|
||||||
|
|
||||||
if user_org.access_all {
|
let assigned = Collection::can_access_collection(&user_org, &collection.uuid, &mut conn).await;
|
||||||
assigned = true;
|
|
||||||
}
|
|
||||||
|
|
||||||
let mut json_object = collection.to_json();
|
let mut json_object = collection.to_json();
|
||||||
json_object["Assigned"] = json!(assigned);
|
json_object["Assigned"] = json!(assigned);
|
||||||
|
@ -947,7 +933,7 @@ async fn send_invite(
|
||||||
EventType::OrganizationUserInvited as i32,
|
EventType::OrganizationUserInvited as i32,
|
||||||
&new_user.uuid,
|
&new_user.uuid,
|
||||||
org_id,
|
org_id,
|
||||||
headers.user.uuid.clone(),
|
&headers.user.uuid,
|
||||||
headers.device.atype,
|
headers.device.atype,
|
||||||
&headers.ip.ip,
|
&headers.ip.ip,
|
||||||
&mut conn,
|
&mut conn,
|
||||||
|
@ -1077,7 +1063,7 @@ async fn accept_invite(
|
||||||
let claims = decode_invite(&data.Token)?;
|
let claims = decode_invite(&data.Token)?;
|
||||||
|
|
||||||
match User::find_by_mail(&claims.email, &mut conn).await {
|
match User::find_by_mail(&claims.email, &mut conn).await {
|
||||||
Some(_) => {
|
Some(user) => {
|
||||||
Invitation::take(&claims.email, &mut conn).await;
|
Invitation::take(&claims.email, &mut conn).await;
|
||||||
|
|
||||||
if let (Some(user_org), Some(org)) = (&claims.user_org_id, &claims.org_id) {
|
if let (Some(user_org), Some(org)) = (&claims.user_org_id, &claims.org_id) {
|
||||||
|
@ -1101,7 +1087,11 @@ async fn accept_invite(
|
||||||
match OrgPolicy::is_user_allowed(&user_org.user_uuid, org_id, false, &mut conn).await {
|
match OrgPolicy::is_user_allowed(&user_org.user_uuid, org_id, false, &mut conn).await {
|
||||||
Ok(_) => {}
|
Ok(_) => {}
|
||||||
Err(OrgPolicyErr::TwoFactorMissing) => {
|
Err(OrgPolicyErr::TwoFactorMissing) => {
|
||||||
err!("You cannot join this organization until you enable two-step login on your user account");
|
if CONFIG.email_2fa_auto_fallback() {
|
||||||
|
two_factor::email::activate_email_2fa(&user, &mut conn).await?;
|
||||||
|
} else {
|
||||||
|
err!("You cannot join this organization until you enable two-step login on your user account");
|
||||||
|
}
|
||||||
}
|
}
|
||||||
Err(OrgPolicyErr::SingleOrgEnforced) => {
|
Err(OrgPolicyErr::SingleOrgEnforced) => {
|
||||||
err!("You cannot join this organization because you are a member of an organization which forbids it");
|
err!("You cannot join this organization because you are a member of an organization which forbids it");
|
||||||
|
@ -1226,10 +1216,14 @@ async fn _confirm_invite(
|
||||||
match OrgPolicy::is_user_allowed(&user_to_confirm.user_uuid, org_id, true, conn).await {
|
match OrgPolicy::is_user_allowed(&user_to_confirm.user_uuid, org_id, true, conn).await {
|
||||||
Ok(_) => {}
|
Ok(_) => {}
|
||||||
Err(OrgPolicyErr::TwoFactorMissing) => {
|
Err(OrgPolicyErr::TwoFactorMissing) => {
|
||||||
err!("You cannot confirm this user because it has no two-step login method activated");
|
if CONFIG.email_2fa_auto_fallback() {
|
||||||
|
two_factor::email::find_and_activate_email_2fa(&user_to_confirm.user_uuid, conn).await?;
|
||||||
|
} else {
|
||||||
|
err!("You cannot confirm this user because they have not setup 2FA");
|
||||||
|
}
|
||||||
}
|
}
|
||||||
Err(OrgPolicyErr::SingleOrgEnforced) => {
|
Err(OrgPolicyErr::SingleOrgEnforced) => {
|
||||||
err!("You cannot confirm this user because it is a member of an organization which forbids it");
|
err!("You cannot confirm this user because they are a member of an organization which forbids it");
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -1241,7 +1235,7 @@ async fn _confirm_invite(
|
||||||
EventType::OrganizationUserConfirmed as i32,
|
EventType::OrganizationUserConfirmed as i32,
|
||||||
&user_to_confirm.uuid,
|
&user_to_confirm.uuid,
|
||||||
org_id,
|
org_id,
|
||||||
headers.user.uuid.clone(),
|
&headers.user.uuid,
|
||||||
headers.device.atype,
|
headers.device.atype,
|
||||||
&headers.ip.ip,
|
&headers.ip.ip,
|
||||||
conn,
|
conn,
|
||||||
|
@ -1357,10 +1351,14 @@ async fn edit_user(
|
||||||
match OrgPolicy::is_user_allowed(&user_to_edit.user_uuid, org_id, true, &mut conn).await {
|
match OrgPolicy::is_user_allowed(&user_to_edit.user_uuid, org_id, true, &mut conn).await {
|
||||||
Ok(_) => {}
|
Ok(_) => {}
|
||||||
Err(OrgPolicyErr::TwoFactorMissing) => {
|
Err(OrgPolicyErr::TwoFactorMissing) => {
|
||||||
err!("You cannot modify this user to this type because it has no two-step login method activated");
|
if CONFIG.email_2fa_auto_fallback() {
|
||||||
|
two_factor::email::find_and_activate_email_2fa(&user_to_edit.user_uuid, &mut conn).await?;
|
||||||
|
} else {
|
||||||
|
err!("You cannot modify this user to this type because they have not setup 2FA");
|
||||||
|
}
|
||||||
}
|
}
|
||||||
Err(OrgPolicyErr::SingleOrgEnforced) => {
|
Err(OrgPolicyErr::SingleOrgEnforced) => {
|
||||||
err!("You cannot modify this user to this type because it is a member of an organization which forbids it");
|
err!("You cannot modify this user to this type because they are a member of an organization which forbids it");
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -1403,7 +1401,7 @@ async fn edit_user(
|
||||||
EventType::OrganizationUserUpdated as i32,
|
EventType::OrganizationUserUpdated as i32,
|
||||||
&user_to_edit.uuid,
|
&user_to_edit.uuid,
|
||||||
org_id,
|
org_id,
|
||||||
headers.user.uuid.clone(),
|
&headers.user.uuid,
|
||||||
headers.device.atype,
|
headers.device.atype,
|
||||||
&headers.ip.ip,
|
&headers.ip.ip,
|
||||||
&mut conn,
|
&mut conn,
|
||||||
|
@ -1495,7 +1493,7 @@ async fn _delete_user(
|
||||||
EventType::OrganizationUserRemoved as i32,
|
EventType::OrganizationUserRemoved as i32,
|
||||||
&user_to_delete.uuid,
|
&user_to_delete.uuid,
|
||||||
org_id,
|
org_id,
|
||||||
headers.user.uuid.clone(),
|
&headers.user.uuid,
|
||||||
headers.device.atype,
|
headers.device.atype,
|
||||||
&headers.ip.ip,
|
&headers.ip.ip,
|
||||||
conn,
|
conn,
|
||||||
|
@ -1519,9 +1517,9 @@ async fn bulk_public_keys(
|
||||||
let data: OrgBulkIds = data.into_inner().data;
|
let data: OrgBulkIds = data.into_inner().data;
|
||||||
|
|
||||||
let mut bulk_response = Vec::new();
|
let mut bulk_response = Vec::new();
|
||||||
// Check all received UserOrg UUID's and find the matching User to retreive the public-key.
|
// Check all received UserOrg UUID's and find the matching User to retrieve the public-key.
|
||||||
// If the user does not exists, just ignore it, and do not return any information regarding that UserOrg UUID.
|
// If the user does not exists, just ignore it, and do not return any information regarding that UserOrg UUID.
|
||||||
// The web-vault will then ignore that user for the folowing steps.
|
// The web-vault will then ignore that user for the following steps.
|
||||||
for user_org_id in data.Ids {
|
for user_org_id in data.Ids {
|
||||||
match UserOrganization::find_by_uuid_and_org(&user_org_id, org_id, &mut conn).await {
|
match UserOrganization::find_by_uuid_and_org(&user_org_id, org_id, &mut conn).await {
|
||||||
Some(user_org) => match User::find_by_uuid(&user_org.user_uuid, &mut conn).await {
|
Some(user_org) => match User::find_by_uuid(&user_org.user_uuid, &mut conn).await {
|
||||||
|
@ -1604,7 +1602,7 @@ async fn post_org_import(
|
||||||
let mut ciphers = Vec::new();
|
let mut ciphers = Vec::new();
|
||||||
for cipher_data in data.Ciphers {
|
for cipher_data in data.Ciphers {
|
||||||
let mut cipher = Cipher::new(cipher_data.Type, cipher_data.Name.clone());
|
let mut cipher = Cipher::new(cipher_data.Type, cipher_data.Name.clone());
|
||||||
update_cipher_from_data(&mut cipher, cipher_data, &headers, false, &mut conn, &nt, UpdateType::None).await.ok();
|
update_cipher_from_data(&mut cipher, cipher_data, &headers, None, &mut conn, &nt, UpdateType::None).await.ok();
|
||||||
ciphers.push(cipher);
|
ciphers.push(cipher);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1698,38 +1696,16 @@ async fn put_policy(
|
||||||
None => err!("Invalid or unsupported policy type"),
|
None => err!("Invalid or unsupported policy type"),
|
||||||
};
|
};
|
||||||
|
|
||||||
// When enabling the TwoFactorAuthentication policy, remove this org's members that do have 2FA
|
// When enabling the TwoFactorAuthentication policy, revoke all members that do not have 2FA
|
||||||
if pol_type_enum == OrgPolicyType::TwoFactorAuthentication && data.enabled {
|
if pol_type_enum == OrgPolicyType::TwoFactorAuthentication && data.enabled {
|
||||||
for member in UserOrganization::find_by_org(org_id, &mut conn).await.into_iter() {
|
two_factor::enforce_2fa_policy_for_org(
|
||||||
let user_twofactor_disabled = TwoFactor::find_by_user(&member.user_uuid, &mut conn).await.is_empty();
|
org_id,
|
||||||
|
&headers.user.uuid,
|
||||||
// Policy only applies to non-Owner/non-Admin members who have accepted joining the org
|
headers.device.atype,
|
||||||
// Invited users still need to accept the invite and will get an error when they try to accept the invite.
|
&headers.ip.ip,
|
||||||
if user_twofactor_disabled
|
&mut conn,
|
||||||
&& member.atype < UserOrgType::Admin
|
)
|
||||||
&& member.status != UserOrgStatus::Invited as i32
|
.await?;
|
||||||
{
|
|
||||||
if CONFIG.mail_enabled() {
|
|
||||||
let org = Organization::find_by_uuid(&member.org_uuid, &mut conn).await.unwrap();
|
|
||||||
let user = User::find_by_uuid(&member.user_uuid, &mut conn).await.unwrap();
|
|
||||||
|
|
||||||
mail::send_2fa_removed_from_org(&user.email, &org.name).await?;
|
|
||||||
}
|
|
||||||
|
|
||||||
log_event(
|
|
||||||
EventType::OrganizationUserRemoved as i32,
|
|
||||||
&member.uuid,
|
|
||||||
org_id,
|
|
||||||
headers.user.uuid.clone(),
|
|
||||||
headers.device.atype,
|
|
||||||
&headers.ip.ip,
|
|
||||||
&mut conn,
|
|
||||||
)
|
|
||||||
.await;
|
|
||||||
|
|
||||||
member.delete(&mut conn).await?;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// When enabling the SingleOrg policy, remove this org's members that are members of other orgs
|
// When enabling the SingleOrg policy, remove this org's members that are members of other orgs
|
||||||
|
@ -1754,7 +1730,7 @@ async fn put_policy(
|
||||||
EventType::OrganizationUserRemoved as i32,
|
EventType::OrganizationUserRemoved as i32,
|
||||||
&member.uuid,
|
&member.uuid,
|
||||||
org_id,
|
org_id,
|
||||||
headers.user.uuid.clone(),
|
&headers.user.uuid,
|
||||||
headers.device.atype,
|
headers.device.atype,
|
||||||
&headers.ip.ip,
|
&headers.ip.ip,
|
||||||
&mut conn,
|
&mut conn,
|
||||||
|
@ -1779,7 +1755,7 @@ async fn put_policy(
|
||||||
EventType::PolicyUpdated as i32,
|
EventType::PolicyUpdated as i32,
|
||||||
&policy.uuid,
|
&policy.uuid,
|
||||||
org_id,
|
org_id,
|
||||||
headers.user.uuid.clone(),
|
&headers.user.uuid,
|
||||||
headers.device.atype,
|
headers.device.atype,
|
||||||
&headers.ip.ip,
|
&headers.ip.ip,
|
||||||
&mut conn,
|
&mut conn,
|
||||||
|
@ -1810,12 +1786,28 @@ fn get_plans() -> Json<Value> {
|
||||||
"Product": 0,
|
"Product": 0,
|
||||||
"Name": "Free",
|
"Name": "Free",
|
||||||
"NameLocalizationKey": "planNameFree",
|
"NameLocalizationKey": "planNameFree",
|
||||||
|
"BitwardenProduct": 0,
|
||||||
|
"MaxUsers": 0,
|
||||||
|
"DescriptionLocalizationKey": "planDescFree"
|
||||||
|
},{
|
||||||
|
"Object": "plan",
|
||||||
|
"Type": 0,
|
||||||
|
"Product": 1,
|
||||||
|
"Name": "Free",
|
||||||
|
"NameLocalizationKey": "planNameFree",
|
||||||
|
"BitwardenProduct": 1,
|
||||||
|
"MaxUsers": 0,
|
||||||
"DescriptionLocalizationKey": "planDescFree"
|
"DescriptionLocalizationKey": "planDescFree"
|
||||||
}],
|
}],
|
||||||
"ContinuationToken": null
|
"ContinuationToken": null
|
||||||
}))
|
}))
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[get("/plans/all")]
|
||||||
|
fn get_plans_all() -> Json<Value> {
|
||||||
|
get_plans()
|
||||||
|
}
|
||||||
|
|
||||||
#[get("/plans/sales-tax-rates")]
|
#[get("/plans/sales-tax-rates")]
|
||||||
fn get_plans_tax_rates(_headers: Headers) -> Json<Value> {
|
fn get_plans_tax_rates(_headers: Headers) -> Json<Value> {
|
||||||
// Prevent a 404 error, which also causes Javascript errors.
|
// Prevent a 404 error, which also causes Javascript errors.
|
||||||
|
@ -1865,7 +1857,7 @@ async fn import(org_id: &str, data: JsonUpcase<OrgImportData>, headers: Headers,
|
||||||
// This means that this endpoint can end up removing users that were added manually by an admin,
|
// This means that this endpoint can end up removing users that were added manually by an admin,
|
||||||
// as opposed to upstream which only removes auto-imported users.
|
// as opposed to upstream which only removes auto-imported users.
|
||||||
|
|
||||||
// User needs to be admin or owner to use the Directry Connector
|
// User needs to be admin or owner to use the Directory Connector
|
||||||
match UserOrganization::find_by_user_and_org(&headers.user.uuid, org_id, &mut conn).await {
|
match UserOrganization::find_by_user_and_org(&headers.user.uuid, org_id, &mut conn).await {
|
||||||
Some(user_org) if user_org.atype >= UserOrgType::Admin => { /* Okay, nothing to do */ }
|
Some(user_org) if user_org.atype >= UserOrgType::Admin => { /* Okay, nothing to do */ }
|
||||||
Some(_) => err!("User has insufficient permissions to use Directory Connector"),
|
Some(_) => err!("User has insufficient permissions to use Directory Connector"),
|
||||||
|
@ -1880,7 +1872,7 @@ async fn import(org_id: &str, data: JsonUpcase<OrgImportData>, headers: Headers,
|
||||||
EventType::OrganizationUserRemoved as i32,
|
EventType::OrganizationUserRemoved as i32,
|
||||||
&user_org.uuid,
|
&user_org.uuid,
|
||||||
org_id,
|
org_id,
|
||||||
headers.user.uuid.clone(),
|
&headers.user.uuid,
|
||||||
headers.device.atype,
|
headers.device.atype,
|
||||||
&headers.ip.ip,
|
&headers.ip.ip,
|
||||||
&mut conn,
|
&mut conn,
|
||||||
|
@ -1910,7 +1902,7 @@ async fn import(org_id: &str, data: JsonUpcase<OrgImportData>, headers: Headers,
|
||||||
EventType::OrganizationUserInvited as i32,
|
EventType::OrganizationUserInvited as i32,
|
||||||
&new_org_user.uuid,
|
&new_org_user.uuid,
|
||||||
org_id,
|
org_id,
|
||||||
headers.user.uuid.clone(),
|
&headers.user.uuid,
|
||||||
headers.device.atype,
|
headers.device.atype,
|
||||||
&headers.ip.ip,
|
&headers.ip.ip,
|
||||||
&mut conn,
|
&mut conn,
|
||||||
|
@ -1946,7 +1938,7 @@ async fn import(org_id: &str, data: JsonUpcase<OrgImportData>, headers: Headers,
|
||||||
EventType::OrganizationUserRemoved as i32,
|
EventType::OrganizationUserRemoved as i32,
|
||||||
&user_org.uuid,
|
&user_org.uuid,
|
||||||
org_id,
|
org_id,
|
||||||
headers.user.uuid.clone(),
|
&headers.user.uuid,
|
||||||
headers.device.atype,
|
headers.device.atype,
|
||||||
&headers.ip.ip,
|
&headers.ip.ip,
|
||||||
&mut conn,
|
&mut conn,
|
||||||
|
@ -2059,7 +2051,7 @@ async fn _revoke_organization_user(
|
||||||
EventType::OrganizationUserRevoked as i32,
|
EventType::OrganizationUserRevoked as i32,
|
||||||
&user_org.uuid,
|
&user_org.uuid,
|
||||||
org_id,
|
org_id,
|
||||||
headers.user.uuid.clone(),
|
&headers.user.uuid,
|
||||||
headers.device.atype,
|
headers.device.atype,
|
||||||
&headers.ip.ip,
|
&headers.ip.ip,
|
||||||
conn,
|
conn,
|
||||||
|
@ -2163,10 +2155,14 @@ async fn _restore_organization_user(
|
||||||
match OrgPolicy::is_user_allowed(&user_org.user_uuid, org_id, false, conn).await {
|
match OrgPolicy::is_user_allowed(&user_org.user_uuid, org_id, false, conn).await {
|
||||||
Ok(_) => {}
|
Ok(_) => {}
|
||||||
Err(OrgPolicyErr::TwoFactorMissing) => {
|
Err(OrgPolicyErr::TwoFactorMissing) => {
|
||||||
err!("You cannot restore this user because it has no two-step login method activated");
|
if CONFIG.email_2fa_auto_fallback() {
|
||||||
|
two_factor::email::find_and_activate_email_2fa(&user_org.user_uuid, conn).await?;
|
||||||
|
} else {
|
||||||
|
err!("You cannot restore this user because they have not setup 2FA");
|
||||||
|
}
|
||||||
}
|
}
|
||||||
Err(OrgPolicyErr::SingleOrgEnforced) => {
|
Err(OrgPolicyErr::SingleOrgEnforced) => {
|
||||||
err!("You cannot restore this user because it is a member of an organization which forbids it");
|
err!("You cannot restore this user because they are a member of an organization which forbids it");
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -2178,7 +2174,7 @@ async fn _restore_organization_user(
|
||||||
EventType::OrganizationUserRestored as i32,
|
EventType::OrganizationUserRestored as i32,
|
||||||
&user_org.uuid,
|
&user_org.uuid,
|
||||||
org_id,
|
org_id,
|
||||||
headers.user.uuid.clone(),
|
&headers.user.uuid,
|
||||||
headers.device.atype,
|
headers.device.atype,
|
||||||
&headers.ip.ip,
|
&headers.ip.ip,
|
||||||
conn,
|
conn,
|
||||||
|
@ -2235,7 +2231,7 @@ impl GroupRequest {
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn update_group(&self, mut group: Group) -> Group {
|
pub fn update_group(&self, mut group: Group) -> Group {
|
||||||
group.name = self.Name.clone();
|
group.name.clone_from(&self.Name);
|
||||||
group.access_all = self.AccessAll.unwrap_or(false);
|
group.access_all = self.AccessAll.unwrap_or(false);
|
||||||
// Group Updates do not support changing the external_id
|
// Group Updates do not support changing the external_id
|
||||||
// These input fields are in a disabled state, and can only be updated/added via ldap_import
|
// These input fields are in a disabled state, and can only be updated/added via ldap_import
|
||||||
|
@ -2307,7 +2303,7 @@ async fn post_groups(
|
||||||
EventType::GroupCreated as i32,
|
EventType::GroupCreated as i32,
|
||||||
&group.uuid,
|
&group.uuid,
|
||||||
org_id,
|
org_id,
|
||||||
headers.user.uuid.clone(),
|
&headers.user.uuid,
|
||||||
headers.device.atype,
|
headers.device.atype,
|
||||||
&headers.ip.ip,
|
&headers.ip.ip,
|
||||||
&mut conn,
|
&mut conn,
|
||||||
|
@ -2344,7 +2340,7 @@ async fn put_group(
|
||||||
EventType::GroupUpdated as i32,
|
EventType::GroupUpdated as i32,
|
||||||
&updated_group.uuid,
|
&updated_group.uuid,
|
||||||
org_id,
|
org_id,
|
||||||
headers.user.uuid.clone(),
|
&headers.user.uuid,
|
||||||
headers.device.atype,
|
headers.device.atype,
|
||||||
&headers.ip.ip,
|
&headers.ip.ip,
|
||||||
&mut conn,
|
&mut conn,
|
||||||
|
@ -2377,7 +2373,7 @@ async fn add_update_group(
|
||||||
EventType::OrganizationUserUpdatedGroups as i32,
|
EventType::OrganizationUserUpdatedGroups as i32,
|
||||||
&assigned_user_id,
|
&assigned_user_id,
|
||||||
org_id,
|
org_id,
|
||||||
headers.user.uuid.clone(),
|
&headers.user.uuid,
|
||||||
headers.device.atype,
|
headers.device.atype,
|
||||||
&headers.ip.ip,
|
&headers.ip.ip,
|
||||||
conn,
|
conn,
|
||||||
|
@ -2432,7 +2428,7 @@ async fn _delete_group(org_id: &str, group_id: &str, headers: &AdminHeaders, con
|
||||||
EventType::GroupDeleted as i32,
|
EventType::GroupDeleted as i32,
|
||||||
&group.uuid,
|
&group.uuid,
|
||||||
org_id,
|
org_id,
|
||||||
headers.user.uuid.clone(),
|
&headers.user.uuid,
|
||||||
headers.device.atype,
|
headers.device.atype,
|
||||||
&headers.ip.ip,
|
&headers.ip.ip,
|
||||||
conn,
|
conn,
|
||||||
|
@ -2523,7 +2519,7 @@ async fn put_group_users(
|
||||||
EventType::OrganizationUserUpdatedGroups as i32,
|
EventType::OrganizationUserUpdatedGroups as i32,
|
||||||
&assigned_user_id,
|
&assigned_user_id,
|
||||||
org_id,
|
org_id,
|
||||||
headers.user.uuid.clone(),
|
&headers.user.uuid,
|
||||||
headers.device.atype,
|
headers.device.atype,
|
||||||
&headers.ip.ip,
|
&headers.ip.ip,
|
||||||
&mut conn,
|
&mut conn,
|
||||||
|
@ -2601,7 +2597,7 @@ async fn put_user_groups(
|
||||||
EventType::OrganizationUserUpdatedGroups as i32,
|
EventType::OrganizationUserUpdatedGroups as i32,
|
||||||
org_user_id,
|
org_user_id,
|
||||||
org_id,
|
org_id,
|
||||||
headers.user.uuid.clone(),
|
&headers.user.uuid,
|
||||||
headers.device.atype,
|
headers.device.atype,
|
||||||
&headers.ip.ip,
|
&headers.ip.ip,
|
||||||
&mut conn,
|
&mut conn,
|
||||||
|
@ -2656,7 +2652,7 @@ async fn delete_group_user(
|
||||||
EventType::OrganizationUserUpdatedGroups as i32,
|
EventType::OrganizationUserUpdatedGroups as i32,
|
||||||
org_user_id,
|
org_user_id,
|
||||||
org_id,
|
org_id,
|
||||||
headers.user.uuid.clone(),
|
&headers.user.uuid,
|
||||||
headers.device.atype,
|
headers.device.atype,
|
||||||
&headers.ip.ip,
|
&headers.ip.ip,
|
||||||
&mut conn,
|
&mut conn,
|
||||||
|
@ -2671,6 +2667,7 @@ async fn delete_group_user(
|
||||||
struct OrganizationUserResetPasswordEnrollmentRequest {
|
struct OrganizationUserResetPasswordEnrollmentRequest {
|
||||||
ResetPasswordKey: Option<String>,
|
ResetPasswordKey: Option<String>,
|
||||||
MasterPasswordHash: Option<String>,
|
MasterPasswordHash: Option<String>,
|
||||||
|
Otp: Option<String>,
|
||||||
}
|
}
|
||||||
|
|
||||||
#[derive(Deserialize)]
|
#[derive(Deserialize)]
|
||||||
|
@ -2745,7 +2742,7 @@ async fn put_reset_password(
|
||||||
EventType::OrganizationUserAdminResetPassword as i32,
|
EventType::OrganizationUserAdminResetPassword as i32,
|
||||||
org_user_id,
|
org_user_id,
|
||||||
org_id,
|
org_id,
|
||||||
headers.user.uuid.clone(),
|
&headers.user.uuid,
|
||||||
headers.device.atype,
|
headers.device.atype,
|
||||||
&headers.ip.ip,
|
&headers.ip.ip,
|
||||||
&mut conn,
|
&mut conn,
|
||||||
|
@ -2853,14 +2850,12 @@ async fn put_reset_password_enrollment(
|
||||||
}
|
}
|
||||||
|
|
||||||
if reset_request.ResetPasswordKey.is_some() {
|
if reset_request.ResetPasswordKey.is_some() {
|
||||||
match reset_request.MasterPasswordHash {
|
PasswordOrOtpData {
|
||||||
Some(password) => {
|
MasterPasswordHash: reset_request.MasterPasswordHash,
|
||||||
if !headers.user.check_valid_password(&password) {
|
Otp: reset_request.Otp,
|
||||||
err!("Invalid or wrong password")
|
}
|
||||||
}
|
.validate(&headers.user, true, &mut conn)
|
||||||
}
|
.await?;
|
||||||
None => err!("No password provided"),
|
|
||||||
};
|
|
||||||
}
|
}
|
||||||
|
|
||||||
org_user.reset_password_key = reset_request.ResetPasswordKey;
|
org_user.reset_password_key = reset_request.ResetPasswordKey;
|
||||||
|
@ -2872,15 +2867,14 @@ async fn put_reset_password_enrollment(
|
||||||
EventType::OrganizationUserResetPasswordWithdraw as i32
|
EventType::OrganizationUserResetPasswordWithdraw as i32
|
||||||
};
|
};
|
||||||
|
|
||||||
log_event(log_id, org_user_id, org_id, headers.user.uuid.clone(), headers.device.atype, &headers.ip.ip, &mut conn)
|
log_event(log_id, org_user_id, org_id, &headers.user.uuid, headers.device.atype, &headers.ip.ip, &mut conn).await;
|
||||||
.await;
|
|
||||||
|
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
// This is a new function active since the v2022.9.x clients.
|
// This is a new function active since the v2022.9.x clients.
|
||||||
// It combines the previous two calls done before.
|
// It combines the previous two calls done before.
|
||||||
// We call those two functions here and combine them our selfs.
|
// We call those two functions here and combine them ourselves.
|
||||||
//
|
//
|
||||||
// NOTE: It seems clients can't handle uppercase-first keys!!
|
// NOTE: It seems clients can't handle uppercase-first keys!!
|
||||||
// We need to convert all keys so they have the first character to be a lowercase.
|
// We need to convert all keys so they have the first character to be a lowercase.
|
||||||
|
@ -2928,18 +2922,16 @@ async fn get_org_export(org_id: &str, headers: AdminHeaders, mut conn: DbConn) -
|
||||||
|
|
||||||
async fn _api_key(
|
async fn _api_key(
|
||||||
org_id: &str,
|
org_id: &str,
|
||||||
data: JsonUpcase<PasswordData>,
|
data: JsonUpcase<PasswordOrOtpData>,
|
||||||
rotate: bool,
|
rotate: bool,
|
||||||
headers: AdminHeaders,
|
headers: AdminHeaders,
|
||||||
conn: DbConn,
|
mut conn: DbConn,
|
||||||
) -> JsonResult {
|
) -> JsonResult {
|
||||||
let data: PasswordData = data.into_inner().data;
|
let data: PasswordOrOtpData = data.into_inner().data;
|
||||||
let user = headers.user;
|
let user = headers.user;
|
||||||
|
|
||||||
// Validate the admin users password
|
// Validate the admin users password/otp
|
||||||
if !user.check_valid_password(&data.MasterPasswordHash) {
|
data.validate(&user, true, &mut conn).await?;
|
||||||
err!("Invalid password")
|
|
||||||
}
|
|
||||||
|
|
||||||
let org_api_key = match OrganizationApiKey::find_by_org_uuid(org_id, &conn).await {
|
let org_api_key = match OrganizationApiKey::find_by_org_uuid(org_id, &conn).await {
|
||||||
Some(mut org_api_key) => {
|
Some(mut org_api_key) => {
|
||||||
|
@ -2966,14 +2958,14 @@ async fn _api_key(
|
||||||
}
|
}
|
||||||
|
|
||||||
#[post("/organizations/<org_id>/api-key", data = "<data>")]
|
#[post("/organizations/<org_id>/api-key", data = "<data>")]
|
||||||
async fn api_key(org_id: &str, data: JsonUpcase<PasswordData>, headers: AdminHeaders, conn: DbConn) -> JsonResult {
|
async fn api_key(org_id: &str, data: JsonUpcase<PasswordOrOtpData>, headers: AdminHeaders, conn: DbConn) -> JsonResult {
|
||||||
_api_key(org_id, data, false, headers, conn).await
|
_api_key(org_id, data, false, headers, conn).await
|
||||||
}
|
}
|
||||||
|
|
||||||
#[post("/organizations/<org_id>/rotate-api-key", data = "<data>")]
|
#[post("/organizations/<org_id>/rotate-api-key", data = "<data>")]
|
||||||
async fn rotate_api_key(
|
async fn rotate_api_key(
|
||||||
org_id: &str,
|
org_id: &str,
|
||||||
data: JsonUpcase<PasswordData>,
|
data: JsonUpcase<PasswordOrOtpData>,
|
||||||
headers: AdminHeaders,
|
headers: AdminHeaders,
|
||||||
conn: DbConn,
|
conn: DbConn,
|
||||||
) -> JsonResult {
|
) -> JsonResult {
|
||||||
|
|
|
@ -56,16 +56,34 @@ async fn ldap_import(data: JsonUpcase<OrgImportData>, token: PublicToken, mut co
|
||||||
if let Some(mut user_org) =
|
if let Some(mut user_org) =
|
||||||
UserOrganization::find_by_email_and_org(&user_data.Email, &org_id, &mut conn).await
|
UserOrganization::find_by_email_and_org(&user_data.Email, &org_id, &mut conn).await
|
||||||
{
|
{
|
||||||
user_org.revoke();
|
// Only revoke a user if it is not the last confirmed owner
|
||||||
user_org.save(&mut conn).await?;
|
let revoked = if user_org.atype == UserOrgType::Owner
|
||||||
}
|
&& user_org.status == UserOrgStatus::Confirmed as i32
|
||||||
|
{
|
||||||
|
if UserOrganization::count_confirmed_by_org_and_type(&org_id, UserOrgType::Owner, &mut conn).await
|
||||||
|
<= 1
|
||||||
|
{
|
||||||
|
warn!("Can't revoke the last owner");
|
||||||
|
false
|
||||||
|
} else {
|
||||||
|
user_org.revoke()
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
user_org.revoke()
|
||||||
|
};
|
||||||
|
|
||||||
|
let ext_modified = user_org.set_external_id(Some(user_data.ExternalId.clone()));
|
||||||
|
if revoked || ext_modified {
|
||||||
|
user_org.save(&mut conn).await?;
|
||||||
|
}
|
||||||
|
}
|
||||||
// If user is part of the organization, restore it
|
// If user is part of the organization, restore it
|
||||||
} else if let Some(mut user_org) =
|
} else if let Some(mut user_org) =
|
||||||
UserOrganization::find_by_email_and_org(&user_data.Email, &org_id, &mut conn).await
|
UserOrganization::find_by_email_and_org(&user_data.Email, &org_id, &mut conn).await
|
||||||
{
|
{
|
||||||
if user_org.status < UserOrgStatus::Revoked as i32 {
|
let restored = user_org.restore();
|
||||||
user_org.restore();
|
let ext_modified = user_org.set_external_id(Some(user_data.ExternalId.clone()));
|
||||||
|
if restored || ext_modified {
|
||||||
user_org.save(&mut conn).await?;
|
user_org.save(&mut conn).await?;
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
|
@ -73,9 +91,8 @@ async fn ldap_import(data: JsonUpcase<OrgImportData>, token: PublicToken, mut co
|
||||||
let user = match User::find_by_mail(&user_data.Email, &mut conn).await {
|
let user = match User::find_by_mail(&user_data.Email, &mut conn).await {
|
||||||
Some(user) => user, // exists in vaultwarden
|
Some(user) => user, // exists in vaultwarden
|
||||||
None => {
|
None => {
|
||||||
// doesn't exist in vaultwarden
|
// User does not exist yet
|
||||||
let mut new_user = User::new(user_data.Email.clone());
|
let mut new_user = User::new(user_data.Email.clone());
|
||||||
new_user.set_external_id(Some(user_data.ExternalId.clone()));
|
|
||||||
new_user.save(&mut conn).await?;
|
new_user.save(&mut conn).await?;
|
||||||
|
|
||||||
if !CONFIG.mail_enabled() {
|
if !CONFIG.mail_enabled() {
|
||||||
|
@ -85,13 +102,14 @@ async fn ldap_import(data: JsonUpcase<OrgImportData>, token: PublicToken, mut co
|
||||||
new_user
|
new_user
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
let user_org_status = if CONFIG.mail_enabled() {
|
let user_org_status = if CONFIG.mail_enabled() || user.password_hash.is_empty() {
|
||||||
UserOrgStatus::Invited as i32
|
UserOrgStatus::Invited as i32
|
||||||
} else {
|
} else {
|
||||||
UserOrgStatus::Accepted as i32 // Automatically mark user as accepted if no email invites
|
UserOrgStatus::Accepted as i32 // Automatically mark user as accepted if no email invites
|
||||||
};
|
};
|
||||||
|
|
||||||
let mut new_org_user = UserOrganization::new(user.uuid.clone(), org_id.clone());
|
let mut new_org_user = UserOrganization::new(user.uuid.clone(), org_id.clone());
|
||||||
|
new_org_user.set_external_id(Some(user_data.ExternalId.clone()));
|
||||||
new_org_user.access_all = false;
|
new_org_user.access_all = false;
|
||||||
new_org_user.atype = UserOrgType::User as i32;
|
new_org_user.atype = UserOrgType::User as i32;
|
||||||
new_org_user.status = user_org_status;
|
new_org_user.status = user_org_status;
|
||||||
|
@ -119,7 +137,8 @@ async fn ldap_import(data: JsonUpcase<OrgImportData>, token: PublicToken, mut co
|
||||||
|
|
||||||
if CONFIG.org_groups_enabled() {
|
if CONFIG.org_groups_enabled() {
|
||||||
for group_data in &data.Groups {
|
for group_data in &data.Groups {
|
||||||
let group_uuid = match Group::find_by_external_id(&group_data.ExternalId, &mut conn).await {
|
let group_uuid = match Group::find_by_external_id_and_org(&group_data.ExternalId, &org_id, &mut conn).await
|
||||||
|
{
|
||||||
Some(group) => group.uuid,
|
Some(group) => group.uuid,
|
||||||
None => {
|
None => {
|
||||||
let mut group =
|
let mut group =
|
||||||
|
@ -132,12 +151,10 @@ async fn ldap_import(data: JsonUpcase<OrgImportData>, token: PublicToken, mut co
|
||||||
GroupUser::delete_all_by_group(&group_uuid, &mut conn).await?;
|
GroupUser::delete_all_by_group(&group_uuid, &mut conn).await?;
|
||||||
|
|
||||||
for ext_id in &group_data.MemberExternalIds {
|
for ext_id in &group_data.MemberExternalIds {
|
||||||
if let Some(user) = User::find_by_external_id(ext_id, &mut conn).await {
|
if let Some(user_org) = UserOrganization::find_by_external_id_and_org(ext_id, &org_id, &mut conn).await
|
||||||
if let Some(user_org) = UserOrganization::find_by_user_and_org(&user.uuid, &org_id, &mut conn).await
|
{
|
||||||
{
|
let mut group_user = GroupUser::new(group_uuid.clone(), user_org.uuid.clone());
|
||||||
let mut group_user = GroupUser::new(group_uuid.clone(), user_org.uuid.clone());
|
group_user.save(&mut conn).await?;
|
||||||
group_user.save(&mut conn).await?;
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -150,10 +167,8 @@ async fn ldap_import(data: JsonUpcase<OrgImportData>, token: PublicToken, mut co
|
||||||
// Generate a HashSet to quickly verify if a member is listed or not.
|
// Generate a HashSet to quickly verify if a member is listed or not.
|
||||||
let sync_members: HashSet<String> = data.Members.into_iter().map(|m| m.ExternalId).collect();
|
let sync_members: HashSet<String> = data.Members.into_iter().map(|m| m.ExternalId).collect();
|
||||||
for user_org in UserOrganization::find_by_org(&org_id, &mut conn).await {
|
for user_org in UserOrganization::find_by_org(&org_id, &mut conn).await {
|
||||||
if let Some(user_external_id) =
|
if let Some(ref user_external_id) = user_org.external_id {
|
||||||
User::find_by_uuid(&user_org.user_uuid, &mut conn).await.map(|u| u.external_id)
|
if !sync_members.contains(user_external_id) {
|
||||||
{
|
|
||||||
if user_external_id.is_some() && !sync_members.contains(&user_external_id.unwrap()) {
|
|
||||||
if user_org.atype == UserOrgType::Owner && user_org.status == UserOrgStatus::Confirmed as i32 {
|
if user_org.atype == UserOrgType::Owner && user_org.status == UserOrgStatus::Confirmed as i32 {
|
||||||
// Removing owner, check that there is at least one other confirmed owner
|
// Removing owner, check that there is at least one other confirmed owner
|
||||||
if UserOrganization::count_confirmed_by_org_and_type(&org_id, UserOrgType::Owner, &mut conn)
|
if UserOrganization::count_confirmed_by_org_and_type(&org_id, UserOrgType::Owner, &mut conn)
|
||||||
|
@ -195,19 +210,15 @@ impl<'r> FromRequest<'r> for PublicToken {
|
||||||
Err(_) => err_handler!("Invalid claim"),
|
Err(_) => err_handler!("Invalid claim"),
|
||||||
};
|
};
|
||||||
// Check if time is between claims.nbf and claims.exp
|
// Check if time is between claims.nbf and claims.exp
|
||||||
let time_now = Utc::now().naive_utc().timestamp();
|
let time_now = Utc::now().timestamp();
|
||||||
if time_now < claims.nbf {
|
if time_now < claims.nbf {
|
||||||
err_handler!("Token issued in the future");
|
err_handler!("Token issued in the future");
|
||||||
}
|
}
|
||||||
if time_now > claims.exp {
|
if time_now > claims.exp {
|
||||||
err_handler!("Token expired");
|
err_handler!("Token expired");
|
||||||
}
|
}
|
||||||
// Check if claims.iss is host|claims.scope[0]
|
// Check if claims.iss is domain|claims.scope[0]
|
||||||
let host = match auth::Host::from_request(request).await {
|
let complete_host = format!("{}|{}", CONFIG.domain_origin(), claims.scope[0]);
|
||||||
Outcome::Success(host) => host,
|
|
||||||
_ => err_handler!("Error getting Host"),
|
|
||||||
};
|
|
||||||
let complete_host = format!("{}|{}", host.host, claims.scope[0]);
|
|
||||||
if complete_host != claims.iss {
|
if complete_host != claims.iss {
|
||||||
err_handler!("Token not issued by this server");
|
err_handler!("Token not issued by this server");
|
||||||
}
|
}
|
||||||
|
|
|
@ -1,6 +1,7 @@
|
||||||
use std::path::Path;
|
use std::path::Path;
|
||||||
|
|
||||||
use chrono::{DateTime, Duration, Utc};
|
use chrono::{DateTime, TimeDelta, Utc};
|
||||||
|
use num_traits::ToPrimitive;
|
||||||
use rocket::form::Form;
|
use rocket::form::Form;
|
||||||
use rocket::fs::NamedFile;
|
use rocket::fs::NamedFile;
|
||||||
use rocket::fs::TempFile;
|
use rocket::fs::TempFile;
|
||||||
|
@ -8,17 +9,17 @@ use rocket::serde::json::Json;
|
||||||
use serde_json::Value;
|
use serde_json::Value;
|
||||||
|
|
||||||
use crate::{
|
use crate::{
|
||||||
api::{ApiResult, EmptyResult, JsonResult, JsonUpcase, Notify, NumberOrString, UpdateType},
|
api::{ApiResult, EmptyResult, JsonResult, JsonUpcase, Notify, UpdateType},
|
||||||
auth::{ClientIp, Headers, Host},
|
auth::{ClientIp, Headers, Host},
|
||||||
db::{models::*, DbConn, DbPool},
|
db::{models::*, DbConn, DbPool},
|
||||||
util::SafeString,
|
util::{NumberOrString, SafeString},
|
||||||
CONFIG,
|
CONFIG,
|
||||||
};
|
};
|
||||||
|
|
||||||
const SEND_INACCESSIBLE_MSG: &str = "Send does not exist or is no longer available";
|
const SEND_INACCESSIBLE_MSG: &str = "Send does not exist or is no longer available";
|
||||||
|
|
||||||
// The max file size allowed by Bitwarden clients and add an extra 5% to avoid issues
|
// The max file size allowed by Bitwarden clients and add an extra 5% to avoid issues
|
||||||
const SIZE_525_MB: u64 = 550_502_400;
|
const SIZE_525_MB: i64 = 550_502_400;
|
||||||
|
|
||||||
pub fn routes() -> Vec<rocket::Route> {
|
pub fn routes() -> Vec<rocket::Route> {
|
||||||
routes![
|
routes![
|
||||||
|
@ -48,7 +49,7 @@ pub async fn purge_sends(pool: DbPool) {
|
||||||
|
|
||||||
#[derive(Deserialize)]
|
#[derive(Deserialize)]
|
||||||
#[allow(non_snake_case)]
|
#[allow(non_snake_case)]
|
||||||
struct SendData {
|
pub struct SendData {
|
||||||
Type: i32,
|
Type: i32,
|
||||||
Key: String,
|
Key: String,
|
||||||
Password: Option<String>,
|
Password: Option<String>,
|
||||||
|
@ -64,6 +65,9 @@ struct SendData {
|
||||||
Text: Option<Value>,
|
Text: Option<Value>,
|
||||||
File: Option<Value>,
|
File: Option<Value>,
|
||||||
FileLength: Option<NumberOrString>,
|
FileLength: Option<NumberOrString>,
|
||||||
|
|
||||||
|
// Used for key rotations
|
||||||
|
pub Id: Option<String>,
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Enforces the `Disable Send` policy. A non-owner/admin user belonging to
|
/// Enforces the `Disable Send` policy. A non-owner/admin user belonging to
|
||||||
|
@ -118,7 +122,7 @@ fn create_send(data: SendData, user_uuid: String) -> ApiResult<Send> {
|
||||||
err!("Send data not provided");
|
err!("Send data not provided");
|
||||||
};
|
};
|
||||||
|
|
||||||
if data.DeletionDate > Utc::now() + Duration::days(31) {
|
if data.DeletionDate > Utc::now() + TimeDelta::try_days(31).unwrap() {
|
||||||
err!(
|
err!(
|
||||||
"You cannot have a Send with a deletion date that far into the future. Adjust the Deletion Date to a value less than 31 days from now and try again."
|
"You cannot have a Send with a deletion date that far into the future. Adjust the Deletion Date to a value less than 31 days from now and try again."
|
||||||
);
|
);
|
||||||
|
@ -216,30 +220,41 @@ async fn post_send_file(data: Form<UploadData<'_>>, headers: Headers, mut conn:
|
||||||
} = data.into_inner();
|
} = data.into_inner();
|
||||||
let model = model.into_inner().data;
|
let model = model.into_inner().data;
|
||||||
|
|
||||||
|
let Some(size) = data.len().to_i64() else {
|
||||||
|
err!("Invalid send size");
|
||||||
|
};
|
||||||
|
if size < 0 {
|
||||||
|
err!("Send size can't be negative")
|
||||||
|
}
|
||||||
|
|
||||||
enforce_disable_hide_email_policy(&model, &headers, &mut conn).await?;
|
enforce_disable_hide_email_policy(&model, &headers, &mut conn).await?;
|
||||||
|
|
||||||
let size_limit = match CONFIG.user_attachment_limit() {
|
let size_limit = match CONFIG.user_send_limit() {
|
||||||
Some(0) => err!("File uploads are disabled"),
|
Some(0) => err!("File uploads are disabled"),
|
||||||
Some(limit_kb) => {
|
Some(limit_kb) => {
|
||||||
let left = (limit_kb * 1024) - Attachment::size_by_user(&headers.user.uuid, &mut conn).await;
|
let Some(already_used) = Send::size_by_user(&headers.user.uuid, &mut conn).await else {
|
||||||
|
err!("Existing sends overflow")
|
||||||
|
};
|
||||||
|
let Some(left) = limit_kb.checked_mul(1024).and_then(|l| l.checked_sub(already_used)) else {
|
||||||
|
err!("Send size overflow");
|
||||||
|
};
|
||||||
if left <= 0 {
|
if left <= 0 {
|
||||||
err!("Attachment storage limit reached! Delete some attachments to free up space")
|
err!("Send storage limit reached! Delete some sends to free up space")
|
||||||
}
|
}
|
||||||
std::cmp::Ord::max(left as u64, SIZE_525_MB)
|
i64::clamp(left, 0, SIZE_525_MB)
|
||||||
}
|
}
|
||||||
None => SIZE_525_MB,
|
None => SIZE_525_MB,
|
||||||
};
|
};
|
||||||
|
|
||||||
|
if size > size_limit {
|
||||||
|
err!("Send storage limit exceeded with this file");
|
||||||
|
}
|
||||||
|
|
||||||
let mut send = create_send(model, headers.user.uuid)?;
|
let mut send = create_send(model, headers.user.uuid)?;
|
||||||
if send.atype != SendType::File as i32 {
|
if send.atype != SendType::File as i32 {
|
||||||
err!("Send content is not a file");
|
err!("Send content is not a file");
|
||||||
}
|
}
|
||||||
|
|
||||||
let size = data.len();
|
|
||||||
if size > size_limit {
|
|
||||||
err!("Attachment storage limit exceeded with this file");
|
|
||||||
}
|
|
||||||
|
|
||||||
let file_id = crate::crypto::generate_send_id();
|
let file_id = crate::crypto::generate_send_id();
|
||||||
let folder_path = tokio::fs::canonicalize(&CONFIG.sends_folder()).await?.join(&send.uuid);
|
let folder_path = tokio::fs::canonicalize(&CONFIG.sends_folder()).await?.join(&send.uuid);
|
||||||
let file_path = folder_path.join(&file_id);
|
let file_path = folder_path.join(&file_id);
|
||||||
|
@ -253,7 +268,7 @@ async fn post_send_file(data: Form<UploadData<'_>>, headers: Headers, mut conn:
|
||||||
if let Some(o) = data_value.as_object_mut() {
|
if let Some(o) = data_value.as_object_mut() {
|
||||||
o.insert(String::from("Id"), Value::String(file_id));
|
o.insert(String::from("Id"), Value::String(file_id));
|
||||||
o.insert(String::from("Size"), Value::Number(size.into()));
|
o.insert(String::from("Size"), Value::Number(size.into()));
|
||||||
o.insert(String::from("SizeName"), Value::String(crate::util::get_display_size(size as i32)));
|
o.insert(String::from("SizeName"), Value::String(crate::util::get_display_size(size)));
|
||||||
}
|
}
|
||||||
send.data = serde_json::to_string(&data_value)?;
|
send.data = serde_json::to_string(&data_value)?;
|
||||||
|
|
||||||
|
@ -285,24 +300,32 @@ async fn post_send_file_v2(data: JsonUpcase<SendData>, headers: Headers, mut con
|
||||||
enforce_disable_hide_email_policy(&data, &headers, &mut conn).await?;
|
enforce_disable_hide_email_policy(&data, &headers, &mut conn).await?;
|
||||||
|
|
||||||
let file_length = match &data.FileLength {
|
let file_length = match &data.FileLength {
|
||||||
Some(m) => Some(m.into_i32()?),
|
Some(m) => m.into_i64()?,
|
||||||
_ => None,
|
_ => err!("Invalid send length"),
|
||||||
};
|
};
|
||||||
|
if file_length < 0 {
|
||||||
|
err!("Send size can't be negative")
|
||||||
|
}
|
||||||
|
|
||||||
let size_limit = match CONFIG.user_attachment_limit() {
|
let size_limit = match CONFIG.user_send_limit() {
|
||||||
Some(0) => err!("File uploads are disabled"),
|
Some(0) => err!("File uploads are disabled"),
|
||||||
Some(limit_kb) => {
|
Some(limit_kb) => {
|
||||||
let left = (limit_kb * 1024) - Attachment::size_by_user(&headers.user.uuid, &mut conn).await;
|
let Some(already_used) = Send::size_by_user(&headers.user.uuid, &mut conn).await else {
|
||||||
|
err!("Existing sends overflow")
|
||||||
|
};
|
||||||
|
let Some(left) = limit_kb.checked_mul(1024).and_then(|l| l.checked_sub(already_used)) else {
|
||||||
|
err!("Send size overflow");
|
||||||
|
};
|
||||||
if left <= 0 {
|
if left <= 0 {
|
||||||
err!("Attachment storage limit reached! Delete some attachments to free up space")
|
err!("Send storage limit reached! Delete some sends to free up space")
|
||||||
}
|
}
|
||||||
std::cmp::Ord::max(left as u64, SIZE_525_MB)
|
i64::clamp(left, 0, SIZE_525_MB)
|
||||||
}
|
}
|
||||||
None => SIZE_525_MB,
|
None => SIZE_525_MB,
|
||||||
};
|
};
|
||||||
|
|
||||||
if file_length.is_some() && file_length.unwrap() as u64 > size_limit {
|
if file_length > size_limit {
|
||||||
err!("Attachment storage limit exceeded with this file");
|
err!("Send storage limit exceeded with this file");
|
||||||
}
|
}
|
||||||
|
|
||||||
let mut send = create_send(data, headers.user.uuid)?;
|
let mut send = create_send(data, headers.user.uuid)?;
|
||||||
|
@ -312,8 +335,8 @@ async fn post_send_file_v2(data: JsonUpcase<SendData>, headers: Headers, mut con
|
||||||
let mut data_value: Value = serde_json::from_str(&send.data)?;
|
let mut data_value: Value = serde_json::from_str(&send.data)?;
|
||||||
if let Some(o) = data_value.as_object_mut() {
|
if let Some(o) = data_value.as_object_mut() {
|
||||||
o.insert(String::from("Id"), Value::String(file_id.clone()));
|
o.insert(String::from("Id"), Value::String(file_id.clone()));
|
||||||
o.insert(String::from("Size"), Value::Number(file_length.unwrap().into()));
|
o.insert(String::from("Size"), Value::Number(file_length.into()));
|
||||||
o.insert(String::from("SizeName"), Value::String(crate::util::get_display_size(file_length.unwrap())));
|
o.insert(String::from("SizeName"), Value::String(crate::util::get_display_size(file_length)));
|
||||||
}
|
}
|
||||||
send.data = serde_json::to_string(&data_value)?;
|
send.data = serde_json::to_string(&data_value)?;
|
||||||
send.save(&mut conn).await?;
|
send.save(&mut conn).await?;
|
||||||
|
@ -340,9 +363,13 @@ async fn post_send_file_v2_data(
|
||||||
|
|
||||||
let mut data = data.into_inner();
|
let mut data = data.into_inner();
|
||||||
|
|
||||||
let Some(send) = Send::find_by_uuid(send_uuid, &mut conn).await else { err!("Send not found. Unable to save the file.") };
|
let Some(send) = Send::find_by_uuid(send_uuid, &mut conn).await else {
|
||||||
|
err!("Send not found. Unable to save the file.")
|
||||||
|
};
|
||||||
|
|
||||||
let Some(send_user_id) = &send.user_uuid else {err!("Sends are only supported for users at the moment")};
|
let Some(send_user_id) = &send.user_uuid else {
|
||||||
|
err!("Sends are only supported for users at the moment")
|
||||||
|
};
|
||||||
if send_user_id != &headers.user.uuid {
|
if send_user_id != &headers.user.uuid {
|
||||||
err!("Send doesn't belong to user");
|
err!("Send doesn't belong to user");
|
||||||
}
|
}
|
||||||
|
@ -525,6 +552,19 @@ async fn put_send(
|
||||||
None => err!("Send not found"),
|
None => err!("Send not found"),
|
||||||
};
|
};
|
||||||
|
|
||||||
|
update_send_from_data(&mut send, data, &headers, &mut conn, &nt, UpdateType::SyncSendUpdate).await?;
|
||||||
|
|
||||||
|
Ok(Json(send.to_json()))
|
||||||
|
}
|
||||||
|
|
||||||
|
pub async fn update_send_from_data(
|
||||||
|
send: &mut Send,
|
||||||
|
data: SendData,
|
||||||
|
headers: &Headers,
|
||||||
|
conn: &mut DbConn,
|
||||||
|
nt: &Notify<'_>,
|
||||||
|
ut: UpdateType,
|
||||||
|
) -> EmptyResult {
|
||||||
if send.user_uuid.as_ref() != Some(&headers.user.uuid) {
|
if send.user_uuid.as_ref() != Some(&headers.user.uuid) {
|
||||||
err!("Send is not owned by user")
|
err!("Send is not owned by user")
|
||||||
}
|
}
|
||||||
|
@ -533,6 +573,12 @@ async fn put_send(
|
||||||
err!("Sends can't change type")
|
err!("Sends can't change type")
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if data.DeletionDate > Utc::now() + TimeDelta::try_days(31).unwrap() {
|
||||||
|
err!(
|
||||||
|
"You cannot have a Send with a deletion date that far into the future. Adjust the Deletion Date to a value less than 31 days from now and try again."
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
// When updating a file Send, we receive nulls in the File field, as it's immutable,
|
// When updating a file Send, we receive nulls in the File field, as it's immutable,
|
||||||
// so we only need to update the data field in the Text case
|
// so we only need to update the data field in the Text case
|
||||||
if data.Type == SendType::Text as i32 {
|
if data.Type == SendType::Text as i32 {
|
||||||
|
@ -545,11 +591,6 @@ async fn put_send(
|
||||||
send.data = data_str;
|
send.data = data_str;
|
||||||
}
|
}
|
||||||
|
|
||||||
if data.DeletionDate > Utc::now() + Duration::days(31) {
|
|
||||||
err!(
|
|
||||||
"You cannot have a Send with a deletion date that far into the future. Adjust the Deletion Date to a value less than 31 days from now and try again."
|
|
||||||
);
|
|
||||||
}
|
|
||||||
send.name = data.Name;
|
send.name = data.Name;
|
||||||
send.akey = data.Key;
|
send.akey = data.Key;
|
||||||
send.deletion_date = data.DeletionDate.naive_utc();
|
send.deletion_date = data.DeletionDate.naive_utc();
|
||||||
|
@ -567,17 +608,11 @@ async fn put_send(
|
||||||
send.set_password(Some(&password));
|
send.set_password(Some(&password));
|
||||||
}
|
}
|
||||||
|
|
||||||
send.save(&mut conn).await?;
|
send.save(conn).await?;
|
||||||
nt.send_send_update(
|
if ut != UpdateType::None {
|
||||||
UpdateType::SyncSendUpdate,
|
nt.send_send_update(ut, send, &send.update_users_revision(conn).await, &headers.device.uuid, conn).await;
|
||||||
&send,
|
}
|
||||||
&send.update_users_revision(&mut conn).await,
|
Ok(())
|
||||||
&headers.device.uuid,
|
|
||||||
&mut conn,
|
|
||||||
)
|
|
||||||
.await;
|
|
||||||
|
|
||||||
Ok(Json(send.to_json()))
|
|
||||||
}
|
}
|
||||||
|
|
||||||
#[delete("/sends/<id>")]
|
#[delete("/sends/<id>")]
|
||||||
|
|
|
@ -5,7 +5,7 @@ use rocket::Route;
|
||||||
use crate::{
|
use crate::{
|
||||||
api::{
|
api::{
|
||||||
core::log_user_event, core::two_factor::_generate_recover_code, EmptyResult, JsonResult, JsonUpcase,
|
core::log_user_event, core::two_factor::_generate_recover_code, EmptyResult, JsonResult, JsonUpcase,
|
||||||
NumberOrString, PasswordData,
|
PasswordOrOtpData,
|
||||||
},
|
},
|
||||||
auth::{ClientIp, Headers},
|
auth::{ClientIp, Headers},
|
||||||
crypto,
|
crypto,
|
||||||
|
@ -13,6 +13,7 @@ use crate::{
|
||||||
models::{EventType, TwoFactor, TwoFactorType},
|
models::{EventType, TwoFactor, TwoFactorType},
|
||||||
DbConn,
|
DbConn,
|
||||||
},
|
},
|
||||||
|
util::NumberOrString,
|
||||||
};
|
};
|
||||||
|
|
||||||
pub use crate::config::CONFIG;
|
pub use crate::config::CONFIG;
|
||||||
|
@ -22,13 +23,11 @@ pub fn routes() -> Vec<Route> {
|
||||||
}
|
}
|
||||||
|
|
||||||
#[post("/two-factor/get-authenticator", data = "<data>")]
|
#[post("/two-factor/get-authenticator", data = "<data>")]
|
||||||
async fn generate_authenticator(data: JsonUpcase<PasswordData>, headers: Headers, mut conn: DbConn) -> JsonResult {
|
async fn generate_authenticator(data: JsonUpcase<PasswordOrOtpData>, headers: Headers, mut conn: DbConn) -> JsonResult {
|
||||||
let data: PasswordData = data.into_inner().data;
|
let data: PasswordOrOtpData = data.into_inner().data;
|
||||||
let user = headers.user;
|
let user = headers.user;
|
||||||
|
|
||||||
if !user.check_valid_password(&data.MasterPasswordHash) {
|
data.validate(&user, false, &mut conn).await?;
|
||||||
err!("Invalid password");
|
|
||||||
}
|
|
||||||
|
|
||||||
let type_ = TwoFactorType::Authenticator as i32;
|
let type_ = TwoFactorType::Authenticator as i32;
|
||||||
let twofactor = TwoFactor::find_by_user_and_type(&user.uuid, type_, &mut conn).await;
|
let twofactor = TwoFactor::find_by_user_and_type(&user.uuid, type_, &mut conn).await;
|
||||||
|
@ -48,9 +47,10 @@ async fn generate_authenticator(data: JsonUpcase<PasswordData>, headers: Headers
|
||||||
#[derive(Deserialize, Debug)]
|
#[derive(Deserialize, Debug)]
|
||||||
#[allow(non_snake_case)]
|
#[allow(non_snake_case)]
|
||||||
struct EnableAuthenticatorData {
|
struct EnableAuthenticatorData {
|
||||||
MasterPasswordHash: String,
|
|
||||||
Key: String,
|
Key: String,
|
||||||
Token: NumberOrString,
|
Token: NumberOrString,
|
||||||
|
MasterPasswordHash: Option<String>,
|
||||||
|
Otp: Option<String>,
|
||||||
}
|
}
|
||||||
|
|
||||||
#[post("/two-factor/authenticator", data = "<data>")]
|
#[post("/two-factor/authenticator", data = "<data>")]
|
||||||
|
@ -60,15 +60,17 @@ async fn activate_authenticator(
|
||||||
mut conn: DbConn,
|
mut conn: DbConn,
|
||||||
) -> JsonResult {
|
) -> JsonResult {
|
||||||
let data: EnableAuthenticatorData = data.into_inner().data;
|
let data: EnableAuthenticatorData = data.into_inner().data;
|
||||||
let password_hash = data.MasterPasswordHash;
|
|
||||||
let key = data.Key;
|
let key = data.Key;
|
||||||
let token = data.Token.into_string();
|
let token = data.Token.into_string();
|
||||||
|
|
||||||
let mut user = headers.user;
|
let mut user = headers.user;
|
||||||
|
|
||||||
if !user.check_valid_password(&password_hash) {
|
PasswordOrOtpData {
|
||||||
err!("Invalid password");
|
MasterPasswordHash: data.MasterPasswordHash,
|
||||||
|
Otp: data.Otp,
|
||||||
}
|
}
|
||||||
|
.validate(&user, true, &mut conn)
|
||||||
|
.await?;
|
||||||
|
|
||||||
// Validate key as base32 and 20 bytes length
|
// Validate key as base32 and 20 bytes length
|
||||||
let decoded_key: Vec<u8> = match BASE32.decode(key.as_bytes()) {
|
let decoded_key: Vec<u8> = match BASE32.decode(key.as_bytes()) {
|
||||||
|
@ -154,8 +156,8 @@ pub async fn validate_totp_code(
|
||||||
let time = (current_timestamp + step * 30i64) as u64;
|
let time = (current_timestamp + step * 30i64) as u64;
|
||||||
let generated = totp_custom::<Sha1>(30, 6, &decoded_secret, time);
|
let generated = totp_custom::<Sha1>(30, 6, &decoded_secret, time);
|
||||||
|
|
||||||
// Check the the given code equals the generated and if the time_step is larger then the one last used.
|
// Check the given code equals the generated and if the time_step is larger then the one last used.
|
||||||
if generated == totp_code && time_step > i64::from(twofactor.last_used) {
|
if generated == totp_code && time_step > twofactor.last_used {
|
||||||
// If the step does not equals 0 the time is drifted either server or client side.
|
// If the step does not equals 0 the time is drifted either server or client side.
|
||||||
if step != 0 {
|
if step != 0 {
|
||||||
warn!("TOTP Time drift detected. The step offset is {}", step);
|
warn!("TOTP Time drift detected. The step offset is {}", step);
|
||||||
|
@ -163,10 +165,10 @@ pub async fn validate_totp_code(
|
||||||
|
|
||||||
// Save the last used time step so only totp time steps higher then this one are allowed.
|
// Save the last used time step so only totp time steps higher then this one are allowed.
|
||||||
// This will also save a newly created twofactor if the code is correct.
|
// This will also save a newly created twofactor if the code is correct.
|
||||||
twofactor.last_used = time_step as i32;
|
twofactor.last_used = time_step;
|
||||||
twofactor.save(conn).await?;
|
twofactor.save(conn).await?;
|
||||||
return Ok(());
|
return Ok(());
|
||||||
} else if generated == totp_code && time_step <= i64::from(twofactor.last_used) {
|
} else if generated == totp_code && time_step <= twofactor.last_used {
|
||||||
warn!("This TOTP or a TOTP code within {} steps back or forward has already been used!", steps);
|
warn!("This TOTP or a TOTP code within {} steps back or forward has already been used!", steps);
|
||||||
err!(
|
err!(
|
||||||
format!("Invalid TOTP code! Server time: {} IP: {}", current_time.format("%F %T UTC"), ip.ip),
|
format!("Invalid TOTP code! Server time: {} IP: {}", current_time.format("%F %T UTC"), ip.ip),
|
||||||
|
@ -177,7 +179,7 @@ pub async fn validate_totp_code(
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Else no valide code received, deny access
|
// Else no valid code received, deny access
|
||||||
err!(
|
err!(
|
||||||
format!("Invalid TOTP code! Server time: {} IP: {}", current_time.format("%F %T UTC"), ip.ip),
|
format!("Invalid TOTP code! Server time: {} IP: {}", current_time.format("%F %T UTC"), ip.ip),
|
||||||
ErrorEvent {
|
ErrorEvent {
|
||||||
|
|
|
@ -6,7 +6,7 @@ use rocket::Route;
|
||||||
use crate::{
|
use crate::{
|
||||||
api::{
|
api::{
|
||||||
core::log_user_event, core::two_factor::_generate_recover_code, ApiResult, EmptyResult, JsonResult, JsonUpcase,
|
core::log_user_event, core::two_factor::_generate_recover_code, ApiResult, EmptyResult, JsonResult, JsonUpcase,
|
||||||
PasswordData,
|
PasswordOrOtpData,
|
||||||
},
|
},
|
||||||
auth::Headers,
|
auth::Headers,
|
||||||
crypto,
|
crypto,
|
||||||
|
@ -92,14 +92,13 @@ impl DuoStatus {
|
||||||
const DISABLED_MESSAGE_DEFAULT: &str = "<To use the global Duo keys, please leave these fields untouched>";
|
const DISABLED_MESSAGE_DEFAULT: &str = "<To use the global Duo keys, please leave these fields untouched>";
|
||||||
|
|
||||||
#[post("/two-factor/get-duo", data = "<data>")]
|
#[post("/two-factor/get-duo", data = "<data>")]
|
||||||
async fn get_duo(data: JsonUpcase<PasswordData>, headers: Headers, mut conn: DbConn) -> JsonResult {
|
async fn get_duo(data: JsonUpcase<PasswordOrOtpData>, headers: Headers, mut conn: DbConn) -> JsonResult {
|
||||||
let data: PasswordData = data.into_inner().data;
|
let data: PasswordOrOtpData = data.into_inner().data;
|
||||||
|
let user = headers.user;
|
||||||
|
|
||||||
if !headers.user.check_valid_password(&data.MasterPasswordHash) {
|
data.validate(&user, false, &mut conn).await?;
|
||||||
err!("Invalid password");
|
|
||||||
}
|
|
||||||
|
|
||||||
let data = get_user_duo_data(&headers.user.uuid, &mut conn).await;
|
let data = get_user_duo_data(&user.uuid, &mut conn).await;
|
||||||
|
|
||||||
let (enabled, data) = match data {
|
let (enabled, data) = match data {
|
||||||
DuoStatus::Global(_) => (true, Some(DuoData::secret())),
|
DuoStatus::Global(_) => (true, Some(DuoData::secret())),
|
||||||
|
@ -129,10 +128,11 @@ async fn get_duo(data: JsonUpcase<PasswordData>, headers: Headers, mut conn: DbC
|
||||||
#[derive(Deserialize)]
|
#[derive(Deserialize)]
|
||||||
#[allow(non_snake_case, dead_code)]
|
#[allow(non_snake_case, dead_code)]
|
||||||
struct EnableDuoData {
|
struct EnableDuoData {
|
||||||
MasterPasswordHash: String,
|
|
||||||
Host: String,
|
Host: String,
|
||||||
SecretKey: String,
|
SecretKey: String,
|
||||||
IntegrationKey: String,
|
IntegrationKey: String,
|
||||||
|
MasterPasswordHash: Option<String>,
|
||||||
|
Otp: Option<String>,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl From<EnableDuoData> for DuoData {
|
impl From<EnableDuoData> for DuoData {
|
||||||
|
@ -159,9 +159,12 @@ async fn activate_duo(data: JsonUpcase<EnableDuoData>, headers: Headers, mut con
|
||||||
let data: EnableDuoData = data.into_inner().data;
|
let data: EnableDuoData = data.into_inner().data;
|
||||||
let mut user = headers.user;
|
let mut user = headers.user;
|
||||||
|
|
||||||
if !user.check_valid_password(&data.MasterPasswordHash) {
|
PasswordOrOtpData {
|
||||||
err!("Invalid password");
|
MasterPasswordHash: data.MasterPasswordHash.clone(),
|
||||||
|
Otp: data.Otp.clone(),
|
||||||
}
|
}
|
||||||
|
.validate(&user, true, &mut conn)
|
||||||
|
.await?;
|
||||||
|
|
||||||
let (data, data_str) = if check_duo_fields_custom(&data) {
|
let (data, data_str) = if check_duo_fields_custom(&data) {
|
||||||
let data_req: DuoData = data.into();
|
let data_req: DuoData = data.into();
|
||||||
|
|
|
@ -1,16 +1,16 @@
|
||||||
use chrono::{Duration, NaiveDateTime, Utc};
|
use chrono::{DateTime, TimeDelta, Utc};
|
||||||
use rocket::serde::json::Json;
|
use rocket::serde::json::Json;
|
||||||
use rocket::Route;
|
use rocket::Route;
|
||||||
|
|
||||||
use crate::{
|
use crate::{
|
||||||
api::{
|
api::{
|
||||||
core::{log_user_event, two_factor::_generate_recover_code},
|
core::{log_user_event, two_factor::_generate_recover_code},
|
||||||
EmptyResult, JsonResult, JsonUpcase, PasswordData,
|
EmptyResult, JsonResult, JsonUpcase, PasswordOrOtpData,
|
||||||
},
|
},
|
||||||
auth::Headers,
|
auth::Headers,
|
||||||
crypto,
|
crypto,
|
||||||
db::{
|
db::{
|
||||||
models::{EventType, TwoFactor, TwoFactorType},
|
models::{EventType, TwoFactor, TwoFactorType, User},
|
||||||
DbConn,
|
DbConn,
|
||||||
},
|
},
|
||||||
error::{Error, MapResult},
|
error::{Error, MapResult},
|
||||||
|
@ -76,13 +76,11 @@ pub async fn send_token(user_uuid: &str, conn: &mut DbConn) -> EmptyResult {
|
||||||
|
|
||||||
/// When user clicks on Manage email 2FA show the user the related information
|
/// When user clicks on Manage email 2FA show the user the related information
|
||||||
#[post("/two-factor/get-email", data = "<data>")]
|
#[post("/two-factor/get-email", data = "<data>")]
|
||||||
async fn get_email(data: JsonUpcase<PasswordData>, headers: Headers, mut conn: DbConn) -> JsonResult {
|
async fn get_email(data: JsonUpcase<PasswordOrOtpData>, headers: Headers, mut conn: DbConn) -> JsonResult {
|
||||||
let data: PasswordData = data.into_inner().data;
|
let data: PasswordOrOtpData = data.into_inner().data;
|
||||||
let user = headers.user;
|
let user = headers.user;
|
||||||
|
|
||||||
if !user.check_valid_password(&data.MasterPasswordHash) {
|
data.validate(&user, false, &mut conn).await?;
|
||||||
err!("Invalid password");
|
|
||||||
}
|
|
||||||
|
|
||||||
let (enabled, mfa_email) =
|
let (enabled, mfa_email) =
|
||||||
match TwoFactor::find_by_user_and_type(&user.uuid, TwoFactorType::Email as i32, &mut conn).await {
|
match TwoFactor::find_by_user_and_type(&user.uuid, TwoFactorType::Email as i32, &mut conn).await {
|
||||||
|
@ -105,7 +103,8 @@ async fn get_email(data: JsonUpcase<PasswordData>, headers: Headers, mut conn: D
|
||||||
struct SendEmailData {
|
struct SendEmailData {
|
||||||
/// Email where 2FA codes will be sent to, can be different than user email account.
|
/// Email where 2FA codes will be sent to, can be different than user email account.
|
||||||
Email: String,
|
Email: String,
|
||||||
MasterPasswordHash: String,
|
MasterPasswordHash: Option<String>,
|
||||||
|
Otp: Option<String>,
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Send a verification email to the specified email address to check whether it exists/belongs to user.
|
/// Send a verification email to the specified email address to check whether it exists/belongs to user.
|
||||||
|
@ -114,9 +113,12 @@ async fn send_email(data: JsonUpcase<SendEmailData>, headers: Headers, mut conn:
|
||||||
let data: SendEmailData = data.into_inner().data;
|
let data: SendEmailData = data.into_inner().data;
|
||||||
let user = headers.user;
|
let user = headers.user;
|
||||||
|
|
||||||
if !user.check_valid_password(&data.MasterPasswordHash) {
|
PasswordOrOtpData {
|
||||||
err!("Invalid password");
|
MasterPasswordHash: data.MasterPasswordHash,
|
||||||
|
Otp: data.Otp,
|
||||||
}
|
}
|
||||||
|
.validate(&user, false, &mut conn)
|
||||||
|
.await?;
|
||||||
|
|
||||||
if !CONFIG._enable_email_2fa() {
|
if !CONFIG._enable_email_2fa() {
|
||||||
err!("Email 2FA is disabled")
|
err!("Email 2FA is disabled")
|
||||||
|
@ -144,8 +146,9 @@ async fn send_email(data: JsonUpcase<SendEmailData>, headers: Headers, mut conn:
|
||||||
#[allow(non_snake_case)]
|
#[allow(non_snake_case)]
|
||||||
struct EmailData {
|
struct EmailData {
|
||||||
Email: String,
|
Email: String,
|
||||||
MasterPasswordHash: String,
|
|
||||||
Token: String,
|
Token: String,
|
||||||
|
MasterPasswordHash: Option<String>,
|
||||||
|
Otp: Option<String>,
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Verify email belongs to user and can be used for 2FA email codes.
|
/// Verify email belongs to user and can be used for 2FA email codes.
|
||||||
|
@ -154,9 +157,13 @@ async fn email(data: JsonUpcase<EmailData>, headers: Headers, mut conn: DbConn)
|
||||||
let data: EmailData = data.into_inner().data;
|
let data: EmailData = data.into_inner().data;
|
||||||
let mut user = headers.user;
|
let mut user = headers.user;
|
||||||
|
|
||||||
if !user.check_valid_password(&data.MasterPasswordHash) {
|
// This is the last step in the verification process, delete the otp directly afterwards
|
||||||
err!("Invalid password");
|
PasswordOrOtpData {
|
||||||
|
MasterPasswordHash: data.MasterPasswordHash,
|
||||||
|
Otp: data.Otp,
|
||||||
}
|
}
|
||||||
|
.validate(&user, true, &mut conn)
|
||||||
|
.await?;
|
||||||
|
|
||||||
let type_ = TwoFactorType::EmailVerificationChallenge as i32;
|
let type_ = TwoFactorType::EmailVerificationChallenge as i32;
|
||||||
let mut twofactor =
|
let mut twofactor =
|
||||||
|
@ -225,9 +232,9 @@ pub async fn validate_email_code_str(user_uuid: &str, token: &str, data: &str, c
|
||||||
twofactor.data = email_data.to_json();
|
twofactor.data = email_data.to_json();
|
||||||
twofactor.save(conn).await?;
|
twofactor.save(conn).await?;
|
||||||
|
|
||||||
let date = NaiveDateTime::from_timestamp_opt(email_data.token_sent, 0).expect("Email token timestamp invalid.");
|
let date = DateTime::from_timestamp(email_data.token_sent, 0).expect("Email token timestamp invalid.").naive_utc();
|
||||||
let max_time = CONFIG.email_expiration_time() as i64;
|
let max_time = CONFIG.email_expiration_time() as i64;
|
||||||
if date + Duration::seconds(max_time) < Utc::now().naive_utc() {
|
if date + TimeDelta::try_seconds(max_time).unwrap() < Utc::now().naive_utc() {
|
||||||
err!(
|
err!(
|
||||||
"Token has expired",
|
"Token has expired",
|
||||||
ErrorEvent {
|
ErrorEvent {
|
||||||
|
@ -258,14 +265,14 @@ impl EmailTokenData {
|
||||||
EmailTokenData {
|
EmailTokenData {
|
||||||
email,
|
email,
|
||||||
last_token: Some(token),
|
last_token: Some(token),
|
||||||
token_sent: Utc::now().naive_utc().timestamp(),
|
token_sent: Utc::now().timestamp(),
|
||||||
attempts: 0,
|
attempts: 0,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn set_token(&mut self, token: String) {
|
pub fn set_token(&mut self, token: String) {
|
||||||
self.last_token = Some(token);
|
self.last_token = Some(token);
|
||||||
self.token_sent = Utc::now().naive_utc().timestamp();
|
self.token_sent = Utc::now().timestamp();
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn reset_token(&mut self) {
|
pub fn reset_token(&mut self) {
|
||||||
|
@ -290,6 +297,15 @@ impl EmailTokenData {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
pub async fn activate_email_2fa(user: &User, conn: &mut DbConn) -> EmptyResult {
|
||||||
|
if user.verified_at.is_none() {
|
||||||
|
err!("Auto-enabling of email 2FA failed because the users email address has not been verified!");
|
||||||
|
}
|
||||||
|
let twofactor_data = EmailTokenData::new(user.email.clone(), String::new());
|
||||||
|
let twofactor = TwoFactor::new(user.uuid.clone(), TwoFactorType::Email, twofactor_data.to_json());
|
||||||
|
twofactor.save(conn).await
|
||||||
|
}
|
||||||
|
|
||||||
/// Takes an email address and obscures it by replacing it with asterisks except two characters.
|
/// Takes an email address and obscures it by replacing it with asterisks except two characters.
|
||||||
pub fn obscure_email(email: &str) -> String {
|
pub fn obscure_email(email: &str) -> String {
|
||||||
let split: Vec<&str> = email.rsplitn(2, '@').collect();
|
let split: Vec<&str> = email.rsplitn(2, '@').collect();
|
||||||
|
@ -311,6 +327,14 @@ pub fn obscure_email(email: &str) -> String {
|
||||||
format!("{}@{}", new_name, &domain)
|
format!("{}@{}", new_name, &domain)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
pub async fn find_and_activate_email_2fa(user_uuid: &str, conn: &mut DbConn) -> EmptyResult {
|
||||||
|
if let Some(user) = User::find_by_uuid(user_uuid, conn).await {
|
||||||
|
activate_email_2fa(&user, conn).await
|
||||||
|
} else {
|
||||||
|
err!("User not found!");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
#[cfg(test)]
|
#[cfg(test)]
|
||||||
mod tests {
|
mod tests {
|
||||||
use super::*;
|
use super::*;
|
||||||
|
|
|
@ -1,20 +1,26 @@
|
||||||
use chrono::{Duration, Utc};
|
use chrono::{TimeDelta, Utc};
|
||||||
use data_encoding::BASE32;
|
use data_encoding::BASE32;
|
||||||
use rocket::serde::json::Json;
|
use rocket::serde::json::Json;
|
||||||
use rocket::Route;
|
use rocket::Route;
|
||||||
use serde_json::Value;
|
use serde_json::Value;
|
||||||
|
|
||||||
use crate::{
|
use crate::{
|
||||||
api::{core::log_user_event, JsonResult, JsonUpcase, NumberOrString, PasswordData},
|
api::{
|
||||||
|
core::{log_event, log_user_event},
|
||||||
|
EmptyResult, JsonResult, JsonUpcase, PasswordOrOtpData,
|
||||||
|
},
|
||||||
auth::{ClientHeaders, Headers},
|
auth::{ClientHeaders, Headers},
|
||||||
crypto,
|
crypto,
|
||||||
db::{models::*, DbConn, DbPool},
|
db::{models::*, DbConn, DbPool},
|
||||||
mail, CONFIG,
|
mail,
|
||||||
|
util::NumberOrString,
|
||||||
|
CONFIG,
|
||||||
};
|
};
|
||||||
|
|
||||||
pub mod authenticator;
|
pub mod authenticator;
|
||||||
pub mod duo;
|
pub mod duo;
|
||||||
pub mod email;
|
pub mod email;
|
||||||
|
pub mod protected_actions;
|
||||||
pub mod webauthn;
|
pub mod webauthn;
|
||||||
pub mod yubikey;
|
pub mod yubikey;
|
||||||
|
|
||||||
|
@ -33,6 +39,7 @@ pub fn routes() -> Vec<Route> {
|
||||||
routes.append(&mut email::routes());
|
routes.append(&mut email::routes());
|
||||||
routes.append(&mut webauthn::routes());
|
routes.append(&mut webauthn::routes());
|
||||||
routes.append(&mut yubikey::routes());
|
routes.append(&mut yubikey::routes());
|
||||||
|
routes.append(&mut protected_actions::routes());
|
||||||
|
|
||||||
routes
|
routes
|
||||||
}
|
}
|
||||||
|
@ -50,13 +57,11 @@ async fn get_twofactor(headers: Headers, mut conn: DbConn) -> Json<Value> {
|
||||||
}
|
}
|
||||||
|
|
||||||
#[post("/two-factor/get-recover", data = "<data>")]
|
#[post("/two-factor/get-recover", data = "<data>")]
|
||||||
fn get_recover(data: JsonUpcase<PasswordData>, headers: Headers) -> JsonResult {
|
async fn get_recover(data: JsonUpcase<PasswordOrOtpData>, headers: Headers, mut conn: DbConn) -> JsonResult {
|
||||||
let data: PasswordData = data.into_inner().data;
|
let data: PasswordOrOtpData = data.into_inner().data;
|
||||||
let user = headers.user;
|
let user = headers.user;
|
||||||
|
|
||||||
if !user.check_valid_password(&data.MasterPasswordHash) {
|
data.validate(&user, true, &mut conn).await?;
|
||||||
err!("Invalid password");
|
|
||||||
}
|
|
||||||
|
|
||||||
Ok(Json(json!({
|
Ok(Json(json!({
|
||||||
"Code": user.totp_recover,
|
"Code": user.totp_recover,
|
||||||
|
@ -96,6 +101,7 @@ async fn recover(data: JsonUpcase<RecoverTwoFactor>, client_headers: ClientHeade
|
||||||
|
|
||||||
// Remove all twofactors from the user
|
// Remove all twofactors from the user
|
||||||
TwoFactor::delete_all_by_user(&user.uuid, &mut conn).await?;
|
TwoFactor::delete_all_by_user(&user.uuid, &mut conn).await?;
|
||||||
|
enforce_2fa_policy(&user, &user.uuid, client_headers.device_type, &client_headers.ip.ip, &mut conn).await?;
|
||||||
|
|
||||||
log_user_event(
|
log_user_event(
|
||||||
EventType::UserRecovered2fa as i32,
|
EventType::UserRecovered2fa as i32,
|
||||||
|
@ -123,19 +129,23 @@ async fn _generate_recover_code(user: &mut User, conn: &mut DbConn) {
|
||||||
#[derive(Deserialize)]
|
#[derive(Deserialize)]
|
||||||
#[allow(non_snake_case)]
|
#[allow(non_snake_case)]
|
||||||
struct DisableTwoFactorData {
|
struct DisableTwoFactorData {
|
||||||
MasterPasswordHash: String,
|
MasterPasswordHash: Option<String>,
|
||||||
|
Otp: Option<String>,
|
||||||
Type: NumberOrString,
|
Type: NumberOrString,
|
||||||
}
|
}
|
||||||
|
|
||||||
#[post("/two-factor/disable", data = "<data>")]
|
#[post("/two-factor/disable", data = "<data>")]
|
||||||
async fn disable_twofactor(data: JsonUpcase<DisableTwoFactorData>, headers: Headers, mut conn: DbConn) -> JsonResult {
|
async fn disable_twofactor(data: JsonUpcase<DisableTwoFactorData>, headers: Headers, mut conn: DbConn) -> JsonResult {
|
||||||
let data: DisableTwoFactorData = data.into_inner().data;
|
let data: DisableTwoFactorData = data.into_inner().data;
|
||||||
let password_hash = data.MasterPasswordHash;
|
|
||||||
let user = headers.user;
|
let user = headers.user;
|
||||||
|
|
||||||
if !user.check_valid_password(&password_hash) {
|
// Delete directly after a valid token has been provided
|
||||||
err!("Invalid password");
|
PasswordOrOtpData {
|
||||||
|
MasterPasswordHash: data.MasterPasswordHash,
|
||||||
|
Otp: data.Otp,
|
||||||
}
|
}
|
||||||
|
.validate(&user, true, &mut conn)
|
||||||
|
.await?;
|
||||||
|
|
||||||
let type_ = data.Type.into_i32()?;
|
let type_ = data.Type.into_i32()?;
|
||||||
|
|
||||||
|
@ -145,22 +155,8 @@ async fn disable_twofactor(data: JsonUpcase<DisableTwoFactorData>, headers: Head
|
||||||
.await;
|
.await;
|
||||||
}
|
}
|
||||||
|
|
||||||
let twofactor_disabled = TwoFactor::find_by_user(&user.uuid, &mut conn).await.is_empty();
|
if TwoFactor::find_by_user(&user.uuid, &mut conn).await.is_empty() {
|
||||||
|
enforce_2fa_policy(&user, &user.uuid, headers.device.atype, &headers.ip.ip, &mut conn).await?;
|
||||||
if twofactor_disabled {
|
|
||||||
for user_org in
|
|
||||||
UserOrganization::find_by_user_and_policy(&user.uuid, OrgPolicyType::TwoFactorAuthentication, &mut conn)
|
|
||||||
.await
|
|
||||||
.into_iter()
|
|
||||||
{
|
|
||||||
if user_org.atype < UserOrgType::Admin {
|
|
||||||
if CONFIG.mail_enabled() {
|
|
||||||
let org = Organization::find_by_uuid(&user_org.org_uuid, &mut conn).await.unwrap();
|
|
||||||
mail::send_2fa_removed_from_org(&user.email, &org.name).await?;
|
|
||||||
}
|
|
||||||
user_org.delete(&mut conn).await?;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
Ok(Json(json!({
|
Ok(Json(json!({
|
||||||
|
@ -175,6 +171,78 @@ async fn disable_twofactor_put(data: JsonUpcase<DisableTwoFactorData>, headers:
|
||||||
disable_twofactor(data, headers, conn).await
|
disable_twofactor(data, headers, conn).await
|
||||||
}
|
}
|
||||||
|
|
||||||
|
pub async fn enforce_2fa_policy(
|
||||||
|
user: &User,
|
||||||
|
act_uuid: &str,
|
||||||
|
device_type: i32,
|
||||||
|
ip: &std::net::IpAddr,
|
||||||
|
conn: &mut DbConn,
|
||||||
|
) -> EmptyResult {
|
||||||
|
for member in UserOrganization::find_by_user_and_policy(&user.uuid, OrgPolicyType::TwoFactorAuthentication, conn)
|
||||||
|
.await
|
||||||
|
.into_iter()
|
||||||
|
{
|
||||||
|
// Policy only applies to non-Owner/non-Admin members who have accepted joining the org
|
||||||
|
if member.atype < UserOrgType::Admin {
|
||||||
|
if CONFIG.mail_enabled() {
|
||||||
|
let org = Organization::find_by_uuid(&member.org_uuid, conn).await.unwrap();
|
||||||
|
mail::send_2fa_removed_from_org(&user.email, &org.name).await?;
|
||||||
|
}
|
||||||
|
let mut member = member;
|
||||||
|
member.revoke();
|
||||||
|
member.save(conn).await?;
|
||||||
|
|
||||||
|
log_event(
|
||||||
|
EventType::OrganizationUserRevoked as i32,
|
||||||
|
&member.uuid,
|
||||||
|
&member.org_uuid,
|
||||||
|
act_uuid,
|
||||||
|
device_type,
|
||||||
|
ip,
|
||||||
|
conn,
|
||||||
|
)
|
||||||
|
.await;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
pub async fn enforce_2fa_policy_for_org(
|
||||||
|
org_uuid: &str,
|
||||||
|
act_uuid: &str,
|
||||||
|
device_type: i32,
|
||||||
|
ip: &std::net::IpAddr,
|
||||||
|
conn: &mut DbConn,
|
||||||
|
) -> EmptyResult {
|
||||||
|
let org = Organization::find_by_uuid(org_uuid, conn).await.unwrap();
|
||||||
|
for member in UserOrganization::find_confirmed_by_org(org_uuid, conn).await.into_iter() {
|
||||||
|
// Don't enforce the policy for Admins and Owners.
|
||||||
|
if member.atype < UserOrgType::Admin && TwoFactor::find_by_user(&member.user_uuid, conn).await.is_empty() {
|
||||||
|
if CONFIG.mail_enabled() {
|
||||||
|
let user = User::find_by_uuid(&member.user_uuid, conn).await.unwrap();
|
||||||
|
mail::send_2fa_removed_from_org(&user.email, &org.name).await?;
|
||||||
|
}
|
||||||
|
let mut member = member;
|
||||||
|
member.revoke();
|
||||||
|
member.save(conn).await?;
|
||||||
|
|
||||||
|
log_event(
|
||||||
|
EventType::OrganizationUserRevoked as i32,
|
||||||
|
&member.uuid,
|
||||||
|
org_uuid,
|
||||||
|
act_uuid,
|
||||||
|
device_type,
|
||||||
|
ip,
|
||||||
|
conn,
|
||||||
|
)
|
||||||
|
.await;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
pub async fn send_incomplete_2fa_notifications(pool: DbPool) {
|
pub async fn send_incomplete_2fa_notifications(pool: DbPool) {
|
||||||
debug!("Sending notifications for incomplete 2FA logins");
|
debug!("Sending notifications for incomplete 2FA logins");
|
||||||
|
|
||||||
|
@ -191,7 +259,7 @@ pub async fn send_incomplete_2fa_notifications(pool: DbPool) {
|
||||||
};
|
};
|
||||||
|
|
||||||
let now = Utc::now().naive_utc();
|
let now = Utc::now().naive_utc();
|
||||||
let time_limit = Duration::minutes(CONFIG.incomplete_2fa_time_limit());
|
let time_limit = TimeDelta::try_minutes(CONFIG.incomplete_2fa_time_limit()).unwrap();
|
||||||
let time_before = now - time_limit;
|
let time_before = now - time_limit;
|
||||||
let incomplete_logins = TwoFactorIncomplete::find_logins_before(&time_before, &mut conn).await;
|
let incomplete_logins = TwoFactorIncomplete::find_logins_before(&time_before, &mut conn).await;
|
||||||
for login in incomplete_logins {
|
for login in incomplete_logins {
|
||||||
|
|
142
src/api/core/two_factor/protected_actions.rs
Normal file
142
src/api/core/two_factor/protected_actions.rs
Normal file
|
@ -0,0 +1,142 @@
|
||||||
|
use chrono::{DateTime, TimeDelta, Utc};
|
||||||
|
use rocket::Route;
|
||||||
|
|
||||||
|
use crate::{
|
||||||
|
api::{EmptyResult, JsonUpcase},
|
||||||
|
auth::Headers,
|
||||||
|
crypto,
|
||||||
|
db::{
|
||||||
|
models::{TwoFactor, TwoFactorType},
|
||||||
|
DbConn,
|
||||||
|
},
|
||||||
|
error::{Error, MapResult},
|
||||||
|
mail, CONFIG,
|
||||||
|
};
|
||||||
|
|
||||||
|
/// Rocket routes exposed by this module: token issuance and verification
/// for protected actions (e.g. vault export).
pub fn routes() -> Vec<Route> {
    routes![request_otp, verify_otp]
}
|
||||||
|
|
||||||
|
/// Data stored in the TwoFactor table in the db
#[derive(Serialize, Deserialize, Debug)]
pub struct ProtectedActionData {
    /// Token issued to validate the protected action
    pub token: String,
    /// UNIX timestamp of token issue.
    pub token_sent: i64,
    // The total amount of attempts made to validate this token; the record is
    // discarded once this exceeds 6 (see `validate_protected_action_otp`).
    pub attempts: u8,
}
|
||||||
|
|
||||||
|
impl ProtectedActionData {
|
||||||
|
pub fn new(token: String) -> Self {
|
||||||
|
Self {
|
||||||
|
token,
|
||||||
|
token_sent: Utc::now().timestamp(),
|
||||||
|
attempts: 0,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn to_json(&self) -> String {
|
||||||
|
serde_json::to_string(&self).unwrap()
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn from_json(string: &str) -> Result<Self, Error> {
|
||||||
|
let res: Result<Self, crate::serde_json::Error> = serde_json::from_str(string);
|
||||||
|
match res {
|
||||||
|
Ok(x) => Ok(x),
|
||||||
|
Err(_) => err!("Could not decode ProtectedActionData from string"),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn add_attempt(&mut self) {
|
||||||
|
self.attempts += 1;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Issues a fresh protected-action token for the authenticated user and mails
/// it to them. Requires mail to be enabled; any previously pending token for
/// the user is discarded first.
#[post("/accounts/request-otp")]
async fn request_otp(headers: Headers, mut conn: DbConn) -> EmptyResult {
    if !CONFIG.mail_enabled() {
        err!("Email is disabled for this server. Either enable email or login using your master password instead of login via device.");
    }

    let user = headers.user;

    // Only one Protected Action per user is allowed to take place, delete the previous one
    if let Some(pa) =
        TwoFactor::find_by_user_and_type(&user.uuid, TwoFactorType::ProtectedActions as i32, &mut conn).await
    {
        pa.delete(&mut conn).await?;
    }

    let generated_token = crypto::generate_email_token(CONFIG.email_token_size());
    let pa_data = ProtectedActionData::new(generated_token);

    // Stored under TwoFactorType::ProtectedActions; the row only exists while a
    // token is pending validation. NOTE(review): a previous comment here claimed
    // EmailVerificationChallenge was used as the type, which did not match the code.
    let twofactor = TwoFactor::new(user.uuid, TwoFactorType::ProtectedActions, pa_data.to_json());
    twofactor.save(&mut conn).await?;

    mail::send_protected_action_token(&user.email, &pa_data.token).await?;

    Ok(())
}
|
||||||
|
|
||||||
|
/// Request body for `verify_otp`. The field is upper-cased to mirror the
/// client's JSON payload (deserialized via `JsonUpcase`), hence the
/// `non_snake_case` allowance.
#[derive(Deserialize, Serialize, Debug)]
#[allow(non_snake_case)]
struct ProtectedActionVerify {
    OTP: String,
}
|
||||||
|
|
||||||
|
#[post("/accounts/verify-otp", data = "<data>")]
|
||||||
|
async fn verify_otp(data: JsonUpcase<ProtectedActionVerify>, headers: Headers, mut conn: DbConn) -> EmptyResult {
|
||||||
|
if !CONFIG.mail_enabled() {
|
||||||
|
err!("Email is disabled for this server. Either enable email or login using your master password instead of login via device.");
|
||||||
|
}
|
||||||
|
|
||||||
|
let user = headers.user;
|
||||||
|
let data: ProtectedActionVerify = data.into_inner().data;
|
||||||
|
|
||||||
|
// Delete the token after one validation attempt
|
||||||
|
// This endpoint only gets called for the vault export, and doesn't need a second attempt
|
||||||
|
validate_protected_action_otp(&data.OTP, &user.uuid, true, &mut conn).await
|
||||||
|
}
|
||||||
|
|
||||||
|
pub async fn validate_protected_action_otp(
|
||||||
|
otp: &str,
|
||||||
|
user_uuid: &str,
|
||||||
|
delete_if_valid: bool,
|
||||||
|
conn: &mut DbConn,
|
||||||
|
) -> EmptyResult {
|
||||||
|
let pa = TwoFactor::find_by_user_and_type(user_uuid, TwoFactorType::ProtectedActions as i32, conn)
|
||||||
|
.await
|
||||||
|
.map_res("Protected action token not found, try sending the code again or restart the process")?;
|
||||||
|
let mut pa_data = ProtectedActionData::from_json(&pa.data)?;
|
||||||
|
|
||||||
|
pa_data.add_attempt();
|
||||||
|
// Delete the token after x attempts if it has been used too many times
|
||||||
|
// We use the 6, which should be more then enough for invalid attempts and multiple valid checks
|
||||||
|
if pa_data.attempts > 6 {
|
||||||
|
pa.delete(conn).await?;
|
||||||
|
err!("Token has expired")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check if the token has expired (Using the email 2fa expiration time)
|
||||||
|
let date =
|
||||||
|
DateTime::from_timestamp(pa_data.token_sent, 0).expect("Protected Action token timestamp invalid.").naive_utc();
|
||||||
|
let max_time = CONFIG.email_expiration_time() as i64;
|
||||||
|
if date + TimeDelta::try_seconds(max_time).unwrap() < Utc::now().naive_utc() {
|
||||||
|
pa.delete(conn).await?;
|
||||||
|
err!("Token has expired")
|
||||||
|
}
|
||||||
|
|
||||||
|
if !crypto::ct_eq(&pa_data.token, otp) {
|
||||||
|
pa.save(conn).await?;
|
||||||
|
err!("Token is invalid")
|
||||||
|
}
|
||||||
|
|
||||||
|
if delete_if_valid {
|
||||||
|
pa.delete(conn).await?;
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue
Block a user