From 2d5f172e777e09a47c79d8437d94df160afd6e38 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Daniel=20Garc=C3=ADa?= Date: Sun, 7 Nov 2021 18:53:39 +0100 Subject: [PATCH 1/8] Update to rocket 0.5 and made code async, missing updating all db calls, that are currently blocking --- Cargo.lock | 874 ++++++++++++++--------- Cargo.toml | 27 +- Rocket.toml | 2 - rust-toolchain | 2 +- src/api/admin.rs | 65 +- src/api/core/accounts.rs | 2 +- src/api/core/ciphers.rs | 258 +++---- src/api/core/emergency_access.rs | 10 +- src/api/core/folders.rs | 2 +- src/api/core/mod.rs | 8 +- src/api/core/organizations.rs | 12 +- src/api/core/sends.rs | 85 +-- src/api/core/two_factor/authenticator.rs | 2 +- src/api/core/two_factor/duo.rs | 15 +- src/api/core/two_factor/email.rs | 2 +- src/api/core/two_factor/mod.rs | 6 +- src/api/core/two_factor/u2f.rs | 2 +- src/api/core/two_factor/webauthn.rs | 2 +- src/api/core/two_factor/yubikey.rs | 2 +- src/api/icons.rs | 125 ++-- src/api/identity.rs | 71 +- src/api/mod.rs | 2 +- src/api/notifications.rs | 15 +- src/api/web.rs | 49 +- src/auth.rs | 262 ++++--- src/config.rs | 31 + src/db/mod.rs | 231 ++++-- src/error.rs | 10 +- src/main.rs | 98 ++- src/util.rs | 70 +- 30 files changed, 1314 insertions(+), 1028 deletions(-) delete mode 100644 Rocket.toml diff --git a/Cargo.lock b/Cargo.lock index 0e18e6f6..63504f0a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -41,6 +41,15 @@ dependencies = [ "alloc-no-stdlib", ] +[[package]] +name = "ansi_term" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d52a9bb7ec0cf484c551830a7ce27bd20d67eac647e1befb56b0be4ee39a55d2" +dependencies = [ + "winapi 0.3.9", +] + [[package]] name = "async-compression" version = "0.3.12" @@ -55,15 +64,45 @@ dependencies = [ "tokio", ] +[[package]] +name = "async-stream" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "171374e7e3b2504e0e5236e3b59260560f9fe94bfe9ac39ba5e4e929c5590625" +dependencies 
= [ + "async-stream-impl", + "futures-core", +] + +[[package]] +name = "async-stream-impl" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "648ed8c8d2ce5409ccd57453d9d1b214b342a0d69376a6feda1fd6cae3299308" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "async-trait" version = "0.1.52" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "061a7acccaa286c011ddc30970520b98fa40e00c9d644633fb26b5fc63a265e3" dependencies = [ - "proc-macro2 1.0.36", - "quote 1.0.15", - "syn 1.0.86", + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "atomic" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b88d82667eca772c4aa12f0f1348b3ae643424c8876448f3f7bd5787032e234c" +dependencies = [ + "autocfg", ] [[package]] @@ -104,16 +143,6 @@ version = "0.2.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a4521f3e3d031370679b3b140beb36dfe4801b09ac77e30c61941f97df3ef28b" -[[package]] -name = "base64" -version = "0.9.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "489d6c0ed21b11d038c31b6ceccca973e65d73ba3bd8ecb9a2babf5546164643" -dependencies = [ - "byteorder", - "safemem", -] - [[package]] name = "base64" version = "0.11.0" @@ -195,16 +224,6 @@ dependencies = [ "alloc-stdlib", ] -[[package]] -name = "buf_redux" -version = "0.8.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b953a6887648bb07a535631f2bc00fbdb2a2216f135552cb3f534ed136b9c07f" -dependencies = [ - "memchr", - "safemem", -] - [[package]] name = "bumpalo" version = "3.9.1" @@ -309,17 +328,6 @@ version = "0.4.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fbdcdcb6d86f71c5e97409ad45898af11cbc995b4ee8112d59095a28d376c935" -[[package]] -name = "cookie" -version = "0.14.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"03a5d7b21829bc7b4bf4754a978a241ae54ea55a40f92bb20216e54096f4b951" -dependencies = [ - "percent-encoding 2.1.0", - "time 0.2.27", - "version_check 0.9.4", -] - [[package]] name = "cookie" version = "0.15.1" @@ -328,7 +336,18 @@ checksum = "d5f1c7727e460397e56abc4bddc1d49e07a1ad78fc98eb2e1c8f032a58a2f80d" dependencies = [ "percent-encoding 2.1.0", "time 0.2.27", - "version_check 0.9.4", + "version_check", +] + +[[package]] +name = "cookie" +version = "0.16.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "94d4706de1b0fa5b132270cddffa8585166037822e260a944fe161acd137ca05" +dependencies = [ + "percent-encoding 2.1.0", + "time 0.3.7", + "version_check", ] [[package]] @@ -339,7 +358,7 @@ checksum = "b3f7034c0932dc36f5bd8ec37368d971346809435824f277cb3b8299fc56167c" dependencies = [ "cookie 0.15.1", "idna 0.2.3", - "log 0.4.14", + "log", "publicsuffix", "serde", "serde_json", @@ -422,6 +441,16 @@ dependencies = [ "subtle", ] +[[package]] +name = "ctrlc" +version = "3.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a19c6cedffdc8c03a3346d723eb20bd85a13362bb96dc2ac000842c6381ec7bf" +dependencies = [ + "nix", + "winapi 0.3.9", +] + [[package]] name = "dashmap" version = "4.0.2" @@ -449,8 +478,9 @@ dependencies = [ [[package]] name = "devise" -version = "0.3.0" -source = "git+https://github.com/SergioBenitez/Devise.git?rev=e58b3ac9a#e58b3ac9afc3b6ff10a8aaf02a3e768a8f530089" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "50c7580b072f1c8476148f16e0a0d5dedddab787da98d86c5082c5e9ed8ab595" dependencies = [ "devise_codegen", "devise_core", @@ -458,22 +488,25 @@ dependencies = [ [[package]] name = "devise_codegen" -version = "0.3.0" -source = "git+https://github.com/SergioBenitez/Devise.git?rev=e58b3ac9a#e58b3ac9afc3b6ff10a8aaf02a3e768a8f530089" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"123c73e7a6e51b05c75fe1a1b2f4e241399ea5740ed810b0e3e6cacd9db5e7b2" dependencies = [ "devise_core", - "quote 1.0.15", + "quote", ] [[package]] name = "devise_core" -version = "0.3.0" -source = "git+https://github.com/SergioBenitez/Devise.git?rev=e58b3ac9a#e58b3ac9afc3b6ff10a8aaf02a3e768a8f530089" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "841ef46f4787d9097405cac4e70fb8644fc037b526e8c14054247c0263c400d0" dependencies = [ "bitflags", - "proc-macro2 1.0.36", - "quote 1.0.15", - "syn 1.0.86", + "proc-macro2", + "proc-macro2-diagnostics", + "quote", + "syn", ] [[package]] @@ -499,9 +532,9 @@ version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "45f5098f628d02a7a0f68ddba586fb61e80edec3bdc1be3b921f4ceec60858d3" dependencies = [ - "proc-macro2 1.0.36", - "quote 1.0.15", - "syn 1.0.86", + "proc-macro2", + "quote", + "syn", ] [[package]] @@ -566,9 +599,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7c5f0096a91d210159eceb2ff5e1c4da18388a170e1e3ce948aac9c8fdbbf595" dependencies = [ "heck", - "proc-macro2 1.0.36", - "quote 1.0.15", - "syn 1.0.86", + "proc-macro2", + "quote", + "syn", ] [[package]] @@ -601,10 +634,24 @@ version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8c9a4820f0ccc8a7afd67c39a0f1a0f4b07ca1725164271a64939d7aeb9af065" dependencies = [ - "log 0.4.14", + "log", "syslog", ] +[[package]] +name = "figment" +version = "0.10.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "790b4292c72618abbab50f787a477014fe15634f96291de45672ce46afe122df" +dependencies = [ + "atomic", + "pear", + "serde", + "toml", + "uncased", + "version_check", +] + [[package]] name = "flate2" version = "1.0.22" @@ -734,9 +781,9 @@ version = "0.3.19" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6dbd947adfffb0efc70599b3ddcf7b5597bb5fa9e245eb99f62b3a5f7bb8bd3c" 
dependencies = [ - "proc-macro2 1.0.36", - "quote 1.0.15", - "syn 1.0.86", + "proc-macro2", + "quote", + "syn", ] [[package]] @@ -775,6 +822,19 @@ dependencies = [ "slab", ] +[[package]] +name = "generator" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c1d9279ca822891c1a4dae06d185612cf8fc6acfe5dff37781b41297811b12ee" +dependencies = [ + "cc", + "libc", + "log", + "rustversion", + "winapi 0.3.9", +] + [[package]] name = "generic-array" version = "0.12.4" @@ -791,7 +851,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fd48d33ec7f05fbfa152300fdad764757cbded343c1aa1cff2fbaf4134851803" dependencies = [ "typenum", - "version_check 0.9.4", + "version_check", ] [[package]] @@ -876,7 +936,7 @@ version = "4.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "25546a65e5cf1f471f3438796fc634650b31d7fcde01d444c309aeb28b92e3a8" dependencies = [ - "log 0.4.14", + "log", "pest", "pest_derive", "quick-error 2.0.1", @@ -946,12 +1006,12 @@ version = "0.25.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "aafcf38a1a36118242d29b92e1b08ef84e67e4a5ed06e0a80be20e6a32bfed6b" dependencies = [ - "log 0.4.14", + "log", "mac", "markup5ever", - "proc-macro2 1.0.36", - "quote 1.0.15", - "syn 1.0.86", + "proc-macro2", + "quote", + "syn", ] [[package]] @@ -988,25 +1048,6 @@ version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c4a1e36c821dbe04574f602848a19f742f4fb3c98d40449f11bcad18d6b17421" -[[package]] -name = "hyper" -version = "0.10.16" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0a0652d9a2609a968c14be1a9ea00bf4b1d64e2e1f53a1b51b6fff3a6e829273" -dependencies = [ - "base64 0.9.3", - "httparse", - "language-tags", - "log 0.3.9", - "mime 0.2.6", - "num_cpus", - "time 0.1.44", - "traitobject", - "typeable", - "unicase 1.4.2", - "url 1.7.2", -] - [[package]] name = "hyper" version = 
"0.14.16" @@ -1031,18 +1072,6 @@ dependencies = [ "want", ] -[[package]] -name = "hyper-sync-rustls" -version = "0.3.0-rc.17" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4cb014c4ea00486e2b62860b5e15229d37516d4924177218beafbf46583de3ab" -dependencies = [ - "hyper 0.10.16", - "rustls", - "webpki", - "webpki-roots", -] - [[package]] name = "hyper-tls" version = "0.5.0" @@ -1050,7 +1079,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d6183ddfa99b85da61a140bea0efc93fdf56ceaa041b37d553518030827f9905" dependencies = [ "bytes 1.1.0", - "hyper 0.14.16", + "hyper", "native-tls", "tokio", "tokio-native-tls", @@ -1086,8 +1115,15 @@ checksum = "282a6247722caba404c065016bbfa522806e51714c34f5dfc3e4a3a46fcb4223" dependencies = [ "autocfg", "hashbrown", + "serde", ] +[[package]] +name = "inlinable_string" +version = "0.1.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c8fae54786f62fb2918dcfae3d568594e50eb9b5c25bf04371af6fe7516452fb" + [[package]] name = "instant" version = "0.1.12" @@ -1179,12 +1215,6 @@ dependencies = [ "winapi-build", ] -[[package]] -name = "language-tags" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a91d884b6667cd606bb5a69aa0c99ba811a115fc68915e7056ec08a46e93199a" - [[package]] name = "lazy_static" version = "1.4.0" @@ -1208,7 +1238,7 @@ dependencies = [ "hostname", "httpdate", "idna 0.2.3", - "mime 0.3.16", + "mime", "native-tls", "nom 7.1.0", "once_cell", @@ -1250,15 +1280,6 @@ dependencies = [ "scopeguard", ] -[[package]] -name = "log" -version = "0.3.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e19e8d5c34a3e0e2223db8e060f9e8264aeeb5c5fc64a4ee9965c062211c024b" -dependencies = [ - "log 0.4.14", -] - [[package]] name = "log" version = "0.4.14" @@ -1268,6 +1289,21 @@ dependencies = [ "cfg-if 1.0.0", ] +[[package]] +name = "loom" +version = "0.5.4" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "edc5c7d328e32cc4954e8e01193d7f0ef5ab257b5090b70a964e099a36034309" +dependencies = [ + "cfg-if 1.0.0", + "generator", + "scoped-tls", + "serde", + "serde_json", + "tracing", + "tracing-subscriber", +] + [[package]] name = "lru-cache" version = "0.1.2" @@ -1304,7 +1340,7 @@ version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a24f40fb03852d1cdd84330cddcaf98e9ec08a7b7768e952fad3b4cf048ec8fd" dependencies = [ - "log 0.4.14", + "log", "phf 0.8.0", "phf_codegen 0.8.0", "string_cache", @@ -1330,6 +1366,15 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ffbee8634e0d45d258acb448e7eaab3fce7a0a467395d4d9f228e3c1f01fb2e4" +[[package]] +name = "matchers" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8263075bb86c5a1b1427b5ae862e8889656f126e9f77c484496e8b47cf5c5558" +dependencies = [ + "regex-automata", +] + [[package]] name = "matches" version = "0.1.9" @@ -1348,6 +1393,15 @@ version = "2.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "308cc39be01b73d0d18f82a0e7b2a3df85245f84af96fdddc5d202d27e47b86a" +[[package]] +name = "memoffset" +version = "0.6.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5aa361d4faea93603064a027415f07bd8e1d5c88c9fbf68bf56a285428fd79ce" +dependencies = [ + "autocfg", +] + [[package]] name = "migrations_internals" version = "1.4.1" @@ -1364,18 +1418,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9753f12909fd8d923f75ae5c3258cae1ed3c8ec052e1b38c93c21a6d157f789c" dependencies = [ "migrations_internals", - "proc-macro2 1.0.36", - "quote 1.0.15", - "syn 1.0.86", -] - -[[package]] -name = "mime" -version = "0.2.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"ba626b8a6de5da682e1caa06bdb42a335aee5a84db8e5046a3e8ab17ba0a3ae0" -dependencies = [ - "log 0.3.9", + "proc-macro2", + "quote", + "syn", ] [[package]] @@ -1384,16 +1429,6 @@ version = "0.3.16" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2a60c7ce501c71e03a9c9c0d35b861413ae925bd979cc7a4e30d060069aaac8d" -[[package]] -name = "mime_guess" -version = "2.0.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2684d4c2e97d99848d30b324b00c8fcc7e5c897b7cbb5819b09e7c90e8baf212" -dependencies = [ - "mime 0.3.16", - "unicase 2.6.0", -] - [[package]] name = "minimal-lexical" version = "0.2.1" @@ -1422,7 +1457,7 @@ dependencies = [ "iovec", "kernel32-sys", "libc", - "log 0.4.14", + "log", "miow 0.2.2", "net2", "slab", @@ -1436,7 +1471,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8067b404fe97c70829f082dec8bcf4f71225d7eaea1d8645349cb76fa06205cc" dependencies = [ "libc", - "log 0.4.14", + "log", "miow 0.3.7", "ntapi", "winapi 0.3.9", @@ -1449,7 +1484,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "52403fe290012ce777c4626790c8951324a2b9e3316b3143779c72b029742f19" dependencies = [ "lazycell", - "log 0.4.14", + "log", "mio 0.6.23", "slab", ] @@ -1476,21 +1511,23 @@ dependencies = [ ] [[package]] -name = "multipart" -version = "0.18.0" +name = "multer" +version = "2.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "00dec633863867f29cb39df64a397cdf4a6354708ddd7759f70c7fb51c5f9182" +checksum = "5f8f35e687561d5c1667590911e6698a8cb714a134a7505718a182e7bc9d3836" dependencies = [ - "buf_redux", + "bytes 1.1.0", + "encoding_rs", + "futures-util", + "http", "httparse", - "log 0.4.14", - "mime 0.3.16", - "mime_guess", - "quick-error 1.2.3", - "rand 0.8.4", - "safemem", - "tempfile", - "twoway", + "log", + "memchr", + "mime", + "spin 0.9.2", + "tokio", + "tokio-util", + "version_check", ] [[package]] @@ -1511,7 +1548,7 @@ checksum = 
"48ba9f7719b5a0f42f338907614285fb5fd70e53858141f69898a1fb7203b24d" dependencies = [ "lazy_static", "libc", - "log 0.4.14", + "log", "openssl", "openssl-probe", "openssl-sys", @@ -1538,6 +1575,19 @@ version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e4a24736216ec316047a1fc4252e27dabb04218aa4a3f37c6e7ddbf1f9782b54" +[[package]] +name = "nix" +version = "0.23.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9f866317acbd3a240710c63f065ffb1e4fd466259045ccb504130b7f668f35c6" +dependencies = [ + "bitflags", + "cc", + "cfg-if 1.0.0", + "libc", + "memoffset", +] + [[package]] name = "no-std-compat" version = "0.4.1" @@ -1561,7 +1611,7 @@ checksum = "1b1d11e1ef389c76fe5b81bcaf2ea32cf88b62bc494e19f493d0b30e7a930109" dependencies = [ "memchr", "minimal-lexical", - "version_check 0.9.4", + "version_check", ] [[package]] @@ -1596,9 +1646,9 @@ version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "876a53fff98e03a936a674b29568b0e605f06b29372c2489ff4de23f1949743d" dependencies = [ - "proc-macro2 1.0.36", - "quote 1.0.15", - "syn 1.0.86", + "proc-macro2", + "quote", + "syn", ] [[package]] @@ -1630,6 +1680,15 @@ dependencies = [ "libc", ] +[[package]] +name = "num_threads" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "97ba99ba6393e2c3734791401b66902d981cb03bf190af674ca69949b6d5fb15" +dependencies = [ + "libc", +] + [[package]] name = "object" version = "0.27.1" @@ -1718,7 +1777,7 @@ dependencies = [ "byteorder", "bytes 0.4.12", "httparse", - "log 0.4.14", + "log", "mio 0.6.23", "mio-extras", "rand 0.7.3", @@ -1791,24 +1850,25 @@ checksum = "0744126afe1a6dd7f394cb50a716dbe086cb06e255e53d8d0185d82828358fb5" [[package]] name = "pear" -version = "0.1.4" +version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5320f212db967792b67cfe12bd469d08afd6318a249bd917d5c19bc92200ab8a" 
+checksum = "15e44241c5e4c868e3eaa78b7c1848cadd6344ed4f54d029832d32b415a58702" dependencies = [ + "inlinable_string", "pear_codegen", + "yansi", ] [[package]] name = "pear_codegen" -version = "0.1.4" +version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bfc1c836fdc3d1ef87c348b237b5b5c4dff922156fb2d968f57734f9669768ca" +checksum = "82a5ca643c2303ecb740d506539deba189e16f2754040a42901cd8105d0282d0" dependencies = [ - "proc-macro2 0.4.30", - "quote 0.6.13", - "syn 0.15.44", - "version_check 0.9.4", - "yansi", + "proc-macro2", + "proc-macro2-diagnostics", + "quote", + "syn", ] [[package]] @@ -1861,9 +1921,9 @@ checksum = "99b8db626e31e5b81787b9783425769681b347011cc59471e33ea46d2ea0cf55" dependencies = [ "pest", "pest_meta", - "proc-macro2 1.0.36", - "quote 1.0.15", - "syn 1.0.86", + "proc-macro2", + "quote", + "syn", ] [[package]] @@ -2005,22 +2065,26 @@ version = "0.5.19" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dbf0c48bc1d91375ae5c3cd81e3722dff1abcf81a30960240640d223f59fe0e5" -[[package]] -name = "proc-macro2" -version = "0.4.30" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf3d2011ab5c909338f7887f4fc896d35932e29146c12c8d01da6b22a80ba759" -dependencies = [ - "unicode-xid 0.1.0", -] - [[package]] name = "proc-macro2" version = "1.0.36" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c7342d5883fbccae1cc37a2353b09c87c9b0f3afd73f5fb9bba687a1f733b029" dependencies = [ - "unicode-xid 0.2.2", + "unicode-xid", +] + +[[package]] +name = "proc-macro2-diagnostics" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4bf29726d67464d49fa6224a1d07936a8c08bb3fba727c7493f6cf1616fdaada" +dependencies = [ + "proc-macro2", + "quote", + "syn", + "version_check", + "yansi", ] [[package]] @@ -2069,22 +2133,13 @@ version = "2.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"a993555f31e5a609f617c12db6250dedcac1b0a85076912c436e6fc9b2c8e6a3" -[[package]] -name = "quote" -version = "0.6.13" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ce23b6b870e8f94f81fb0a363d65d86675884b34a09043c81e5562f11c1f8e1" -dependencies = [ - "proc-macro2 0.4.30", -] - [[package]] name = "quote" version = "1.0.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "864d3e96a899863136fc6e99f3d7cae289dafe43bf2c5ac19b70df7210c0a145" dependencies = [ - "proc-macro2 1.0.36", + "proc-macro2", ] [[package]] @@ -2099,7 +2154,7 @@ version = "0.8.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "545c5bc2b880973c9c10e4067418407a0ccaa3091781d1671d46eb35107cb26f" dependencies = [ - "log 0.4.14", + "log", "parking_lot 0.11.2", "scheduled-thread-pool", ] @@ -2250,6 +2305,26 @@ dependencies = [ "bitflags", ] +[[package]] +name = "ref-cast" +version = "1.0.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "300f2a835d808734ee295d45007adacb9ebb29dd3ae2424acfa17930cae541da" +dependencies = [ + "ref-cast-impl", +] + +[[package]] +name = "ref-cast-impl" +version = "1.0.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4c38e3aecd2b21cb3959637b883bb3714bc7e43f0268b9a29d3743ee3e55cdd2" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "regex" version = "1.5.4" @@ -2261,6 +2336,15 @@ dependencies = [ "regex-syntax", ] +[[package]] +name = "regex-automata" +version = "0.1.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6c230d73fb8d8c1b9c0b3135c5142a8acee3a0558fb8db5cf1cb65f8d7862132" +dependencies = [ + "regex-syntax", +] + [[package]] name = "regex-syntax" version = "0.6.25" @@ -2293,13 +2377,13 @@ dependencies = [ "h2", "http", "http-body", - "hyper 0.14.16", + "hyper", "hyper-tls", "ipnet", "js-sys", "lazy_static", - "log 0.4.14", - "mime 0.3.16", + "log", + "mime", 
"native-tls", "percent-encoding 2.1.0", "pin-project-lite", @@ -2338,7 +2422,7 @@ dependencies = [ "cc", "libc", "once_cell", - "spin", + "spin 0.5.2", "untrusted", "web-sys", "winapi 0.3.9", @@ -2366,65 +2450,82 @@ dependencies = [ [[package]] name = "rocket" -version = "0.5.0-dev" -source = "git+https://github.com/SergioBenitez/Rocket?rev=263e39b5b429de1913ce7e3036575a7b4d88b6d7#263e39b5b429de1913ce7e3036575a7b4d88b6d7" +version = "0.5.0-rc.1" +source = "git+https://github.com/SergioBenitez/Rocket?rev=8cae077ba1d54b92cdef3e171a730b819d5eeb8e#8cae077ba1d54b92cdef3e171a730b819d5eeb8e" dependencies = [ + "async-stream", + "async-trait", + "atomic", "atty", "binascii", - "log 0.4.14", + "bytes 1.1.0", + "either", + "figment", + "futures", + "indexmap", + "log", "memchr", + "multer", "num_cpus", - "pear", + "parking_lot 0.11.2", + "pin-project-lite", + "rand 0.8.4", + "ref-cast", "rocket_codegen", "rocket_http", + "serde", + "serde_json", "state", - "time 0.2.27", - "toml", - "version_check 0.9.4", + "tempfile", + "time 0.3.7", + "tokio", + "tokio-stream", + "tokio-util", + "ubyte", + "version_check", "yansi", ] [[package]] name = "rocket_codegen" -version = "0.5.0-dev" -source = "git+https://github.com/SergioBenitez/Rocket?rev=263e39b5b429de1913ce7e3036575a7b4d88b6d7#263e39b5b429de1913ce7e3036575a7b4d88b6d7" +version = "0.5.0-rc.1" +source = "git+https://github.com/SergioBenitez/Rocket?rev=8cae077ba1d54b92cdef3e171a730b819d5eeb8e#8cae077ba1d54b92cdef3e171a730b819d5eeb8e" dependencies = [ "devise", "glob", "indexmap", - "quote 1.0.15", + "proc-macro2", + "quote", "rocket_http", - "version_check 0.9.4", - "yansi", -] - -[[package]] -name = "rocket_contrib" -version = "0.5.0-dev" -source = "git+https://github.com/SergioBenitez/Rocket?rev=263e39b5b429de1913ce7e3036575a7b4d88b6d7#263e39b5b429de1913ce7e3036575a7b4d88b6d7" -dependencies = [ - "log 0.4.14", - "rocket", - "serde", - "serde_json", + "syn", + "unicode-xid", ] [[package]] name = "rocket_http" -version = 
"0.5.0-dev" -source = "git+https://github.com/SergioBenitez/Rocket?rev=263e39b5b429de1913ce7e3036575a7b4d88b6d7#263e39b5b429de1913ce7e3036575a7b4d88b6d7" +version = "0.5.0-rc.1" +source = "git+https://github.com/SergioBenitez/Rocket?rev=8cae077ba1d54b92cdef3e171a730b819d5eeb8e#8cae077ba1d54b92cdef3e171a730b819d5eeb8e" dependencies = [ - "cookie 0.14.4", - "hyper 0.10.16", - "hyper-sync-rustls", + "cookie 0.16.0", + "either", + "http", + "hyper", "indexmap", + "log", + "memchr", "pear", - "percent-encoding 1.0.1", + "percent-encoding 2.1.0", + "pin-project-lite", + "ref-cast", "rustls", + "serde", "smallvec 1.8.0", + "stable-pattern", "state", - "time 0.2.27", - "unicode-xid 0.2.2", + "time 0.3.7", + "tokio", + "tokio-rustls", + "uncased", ] [[package]] @@ -2444,29 +2545,29 @@ dependencies = [ [[package]] name = "rustls" -version = "0.17.0" +version = "0.19.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c0d4a31f5d68413404705d6982529b0e11a9aacd4839d1d6222ee3b8cb4015e1" +checksum = "35edb675feee39aec9c99fa5ff985081995a06d594114ae14cbe797ad7b7a6d7" dependencies = [ - "base64 0.11.0", - "log 0.4.14", + "base64 0.13.0", + "log", "ring", "sct", "webpki", ] +[[package]] +name = "rustversion" +version = "1.0.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f2cc38e8fa666e2de3c4aba7edeb5ffc5246c1c2ed0e3d17e560aeeba736b23f" + [[package]] name = "ryu" version = "1.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "73b4b750c782965c211b42f022f59af1fbceabdd026623714f104152f1ec149f" -[[package]] -name = "safemem" -version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ef703b7cb59335eae2eb93ceb664c0eb7ea6bf567079d843e09420219668e072" - [[package]] name = "same-file" version = "1.0.6" @@ -2495,6 +2596,12 @@ dependencies = [ "parking_lot 0.11.2", ] +[[package]] +name = "scoped-tls" +version = "1.0.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "ea6a9290e3c9cf0f18145ef7ffa62d68ee0bf5fcd651017e586dc7fd5da448c2" + [[package]] name = "scopeguard" version = "1.1.0" @@ -2574,9 +2681,9 @@ version = "1.0.136" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "08597e7152fcd306f41838ed3e37be9eaeed2b61c42e2117266a554fab4662f9" dependencies = [ - "proc-macro2 1.0.36", - "quote 1.0.15", - "syn 1.0.86", + "proc-macro2", + "quote", + "syn", ] [[package]] @@ -2655,6 +2762,24 @@ dependencies = [ "opaque-debug 0.3.0", ] +[[package]] +name = "sharded-slab" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "900fba806f70c630b0a382d0d825e17a0f19fcd059a2ade1ff237bcddf446b31" +dependencies = [ + "lazy_static", +] + +[[package]] +name = "signal-hook-registry" +version = "1.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e51e73328dc4ac0c7ccbda3a494dfa03df1de2f46018127f60c693f2648455b0" +dependencies = [ + "libc", +] + [[package]] name = "simple_asn1" version = "0.4.1" @@ -2720,6 +2845,21 @@ version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" +[[package]] +name = "spin" +version = "0.9.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "511254be0c5bcf062b019a6c89c01a664aa359ded62f78aa72c6fc137c0590e5" + +[[package]] +name = "stable-pattern" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4564168c00635f88eaed410d5efa8131afa8d8699a612c80c455a0ba05c21045" +dependencies = [ + "memchr", +] + [[package]] name = "stable_deref_trait" version = "1.2.0" @@ -2732,14 +2872,17 @@ version = "0.2.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e113fb6f3de07a243d434a56ec6f186dfd51cb08448239fe7bcae73f87ff28ff" dependencies = [ - "version_check 0.9.4", + 
"version_check", ] [[package]] name = "state" -version = "0.4.2" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3015a7d0a5fd5105c91c3710d42f9ccf0abfb287d62206484dcc67f9569a6483" +checksum = "87cf4f5369e6d3044b5e365c9690f451516ac8f0954084622b49ea3fde2f6de5" +dependencies = [ + "loom", +] [[package]] name = "stdweb" @@ -2761,11 +2904,11 @@ version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c87a60a40fccc84bef0652345bbbbbe20a605bf5d0ce81719fc476f5c03b50ef" dependencies = [ - "proc-macro2 1.0.36", - "quote 1.0.15", + "proc-macro2", + "quote", "serde", "serde_derive", - "syn 1.0.86", + "syn", ] [[package]] @@ -2775,13 +2918,13 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "58fa5ff6ad0d98d1ffa8cb115892b6e69d67799f6763e162a1c9db421dc22e11" dependencies = [ "base-x", - "proc-macro2 1.0.36", - "quote 1.0.15", + "proc-macro2", + "quote", "serde", "serde_derive", "serde_json", "sha1", - "syn 1.0.86", + "syn", ] [[package]] @@ -2812,8 +2955,8 @@ checksum = "f24c8e5e19d22a726626f1a5e16fe15b132dcf21d10177fa5a45ce7962996b97" dependencies = [ "phf_generator 0.8.0", "phf_shared 0.8.0", - "proc-macro2 1.0.36", - "quote 1.0.15", + "proc-macro2", + "quote", ] [[package]] @@ -2822,26 +2965,15 @@ version = "2.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6bdef32e8150c2a081110b42772ffe7d7c9032b606bc226c8260fd97e0976601" -[[package]] -name = "syn" -version = "0.15.44" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ca4b3b69a77cbe1ffc9e198781b7acb0c7365a883670e8f1c1bc66fba79a5c5" -dependencies = [ - "proc-macro2 0.4.30", - "quote 0.6.13", - "unicode-xid 0.1.0", -] - [[package]] name = "syn" version = "1.0.86" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8a65b3f4ffa0092e9887669db0eae07941f023991ab58ea44da8fe8e2d511c6b" dependencies = [ - "proc-macro2 1.0.36", - "quote 
1.0.15", - "unicode-xid 0.2.2", + "proc-macro2", + "quote", + "unicode-xid", ] [[package]] @@ -2852,7 +2984,7 @@ checksum = "a0641142b4081d3d44beffa4eefd7346a228cdf91ed70186db2ca2cef762d327" dependencies = [ "error-chain", "libc", - "log 0.4.14", + "log", "time 0.1.44", ] @@ -2896,9 +3028,18 @@ version = "1.0.30" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "aa32fd3f627f367fe16f893e2597ae3c05020f8bba2666a4e6ea73d377e5714b" dependencies = [ - "proc-macro2 1.0.36", - "quote 1.0.15", - "syn 1.0.86", + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "thread_local" +version = "1.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5516c27b78311c50bf42c071425c560ac799b11c30b31f87e3081965fe5e0180" +dependencies = [ + "once_cell", ] [[package]] @@ -2931,11 +3072,23 @@ dependencies = [ "libc", "standback", "stdweb", - "time-macros", - "version_check 0.9.4", + "time-macros 0.1.1", + "version_check", "winapi 0.3.9", ] +[[package]] +name = "time" +version = "0.3.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "004cbc98f30fa233c61a38bc77e96a9106e65c88f2d3bef182ae952027e5753d" +dependencies = [ + "itoa 1.0.1", + "libc", + "num_threads", + "time-macros 0.2.3", +] + [[package]] name = "time-macros" version = "0.1.1" @@ -2946,6 +3099,12 @@ dependencies = [ "time-macros-impl", ] +[[package]] +name = "time-macros" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "25eb0ca3468fc0acc11828786797f6ef9aa1555e4a211a60d64cc8e4d1be47d6" + [[package]] name = "time-macros-impl" version = "0.1.2" @@ -2953,10 +3112,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fd3c141a1b43194f3f56a1411225df8646c55781d5f26db825b3d98507eb482f" dependencies = [ "proc-macro-hack", - "proc-macro2 1.0.36", - "quote 1.0.15", + "proc-macro2", + "quote", "standback", - "syn 1.0.86", + "syn", ] [[package]] @@ -2985,10 +3144,25 @@ 
dependencies = [ "memchr", "mio 0.7.14", "num_cpus", + "once_cell", + "parking_lot 0.11.2", "pin-project-lite", + "signal-hook-registry", + "tokio-macros", "winapi 0.3.9", ] +[[package]] +name = "tokio-macros" +version = "1.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b557f72f448c511a979e2564e55d74e6c4432fc96ff4f6241bc6bded342643b7" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "tokio-native-tls" version = "0.3.0" @@ -2999,6 +3173,17 @@ dependencies = [ "tokio", ] +[[package]] +name = "tokio-rustls" +version = "0.22.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bc6844de72e57df1980054b38be3a9f4702aba4858be64dd700181a8a6d0e1b6" +dependencies = [ + "rustls", + "tokio", + "webpki", +] + [[package]] name = "tokio-socks" version = "0.5.1" @@ -3011,6 +3196,17 @@ dependencies = [ "tokio", ] +[[package]] +name = "tokio-stream" +version = "0.1.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "50145484efff8818b5ccd256697f36863f587da82cf8b409c53adf1e840798e3" +dependencies = [ + "futures-core", + "pin-project-lite", + "tokio", +] + [[package]] name = "tokio-util" version = "0.6.9" @@ -3020,16 +3216,16 @@ dependencies = [ "bytes 1.1.0", "futures-core", "futures-sink", - "log 0.4.14", + "log", "pin-project-lite", "tokio", ] [[package]] name = "toml" -version = "0.4.10" +version = "0.5.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "758664fc71a3a69038656bee8b6be6477d2a6c315a6b81f7081f591bffa4111f" +checksum = "a31142970826733df8241ef35dc040ef98c679ab14d7c3e54d827099b3acecaa" dependencies = [ "serde", ] @@ -3059,7 +3255,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "375a639232caf30edfc78e8d89b2d4c375515393e7af7e16f01cd96917fb2105" dependencies = [ "cfg-if 1.0.0", - "log 0.4.14", + "log", "pin-project-lite", "tracing-attributes", "tracing-core", @@ -3071,9 +3267,9 @@ version = 
"0.1.18" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f4f480b8f81512e825f337ad51e94c1eb5d3bbdf2b363dcd01e2b19a9ffe3f8e" dependencies = [ - "proc-macro2 1.0.36", - "quote 1.0.15", - "syn 1.0.86", + "proc-macro2", + "quote", + "syn", ] [[package]] @@ -3086,10 +3282,33 @@ dependencies = [ ] [[package]] -name = "traitobject" -version = "0.1.0" +name = "tracing-log" +version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "efd1f82c56340fdf16f2a953d7bda4f8fdffba13d93b00844c25572110b26079" +checksum = "a6923477a48e41c1951f1999ef8bb5a3023eb723ceadafe78ffb65dc366761e3" +dependencies = [ + "lazy_static", + "log", + "tracing-core", +] + +[[package]] +name = "tracing-subscriber" +version = "0.3.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5312f325fe3588e277415f5a6cca1f4ccad0f248c4cd5a4bd33032d7286abc22" +dependencies = [ + "ansi_term", + "lazy_static", + "matchers", + "regex", + "sharded-slab", + "smallvec 1.8.0", + "thread_local", + "tracing", + "tracing-core", + "tracing-log", +] [[package]] name = "trust-dns-proto" @@ -3107,7 +3326,7 @@ dependencies = [ "idna 0.2.3", "ipnet", "lazy_static", - "log 0.4.14", + "log", "rand 0.8.4", "smallvec 1.8.0", "thiserror", @@ -3126,7 +3345,7 @@ dependencies = [ "futures-util", "ipconfig", "lazy_static", - "log 0.4.14", + "log", "lru-cache", "parking_lot 0.11.2", "resolv-conf", @@ -3142,21 +3361,6 @@ version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "59547bce71d9c38b83d9c0e92b6066c4253371f15005def0c30d9657f50c7642" -[[package]] -name = "twoway" -version = "0.1.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "59b11b2b5241ba34be09c3cc85a36e56e48f9888862e19cedf23336d35316ed1" -dependencies = [ - "memchr", -] - -[[package]] -name = "typeable" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"1410f6f91f21d1612654e7cc69193b0334f909dcf2c790c4826254fbb86f8887" - [[package]] name = "typenum" version = "1.15.0" @@ -3180,6 +3384,15 @@ dependencies = [ "time 0.1.44", ] +[[package]] +name = "ubyte" +version = "0.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "42756bb9e708855de2f8a98195643dff31a97f0485d90d8467b39dc24be9e8fe" +dependencies = [ + "serde", +] + [[package]] name = "ucd-trie" version = "0.1.3" @@ -3192,25 +3405,8 @@ version = "0.9.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5baeed7327e25054889b9bd4f975f32e5f4c5d434042d59ab6cd4142c0a76ed0" dependencies = [ - "version_check 0.9.4", -] - -[[package]] -name = "unicase" -version = "1.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f4765f83163b74f957c797ad9253caf97f103fb064d3999aea9568d09fc8a33" -dependencies = [ - "version_check 0.1.5", -] - -[[package]] -name = "unicase" -version = "2.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "50f37be617794602aabbeee0be4f259dc1778fabe05e2d67ee8f79326d5cb4f6" -dependencies = [ - "version_check 0.9.4", + "serde", + "version_check", ] [[package]] @@ -3234,12 +3430,6 @@ version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8895849a949e7845e06bd6dc1aa51731a103c42707010a5b591c0038fb73385b" -[[package]] -name = "unicode-xid" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fc72304796d0818e357ead4e000d19c9c174ab23dc11093ac919054d20a6a7fc" - [[package]] name = "unicode-xid" version = "0.2.2" @@ -3302,12 +3492,14 @@ dependencies = [ "chrono-tz", "cookie 0.15.1", "cookie_store", + "ctrlc", "data-encoding", "data-url", "diesel", "diesel_migrations", "dotenv", "fern", + "futures", "governor", "handlebars", "html5ever", @@ -3316,9 +3508,8 @@ dependencies = [ "jsonwebtoken", "lettre", "libsqlite3-sys", - "log 0.4.14", + "log", "markup5ever_rcdom", 
- "multipart", "num-derive", "num-traits", "once_cell", @@ -3333,11 +3524,11 @@ dependencies = [ "ring", "rmpv", "rocket", - "rocket_contrib", "serde", "serde_json", "syslog", "time 0.2.27", + "tokio", "totp-lite", "tracing", "u2f", @@ -3353,12 +3544,6 @@ version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "accd4ea62f7bb7a82fe23066fb0957d48ef677f6eeb8215f372f52e48bb32426" -[[package]] -name = "version_check" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "914b1a6776c4c929a602fafd8bc742e06365d4bcbe48c30f9cca5824f70dc9dd" - [[package]] name = "version_check" version = "0.9.4" @@ -3382,7 +3567,7 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1ce8a968cb1cd110d136ff8b819a556d6fb6d919363c61534f6860c7eb172ba0" dependencies = [ - "log 0.4.14", + "log", "try-lock", ] @@ -3416,10 +3601,10 @@ checksum = "8b21c0df030f5a177f3cba22e9bc4322695ec43e7257d865302900290bcdedca" dependencies = [ "bumpalo", "lazy_static", - "log 0.4.14", - "proc-macro2 1.0.36", - "quote 1.0.15", - "syn 1.0.86", + "log", + "proc-macro2", + "quote", + "syn", "wasm-bindgen-shared", ] @@ -3441,7 +3626,7 @@ version = "0.2.79" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2f4203d69e40a52ee523b2529a773d5ffc1dc0071801c87b3d270b471b80ed01" dependencies = [ - "quote 1.0.15", + "quote", "wasm-bindgen-macro-support", ] @@ -3451,9 +3636,9 @@ version = "0.2.79" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bfa8a30d46208db204854cadbb5d4baf5fcf8071ba5bf48190c3e59937962ebc" dependencies = [ - "proc-macro2 1.0.36", - "quote 1.0.15", - "syn 1.0.86", + "proc-macro2", + "quote", + "syn", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -3503,15 +3688,6 @@ dependencies = [ "untrusted", ] -[[package]] -name = "webpki-roots" -version = "0.19.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"f8eff4b7516a57307f9349c64bf34caa34b940b66fed4b2fb3136cb7386e5739" -dependencies = [ - "webpki", -] - [[package]] name = "widestring" version = "0.4.3" @@ -3595,7 +3771,7 @@ version = "0.16.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9234163818fd8e2418fcde330655e757900d4236acd8cc70fef345ef91f6d865" dependencies = [ - "log 0.4.14", + "log", "mac", "markup5ever", "time 0.1.44", diff --git a/Cargo.toml b/Cargo.toml index 46a7ca07..3cdd3d2b 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -3,7 +3,7 @@ name = "vaultwarden" version = "1.0.0" authors = ["Daniel García "] edition = "2021" -rust-version = "1.60" +rust-version = "1.56" resolver = "2" repository = "https://github.com/dani-garcia/vaultwarden" @@ -13,6 +13,7 @@ publish = false build = "build.rs" [features] +# default = ["sqlite"] # Empty to keep compatibility, prefer to set USE_SYSLOG=true enable_syslog = [] mysql = ["diesel/mysql", "diesel_migrations/mysql"] @@ -29,22 +30,22 @@ unstable = [] syslog = "4.0.1" [dependencies] -# Web framework for nightly with a focus on ease-of-use, expressibility, and speed. 
-rocket = { version = "=0.5.0-dev", features = ["tls"], default-features = false } -rocket_contrib = "=0.5.0-dev" +# Web framework +rocket = { version = "0.5.0-rc.1", features = ["tls", "json"], default-features = false } -# HTTP client -reqwest = { version = "0.11.9", features = ["blocking", "json", "gzip", "brotli", "socks", "cookies", "trust-dns"] } +# Async futures +futures = "0.3.19" +tokio = { version = "1.16.1", features = ["rt-multi-thread", "fs", "io-util", "parking_lot"] } + + # HTTP client +reqwest = { version = "0.11.9", features = ["stream", "json", "gzip", "brotli", "socks", "cookies", "trust-dns"] } +bytes = "1.1.0" # Used for custom short lived cookie jar cookie = "0.15.1" cookie_store = "0.15.1" -bytes = "1.1.0" url = "2.2.2" -# multipart/form-data support -multipart = { version = "0.18.0", features = ["server"], default-features = false } - # WebSockets library ws = { version = "0.11.1", package = "parity-ws" } @@ -141,10 +142,10 @@ backtrace = "0.3.64" paste = "1.0.6" governor = "0.4.1" +ctrlc = { version = "3.2.1", features = ["termination"] } + [patch.crates-io] -# Use newest ring -rocket = { git = 'https://github.com/SergioBenitez/Rocket', rev = '263e39b5b429de1913ce7e3036575a7b4d88b6d7' } -rocket_contrib = { git = 'https://github.com/SergioBenitez/Rocket', rev = '263e39b5b429de1913ce7e3036575a7b4d88b6d7' } +rocket = { git = 'https://github.com/SergioBenitez/Rocket', rev = '8cae077ba1d54b92cdef3e171a730b819d5eeb8e' } # The maintainer of the `job_scheduler` crate doesn't seem to have responded # to any issues or PRs for almost a year (as of April 2021). 
This hopefully diff --git a/Rocket.toml b/Rocket.toml deleted file mode 100644 index e8409cb3..00000000 --- a/Rocket.toml +++ /dev/null @@ -1,2 +0,0 @@ -[global.limits] -json = 10485760 # 10 MiB diff --git a/rust-toolchain b/rust-toolchain index 4c62882b..2bf5ad04 100644 --- a/rust-toolchain +++ b/rust-toolchain @@ -1 +1 @@ -nightly-2022-01-23 +stable diff --git a/src/api/admin.rs b/src/api/admin.rs index 38d30c99..c25587d0 100644 --- a/src/api/admin.rs +++ b/src/api/admin.rs @@ -3,13 +3,14 @@ use serde::de::DeserializeOwned; use serde_json::Value; use std::env; +use rocket::serde::json::Json; use rocket::{ - http::{Cookie, Cookies, SameSite, Status}, - request::{self, FlashMessage, Form, FromRequest, Outcome, Request}, - response::{content::Html, Flash, Redirect}, + form::Form, + http::{Cookie, CookieJar, SameSite, Status}, + request::{self, FlashMessage, FromRequest, Outcome, Request}, + response::{content::RawHtml as Html, Flash, Redirect}, Route, }; -use rocket_contrib::json::Json; use crate::{ api::{ApiResult, EmptyResult, JsonResult, NumberOrString}, @@ -85,10 +86,11 @@ fn admin_path() -> String { struct Referer(Option); -impl<'a, 'r> FromRequest<'a, 'r> for Referer { +#[rocket::async_trait] +impl<'r> FromRequest<'r> for Referer { type Error = (); - fn from_request(request: &'a Request<'r>) -> request::Outcome { + async fn from_request(request: &'r Request<'_>) -> request::Outcome { Outcome::Success(Referer(request.headers().get_one("Referer").map(str::to_string))) } } @@ -96,10 +98,11 @@ impl<'a, 'r> FromRequest<'a, 'r> for Referer { #[derive(Debug)] struct IpHeader(Option); -impl<'a, 'r> FromRequest<'a, 'r> for IpHeader { +#[rocket::async_trait] +impl<'r> FromRequest<'r> for IpHeader { type Error = (); - fn from_request(req: &'a Request<'r>) -> Outcome { + async fn from_request(req: &'r Request<'_>) -> Outcome { if req.headers().get_one(&CONFIG.ip_header()).is_some() { Outcome::Success(IpHeader(Some(CONFIG.ip_header()))) } else if 
req.headers().get_one("X-Client-IP").is_some() { @@ -138,7 +141,7 @@ fn admin_url(referer: Referer) -> String { #[get("/", rank = 2)] fn admin_login(flash: Option) -> ApiResult> { // If there is an error, show it - let msg = flash.map(|msg| format!("{}: {}", msg.name(), msg.msg())); + let msg = flash.map(|msg| format!("{}: {}", msg.kind(), msg.message())); let json = json!({ "page_content": "admin/login", "version": VERSION, @@ -159,7 +162,7 @@ struct LoginForm { #[post("/", data = "")] fn post_admin_login( data: Form, - mut cookies: Cookies, + cookies: &CookieJar, ip: ClientIp, referer: Referer, ) -> Result> { @@ -180,7 +183,7 @@ fn post_admin_login( let cookie = Cookie::build(COOKIE_NAME, jwt) .path(admin_path()) - .max_age(time::Duration::minutes(20)) + .max_age(rocket::time::Duration::minutes(20)) .same_site(SameSite::Strict) .http_only(true) .finish(); @@ -297,7 +300,7 @@ fn test_smtp(data: Json, _token: AdminToken) -> EmptyResult { } #[get("/logout")] -fn logout(mut cookies: Cookies, referer: Referer) -> Redirect { +fn logout(cookies: &CookieJar, referer: Referer) -> Redirect { cookies.remove(Cookie::named(COOKIE_NAME)); Redirect::to(admin_url(referer)) } @@ -462,23 +465,23 @@ struct GitCommit { sha: String, } -fn get_github_api(url: &str) -> Result { +async fn get_github_api(url: &str) -> Result { let github_api = get_reqwest_client(); - Ok(github_api.get(url).send()?.error_for_status()?.json::()?) + Ok(github_api.get(url).send().await?.error_for_status()?.json::().await?) 
} -fn has_http_access() -> bool { +async fn has_http_access() -> bool { let http_access = get_reqwest_client(); - match http_access.head("https://github.com/dani-garcia/vaultwarden").send() { + match http_access.head("https://github.com/dani-garcia/vaultwarden").send().await { Ok(r) => r.status().is_success(), _ => false, } } #[get("/diagnostics")] -fn diagnostics(_token: AdminToken, ip_header: IpHeader, conn: DbConn) -> ApiResult> { +async fn diagnostics(_token: AdminToken, ip_header: IpHeader, conn: DbConn) -> ApiResult> { use crate::util::read_file_string; use chrono::prelude::*; use std::net::ToSocketAddrs; @@ -497,7 +500,7 @@ fn diagnostics(_token: AdminToken, ip_header: IpHeader, conn: DbConn) -> ApiResu // Execute some environment checks let running_within_docker = is_running_in_docker(); - let has_http_access = has_http_access(); + let has_http_access = has_http_access().await; let uses_proxy = env::var_os("HTTP_PROXY").is_some() || env::var_os("http_proxy").is_some() || env::var_os("HTTPS_PROXY").is_some() @@ -513,11 +516,14 @@ fn diagnostics(_token: AdminToken, ip_header: IpHeader, conn: DbConn) -> ApiResu // TODO: Maybe we need to cache this using a LazyStatic or something. Github only allows 60 requests per hour, and we use 3 here already. 
let (latest_release, latest_commit, latest_web_build) = if has_http_access { ( - match get_github_api::("https://api.github.com/repos/dani-garcia/vaultwarden/releases/latest") { + match get_github_api::("https://api.github.com/repos/dani-garcia/vaultwarden/releases/latest") + .await + { Ok(r) => r.tag_name, _ => "-".to_string(), }, - match get_github_api::("https://api.github.com/repos/dani-garcia/vaultwarden/commits/main") { + match get_github_api::("https://api.github.com/repos/dani-garcia/vaultwarden/commits/main").await + { Ok(mut c) => { c.sha.truncate(8); c.sha @@ -531,7 +537,9 @@ fn diagnostics(_token: AdminToken, ip_header: IpHeader, conn: DbConn) -> ApiResu } else { match get_github_api::( "https://api.github.com/repos/dani-garcia/bw_web_builds/releases/latest", - ) { + ) + .await + { Ok(r) => r.tag_name.trim_start_matches('v').to_string(), _ => "-".to_string(), } @@ -562,7 +570,7 @@ fn diagnostics(_token: AdminToken, ip_header: IpHeader, conn: DbConn) -> ApiResu "ip_header_config": &CONFIG.ip_header(), "uses_proxy": uses_proxy, "db_type": *DB_TYPE, - "db_version": get_sql_server_version(&conn), + "db_version": get_sql_server_version(&conn).await, "admin_url": format!("{}/diagnostics", admin_url(Referer(None))), "overrides": &CONFIG.get_overrides().join(", "), "server_time_local": Local::now().format("%Y-%m-%d %H:%M:%S %Z").to_string(), @@ -591,9 +599,9 @@ fn delete_config(_token: AdminToken) -> EmptyResult { } #[post("/config/backup_db")] -fn backup_db(_token: AdminToken, conn: DbConn) -> EmptyResult { +async fn backup_db(_token: AdminToken, conn: DbConn) -> EmptyResult { if *CAN_BACKUP { - backup_database(&conn) + backup_database(&conn).await } else { err!("Can't back up current DB (Only SQLite supports this feature)"); } @@ -601,21 +609,22 @@ fn backup_db(_token: AdminToken, conn: DbConn) -> EmptyResult { pub struct AdminToken {} -impl<'a, 'r> FromRequest<'a, 'r> for AdminToken { +#[rocket::async_trait] +impl<'r> FromRequest<'r> for AdminToken { type 
Error = &'static str; - fn from_request(request: &'a Request<'r>) -> request::Outcome { + async fn from_request(request: &'r Request<'_>) -> request::Outcome { if CONFIG.disable_admin_token() { Outcome::Success(AdminToken {}) } else { - let mut cookies = request.cookies(); + let cookies = request.cookies(); let access_token = match cookies.get(COOKIE_NAME) { Some(cookie) => cookie.value(), None => return Outcome::Forward(()), // If there is no cookie, redirect to login }; - let ip = match request.guard::() { + let ip = match ClientIp::from_request(request).await { Outcome::Success(ip) => ip.ip, _ => err_handler!("Error getting Client IP"), }; diff --git a/src/api/core/accounts.rs b/src/api/core/accounts.rs index 32c81e93..d790f67d 100644 --- a/src/api/core/accounts.rs +++ b/src/api/core/accounts.rs @@ -1,5 +1,5 @@ use chrono::Utc; -use rocket_contrib::json::Json; +use rocket::serde::json::Json; use serde_json::Value; use crate::{ diff --git a/src/api/core/ciphers.rs b/src/api/core/ciphers.rs index ff193a3e..1e6d6b1b 100644 --- a/src/api/core/ciphers.rs +++ b/src/api/core/ciphers.rs @@ -1,13 +1,14 @@ use std::collections::{HashMap, HashSet}; -use std::path::{Path, PathBuf}; use chrono::{NaiveDateTime, Utc}; -use rocket::{http::ContentType, request::Form, Data, Route}; -use rocket_contrib::json::Json; +use rocket::fs::TempFile; +use rocket::serde::json::Json; +use rocket::{ + form::{Form, FromForm}, + Route, +}; use serde_json::Value; -use multipart::server::{save::SavedData, Multipart, SaveResult}; - use crate::{ api::{self, EmptyResult, JsonResult, JsonUpcase, Notify, PasswordData, UpdateType}, auth::Headers, @@ -79,9 +80,9 @@ pub fn routes() -> Vec { ] } -pub fn purge_trashed_ciphers(pool: DbPool) { +pub async fn purge_trashed_ciphers(pool: DbPool) { debug!("Purging trashed ciphers"); - if let Ok(conn) = pool.get() { + if let Ok(conn) = pool.get().await { Cipher::purge_trash(&conn); } else { error!("Failed to get DB connection while purging trashed ciphers") @@ 
-90,12 +91,12 @@ pub fn purge_trashed_ciphers(pool: DbPool) { #[derive(FromForm, Default)] struct SyncData { - #[form(field = "excludeDomains")] + #[field(name = "excludeDomains")] exclude_domains: bool, // Default: 'false' } #[get("/sync?")] -fn sync(data: Form, headers: Headers, conn: DbConn) -> Json { +fn sync(data: SyncData, headers: Headers, conn: DbConn) -> Json { let user_json = headers.user.to_json(&conn); let folders = Folder::find_by_user(&headers.user.uuid, &conn); @@ -828,6 +829,12 @@ fn post_attachment_v2( }))) } +#[derive(FromForm)] +struct UploadData<'f> { + key: Option, + data: TempFile<'f>, +} + /// Saves the data content of an attachment to a file. This is common code /// shared between the v2 and legacy attachment APIs. /// @@ -836,22 +843,21 @@ fn post_attachment_v2( /// /// When used with the v2 API, post_attachment_v2() has already created the /// database record, which is passed in as `attachment`. -fn save_attachment( +async fn save_attachment( mut attachment: Option, cipher_uuid: String, - data: Data, - content_type: &ContentType, + data: Form>, headers: &Headers, - conn: &DbConn, - nt: Notify, -) -> Result { - let cipher = match Cipher::find_by_uuid(&cipher_uuid, conn) { + conn: DbConn, + nt: Notify<'_>, +) -> Result<(Cipher, DbConn), crate::error::Error> { + let cipher = match Cipher::find_by_uuid(&cipher_uuid, &conn) { Some(cipher) => cipher, - None => err_discard!("Cipher doesn't exist", data), + None => err!("Cipher doesn't exist"), }; - if !cipher.is_write_accessible_to_user(&headers.user.uuid, conn) { - err_discard!("Cipher is not write accessible", data) + if !cipher.is_write_accessible_to_user(&headers.user.uuid, &conn) { + err!("Cipher is not write accessible") } // In the v2 API, the attachment record has already been created, @@ -863,11 +869,11 @@ fn save_attachment( let size_limit = if let Some(ref user_uuid) = cipher.user_uuid { match CONFIG.user_attachment_limit() { - Some(0) => err_discard!("Attachments are disabled", data), 
+ Some(0) => err!("Attachments are disabled"), Some(limit_kb) => { - let left = (limit_kb * 1024) - Attachment::size_by_user(user_uuid, conn) + size_adjust; + let left = (limit_kb * 1024) - Attachment::size_by_user(user_uuid, &conn) + size_adjust; if left <= 0 { - err_discard!("Attachment storage limit reached! Delete some attachments to free up space", data) + err!("Attachment storage limit reached! Delete some attachments to free up space") } Some(left as u64) } @@ -875,130 +881,78 @@ fn save_attachment( } } else if let Some(ref org_uuid) = cipher.organization_uuid { match CONFIG.org_attachment_limit() { - Some(0) => err_discard!("Attachments are disabled", data), + Some(0) => err!("Attachments are disabled"), Some(limit_kb) => { - let left = (limit_kb * 1024) - Attachment::size_by_org(org_uuid, conn) + size_adjust; + let left = (limit_kb * 1024) - Attachment::size_by_org(org_uuid, &conn) + size_adjust; if left <= 0 { - err_discard!("Attachment storage limit reached! Delete some attachments to free up space", data) + err!("Attachment storage limit reached! 
Delete some attachments to free up space") } Some(left as u64) } None => None, } } else { - err_discard!("Cipher is neither owned by a user nor an organization", data); + err!("Cipher is neither owned by a user nor an organization"); }; - let mut params = content_type.params(); - let boundary_pair = params.next().expect("No boundary provided"); - let boundary = boundary_pair.1; + let mut data = data.into_inner(); - let base_path = Path::new(&CONFIG.attachments_folder()).join(&cipher_uuid); - let mut path = PathBuf::new(); - - let mut attachment_key = None; - let mut error = None; - - Multipart::with_body(data.open(), boundary) - .foreach_entry(|mut field| { - match &*field.headers.name { - "key" => { - use std::io::Read; - let mut key_buffer = String::new(); - if field.data.read_to_string(&mut key_buffer).is_ok() { - attachment_key = Some(key_buffer); - } - } - "data" => { - // In the legacy API, this is the encrypted filename - // provided by the client, stored to the database as-is. - // In the v2 API, this value doesn't matter, as it was - // already provided and stored via an earlier API call. - let encrypted_filename = field.headers.filename; - - // This random ID is used as the name of the file on disk. - // In the legacy API, we need to generate this value here. - // In the v2 API, we use the value from post_attachment_v2(). 
- let file_id = match &attachment { - Some(attachment) => attachment.id.clone(), // v2 API - None => crypto::generate_attachment_id(), // Legacy API - }; - path = base_path.join(&file_id); - - let size = - match field.data.save().memory_threshold(0).size_limit(size_limit).with_path(path.clone()) { - SaveResult::Full(SavedData::File(_, size)) => size as i32, - SaveResult::Full(other) => { - error = Some(format!("Attachment is not a file: {:?}", other)); - return; - } - SaveResult::Partial(_, reason) => { - error = Some(format!("Attachment storage limit exceeded with this file: {:?}", reason)); - return; - } - SaveResult::Error(e) => { - error = Some(format!("Error: {:?}", e)); - return; - } - }; - - if let Some(attachment) = &mut attachment { - // v2 API - - // Check the actual size against the size initially provided by - // the client. Upstream allows +/- 1 MiB deviation from this - // size, but it's not clear when or why this is needed. - const LEEWAY: i32 = 1024 * 1024; // 1 MiB - let min_size = attachment.file_size - LEEWAY; - let max_size = attachment.file_size + LEEWAY; - - if min_size <= size && size <= max_size { - if size != attachment.file_size { - // Update the attachment with the actual file size. 
- attachment.file_size = size; - attachment.save(conn).expect("Error updating attachment"); - } - } else { - attachment.delete(conn).ok(); - - let err_msg = "Attachment size mismatch".to_string(); - error!("{} (expected within [{}, {}], got {})", err_msg, min_size, max_size, size); - error = Some(err_msg); - } - } else { - // Legacy API - - if encrypted_filename.is_none() { - error = Some("No filename provided".to_string()); - return; - } - if attachment_key.is_none() { - error = Some("No attachment key provided".to_string()); - return; - } - let attachment = Attachment::new( - file_id, - cipher_uuid.clone(), - encrypted_filename.unwrap(), - size, - attachment_key.clone(), - ); - attachment.save(conn).expect("Error saving attachment"); - } - } - _ => error!("Invalid multipart name"), - } - }) - .expect("Error processing multipart data"); - - if let Some(ref e) = error { - std::fs::remove_file(path).ok(); - err!(e); + if let Some(size_limit) = size_limit { + if data.data.len() > size_limit { + err!("Attachment storage limit exceeded with this file"); + } } - nt.send_cipher_update(UpdateType::CipherUpdate, &cipher, &cipher.update_users_revision(conn)); + let file_id = match &attachment { + Some(attachment) => attachment.id.clone(), // v2 API + None => crypto::generate_attachment_id(), // Legacy API + }; - Ok(cipher) + let folder_path = tokio::fs::canonicalize(&CONFIG.attachments_folder()).await?.join(&cipher_uuid); + let file_path = folder_path.join(&file_id); + tokio::fs::create_dir_all(&folder_path).await?; + + let size = data.data.len() as i32; + if let Some(attachment) = &mut attachment { + // v2 API + + // Check the actual size against the size initially provided by + // the client. Upstream allows +/- 1 MiB deviation from this + // size, but it's not clear when or why this is needed. 
+ const LEEWAY: i32 = 1024 * 1024; // 1 MiB + let min_size = attachment.file_size - LEEWAY; + let max_size = attachment.file_size + LEEWAY; + + if min_size <= size && size <= max_size { + if size != attachment.file_size { + // Update the attachment with the actual file size. + attachment.file_size = size; + attachment.save(&conn).expect("Error updating attachment"); + } + } else { + attachment.delete(&conn).ok(); + + err!(format!("Attachment size mismatch (expected within [{}, {}], got {})", min_size, max_size, size)); + } + } else { + // Legacy API + let encrypted_filename = data.data.raw_name().map(|s| s.dangerous_unsafe_unsanitized_raw().to_string()); + + if encrypted_filename.is_none() { + err!("No filename provided") + } + if data.key.is_none() { + err!("No attachment key provided") + } + let attachment = Attachment::new(file_id, cipher_uuid.clone(), encrypted_filename.unwrap(), size, data.key); + attachment.save(&conn).expect("Error saving attachment"); + } + + data.data.persist_to(file_path).await?; + + nt.send_cipher_update(UpdateType::CipherUpdate, &cipher, &cipher.update_users_revision(&conn)); + + Ok((cipher, conn)) } /// v2 API for uploading the actual data content of an attachment. @@ -1006,14 +960,13 @@ fn save_attachment( /// /ciphers//attachment/v2 route, which would otherwise conflict /// with this one. 
#[post("/ciphers//attachment/", format = "multipart/form-data", data = "", rank = 1)] -fn post_attachment_v2_data( +async fn post_attachment_v2_data( uuid: String, attachment_id: String, - data: Data, - content_type: &ContentType, + data: Form>, headers: Headers, conn: DbConn, - nt: Notify, + nt: Notify<'_>, ) -> EmptyResult { let attachment = match Attachment::find_by_id(&attachment_id, &conn) { Some(attachment) if uuid == attachment.cipher_uuid => Some(attachment), @@ -1021,54 +974,51 @@ fn post_attachment_v2_data( None => err!("Attachment doesn't exist"), }; - save_attachment(attachment, uuid, data, content_type, &headers, &conn, nt)?; + save_attachment(attachment, uuid, data, &headers, conn, nt).await?; Ok(()) } /// Legacy API for creating an attachment associated with a cipher. #[post("/ciphers//attachment", format = "multipart/form-data", data = "")] -fn post_attachment( +async fn post_attachment( uuid: String, - data: Data, - content_type: &ContentType, + data: Form>, headers: Headers, conn: DbConn, - nt: Notify, + nt: Notify<'_>, ) -> JsonResult { // Setting this as None signifies to save_attachment() that it should create // the attachment database record as well as saving the data to disk. 
let attachment = None; - let cipher = save_attachment(attachment, uuid, data, content_type, &headers, &conn, nt)?; + let (cipher, conn) = save_attachment(attachment, uuid, data, &headers, conn, nt).await?; Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, &conn))) } #[post("/ciphers//attachment-admin", format = "multipart/form-data", data = "")] -fn post_attachment_admin( +async fn post_attachment_admin( uuid: String, - data: Data, - content_type: &ContentType, + data: Form>, headers: Headers, conn: DbConn, - nt: Notify, + nt: Notify<'_>, ) -> JsonResult { - post_attachment(uuid, data, content_type, headers, conn, nt) + post_attachment(uuid, data, headers, conn, nt).await } #[post("/ciphers//attachment//share", format = "multipart/form-data", data = "")] -fn post_attachment_share( +async fn post_attachment_share( uuid: String, attachment_id: String, - data: Data, - content_type: &ContentType, + data: Form>, headers: Headers, conn: DbConn, - nt: Notify, + nt: Notify<'_>, ) -> JsonResult { _delete_cipher_attachment_by_id(&uuid, &attachment_id, &headers, &conn, &nt)?; - post_attachment(uuid, data, content_type, headers, conn, nt) + post_attachment(uuid, data, headers, conn, nt).await } #[post("/ciphers//attachment//delete-admin")] @@ -1248,13 +1198,13 @@ fn move_cipher_selected_put( #[derive(FromForm)] struct OrganizationId { - #[form(field = "organizationId")] + #[field(name = "organizationId")] org_id: String, } #[post("/ciphers/purge?", data = "")] fn delete_all( - organization: Option>, + organization: Option, data: JsonUpcase, headers: Headers, conn: DbConn, diff --git a/src/api/core/emergency_access.rs b/src/api/core/emergency_access.rs index 8ad1fdd1..3b6d8c08 100644 --- a/src/api/core/emergency_access.rs +++ b/src/api/core/emergency_access.rs @@ -1,6 +1,6 @@ use chrono::{Duration, Utc}; +use rocket::serde::json::Json; use rocket::Route; -use rocket_contrib::json::Json; use serde_json::Value; use std::borrow::Borrow; @@ -709,13 +709,13 @@ fn 
check_emergency_access_allowed() -> EmptyResult { Ok(()) } -pub fn emergency_request_timeout_job(pool: DbPool) { +pub async fn emergency_request_timeout_job(pool: DbPool) { debug!("Start emergency_request_timeout_job"); if !CONFIG.emergency_access_allowed() { return; } - if let Ok(conn) = pool.get() { + if let Ok(conn) = pool.get().await { let emergency_access_list = EmergencyAccess::find_all_recoveries(&conn); if emergency_access_list.is_empty() { @@ -756,13 +756,13 @@ pub fn emergency_request_timeout_job(pool: DbPool) { } } -pub fn emergency_notification_reminder_job(pool: DbPool) { +pub async fn emergency_notification_reminder_job(pool: DbPool) { debug!("Start emergency_notification_reminder_job"); if !CONFIG.emergency_access_allowed() { return; } - if let Ok(conn) = pool.get() { + if let Ok(conn) = pool.get().await { let emergency_access_list = EmergencyAccess::find_all_recoveries(&conn); if emergency_access_list.is_empty() { diff --git a/src/api/core/folders.rs b/src/api/core/folders.rs index 57ec7f18..e2a32f7b 100644 --- a/src/api/core/folders.rs +++ b/src/api/core/folders.rs @@ -1,4 +1,4 @@ -use rocket_contrib::json::Json; +use rocket::serde::json::Json; use serde_json::Value; use crate::{ diff --git a/src/api/core/mod.rs b/src/api/core/mod.rs index 77e8780d..79556494 100644 --- a/src/api/core/mod.rs +++ b/src/api/core/mod.rs @@ -31,8 +31,8 @@ pub fn routes() -> Vec { // // Move this somewhere else // +use rocket::serde::json::Json; use rocket::Route; -use rocket_contrib::json::Json; use serde_json::Value; use crate::{ @@ -144,7 +144,7 @@ fn put_eq_domains(data: JsonUpcase, headers: Headers, conn: DbC } #[get("/hibp/breach?")] -fn hibp_breach(username: String) -> JsonResult { +async fn hibp_breach(username: String) -> JsonResult { let url = format!( "https://haveibeenpwned.com/api/v3/breachedaccount/{}?truncateResponse=false&includeUnverified=false", username @@ -153,14 +153,14 @@ fn hibp_breach(username: String) -> JsonResult { if let Some(api_key) = 
crate::CONFIG.hibp_api_key() { let hibp_client = get_reqwest_client(); - let res = hibp_client.get(&url).header("hibp-api-key", api_key).send()?; + let res = hibp_client.get(&url).header("hibp-api-key", api_key).send().await?; // If we get a 404, return a 404, it means no breached accounts if res.status() == 404 { return Err(Error::empty().with_code(404)); } - let value: Value = res.error_for_status()?.json()?; + let value: Value = res.error_for_status()?.json().await?; Ok(Json(value)) } else { Ok(Json(json!([{ diff --git a/src/api/core/organizations.rs b/src/api/core/organizations.rs index fa79c39c..5716cbf0 100644 --- a/src/api/core/organizations.rs +++ b/src/api/core/organizations.rs @@ -1,6 +1,6 @@ use num_traits::FromPrimitive; -use rocket::{request::Form, Route}; -use rocket_contrib::json::Json; +use rocket::serde::json::Json; +use rocket::Route; use serde_json::Value; use crate::{ @@ -469,12 +469,12 @@ fn put_collection_users( #[derive(FromForm)] struct OrgIdData { - #[form(field = "organizationId")] + #[field(name = "organizationId")] organization_id: String, } #[get("/ciphers/organization-details?")] -fn get_org_details(data: Form, headers: Headers, conn: DbConn) -> Json { +fn get_org_details(data: OrgIdData, headers: Headers, conn: DbConn) -> Json { let ciphers = Cipher::find_by_org(&data.organization_id, &conn); let ciphers_json: Vec = ciphers.iter().map(|c| c.to_json(&headers.host, &headers.user.uuid, &conn)).collect(); @@ -1097,14 +1097,14 @@ struct RelationsData { #[post("/ciphers/import-organization?", data = "")] fn post_org_import( - query: Form, + query: OrgIdData, data: JsonUpcase, headers: AdminHeaders, conn: DbConn, nt: Notify, ) -> EmptyResult { let data: ImportData = data.into_inner().data; - let org_id = query.into_inner().organization_id; + let org_id = query.organization_id; // Read and create the collections let collections: Vec<_> = data diff --git a/src/api/core/sends.rs b/src/api/core/sends.rs index 72437f15..a185fe89 100644 --- 
a/src/api/core/sends.rs +++ b/src/api/core/sends.rs @@ -1,9 +1,10 @@ -use std::{io::Read, path::Path}; +use std::path::Path; use chrono::{DateTime, Duration, Utc}; -use multipart::server::{save::SavedData, Multipart, SaveResult}; -use rocket::{http::ContentType, response::NamedFile, Data}; -use rocket_contrib::json::Json; +use rocket::form::Form; +use rocket::fs::NamedFile; +use rocket::fs::TempFile; +use rocket::serde::json::Json; use serde_json::Value; use crate::{ @@ -31,9 +32,9 @@ pub fn routes() -> Vec { ] } -pub fn purge_sends(pool: DbPool) { +pub async fn purge_sends(pool: DbPool) { debug!("Purging sends"); - if let Ok(conn) = pool.get() { + if let Ok(conn) = pool.get().await { Send::purge(&conn); } else { error!("Failed to get DB connection while purging sends") @@ -177,25 +178,23 @@ fn post_send(data: JsonUpcase, headers: Headers, conn: DbConn, nt: Not Ok(Json(send.to_json())) } +#[derive(FromForm)] +struct UploadData<'f> { + model: Json>, + data: TempFile<'f>, +} + #[post("/sends/file", format = "multipart/form-data", data = "")] -fn post_send_file(data: Data, content_type: &ContentType, headers: Headers, conn: DbConn, nt: Notify) -> JsonResult { +async fn post_send_file(data: Form>, headers: Headers, conn: DbConn, nt: Notify<'_>) -> JsonResult { enforce_disable_send_policy(&headers, &conn)?; - let boundary = content_type.params().next().expect("No boundary provided").1; + let UploadData { + model, + mut data, + } = data.into_inner(); + let model = model.into_inner().data; - let mut mpart = Multipart::with_body(data.open(), boundary); - - // First entry is the SendData JSON - let mut model_entry = match mpart.read_entry()? 
{ - Some(e) if &*e.headers.name == "model" => e, - Some(_) => err!("Invalid entry name"), - None => err!("No model entry present"), - }; - - let mut buf = String::new(); - model_entry.data.read_to_string(&mut buf)?; - let data = serde_json::from_str::>(&buf)?; - enforce_disable_hide_email_policy(&data.data, &headers, &conn)?; + enforce_disable_hide_email_policy(&model, &headers, &conn)?; // Get the file length and add an extra 5% to avoid issues const SIZE_525_MB: u64 = 550_502_400; @@ -212,45 +211,27 @@ fn post_send_file(data: Data, content_type: &ContentType, headers: Headers, conn None => SIZE_525_MB, }; - // Create the Send - let mut send = create_send(data.data, headers.user.uuid)?; - let file_id = crate::crypto::generate_send_id(); - + let mut send = create_send(model, headers.user.uuid)?; if send.atype != SendType::File as i32 { err!("Send content is not a file"); } - let file_path = Path::new(&CONFIG.sends_folder()).join(&send.uuid).join(&file_id); + let size = data.len(); + if size > size_limit { + err!("Attachment storage limit exceeded with this file"); + } - // Read the data entry and save the file - let mut data_entry = match mpart.read_entry()? 
{ - Some(e) if &*e.headers.name == "data" => e, - Some(_) => err!("Invalid entry name"), - None => err!("No model entry present"), - }; + let file_id = crate::crypto::generate_send_id(); + let folder_path = tokio::fs::canonicalize(&CONFIG.sends_folder()).await?.join(&send.uuid); + let file_path = folder_path.join(&file_id); + tokio::fs::create_dir_all(&folder_path).await?; + data.persist_to(&file_path).await?; - let size = match data_entry.data.save().memory_threshold(0).size_limit(size_limit).with_path(&file_path) { - SaveResult::Full(SavedData::File(_, size)) => size as i32, - SaveResult::Full(other) => { - std::fs::remove_file(&file_path).ok(); - err!(format!("Attachment is not a file: {:?}", other)); - } - SaveResult::Partial(_, reason) => { - std::fs::remove_file(&file_path).ok(); - err!(format!("Attachment storage limit exceeded with this file: {:?}", reason)); - } - SaveResult::Error(e) => { - std::fs::remove_file(&file_path).ok(); - err!(format!("Error: {:?}", e)); - } - }; - - // Set ID and sizes let mut data_value: Value = serde_json::from_str(&send.data)?; if let Some(o) = data_value.as_object_mut() { o.insert(String::from("Id"), Value::String(file_id)); o.insert(String::from("Size"), Value::Number(size.into())); - o.insert(String::from("SizeName"), Value::String(crate::util::get_display_size(size))); + o.insert(String::from("SizeName"), Value::String(crate::util::get_display_size(size as i32))); } send.data = serde_json::to_string(&data_value)?; @@ -367,10 +348,10 @@ fn post_access_file( } #[get("/sends//?")] -fn download_send(send_id: SafeString, file_id: SafeString, t: String) -> Option { +async fn download_send(send_id: SafeString, file_id: SafeString, t: String) -> Option { if let Ok(claims) = crate::auth::decode_send(&t) { if claims.sub == format!("{}/{}", send_id, file_id) { - return NamedFile::open(Path::new(&CONFIG.sends_folder()).join(send_id).join(file_id)).ok(); + return 
NamedFile::open(Path::new(&CONFIG.sends_folder()).join(send_id).join(file_id)).await.ok(); } } None diff --git a/src/api/core/two_factor/authenticator.rs b/src/api/core/two_factor/authenticator.rs index e72d7b29..7d80cb54 100644 --- a/src/api/core/two_factor/authenticator.rs +++ b/src/api/core/two_factor/authenticator.rs @@ -1,6 +1,6 @@ use data_encoding::BASE32; +use rocket::serde::json::Json; use rocket::Route; -use rocket_contrib::json::Json; use crate::{ api::{ diff --git a/src/api/core/two_factor/duo.rs b/src/api/core/two_factor/duo.rs index 606e32b2..1450cef0 100644 --- a/src/api/core/two_factor/duo.rs +++ b/src/api/core/two_factor/duo.rs @@ -1,7 +1,7 @@ use chrono::Utc; use data_encoding::BASE64; +use rocket::serde::json::Json; use rocket::Route; -use rocket_contrib::json::Json; use crate::{ api::{core::two_factor::_generate_recover_code, ApiResult, EmptyResult, JsonResult, JsonUpcase, PasswordData}, @@ -152,7 +152,7 @@ fn check_duo_fields_custom(data: &EnableDuoData) -> bool { } #[post("/two-factor/duo", data = "")] -fn activate_duo(data: JsonUpcase, headers: Headers, conn: DbConn) -> JsonResult { +async fn activate_duo(data: JsonUpcase, headers: Headers, conn: DbConn) -> JsonResult { let data: EnableDuoData = data.into_inner().data; let mut user = headers.user; @@ -163,7 +163,7 @@ fn activate_duo(data: JsonUpcase, headers: Headers, conn: DbConn) let (data, data_str) = if check_duo_fields_custom(&data) { let data_req: DuoData = data.into(); let data_str = serde_json::to_string(&data_req)?; - duo_api_request("GET", "/auth/v2/check", "", &data_req).map_res("Failed to validate Duo credentials")?; + duo_api_request("GET", "/auth/v2/check", "", &data_req).await.map_res("Failed to validate Duo credentials")?; (data_req.obscure(), data_str) } else { (DuoData::secret(), String::new()) @@ -185,11 +185,11 @@ fn activate_duo(data: JsonUpcase, headers: Headers, conn: DbConn) } #[put("/two-factor/duo", data = "")] -fn activate_duo_put(data: JsonUpcase, headers: Headers, 
conn: DbConn) -> JsonResult { - activate_duo(data, headers, conn) +async fn activate_duo_put(data: JsonUpcase, headers: Headers, conn: DbConn) -> JsonResult { + activate_duo(data, headers, conn).await } -fn duo_api_request(method: &str, path: &str, params: &str, data: &DuoData) -> EmptyResult { +async fn duo_api_request(method: &str, path: &str, params: &str, data: &DuoData) -> EmptyResult { use reqwest::{header, Method}; use std::str::FromStr; @@ -209,7 +209,8 @@ fn duo_api_request(method: &str, path: &str, params: &str, data: &DuoData) -> Em .basic_auth(username, Some(password)) .header(header::USER_AGENT, "vaultwarden:Duo/1.0 (Rust)") .header(header::DATE, date) - .send()? + .send() + .await? .error_for_status()?; Ok(()) diff --git a/src/api/core/two_factor/email.rs b/src/api/core/two_factor/email.rs index 0753f62f..51487ee3 100644 --- a/src/api/core/two_factor/email.rs +++ b/src/api/core/two_factor/email.rs @@ -1,6 +1,6 @@ use chrono::{Duration, NaiveDateTime, Utc}; +use rocket::serde::json::Json; use rocket::Route; -use rocket_contrib::json::Json; use crate::{ api::{core::two_factor::_generate_recover_code, EmptyResult, JsonResult, JsonUpcase, PasswordData}, diff --git a/src/api/core/two_factor/mod.rs b/src/api/core/two_factor/mod.rs index 2c48b9cf..105fe9eb 100644 --- a/src/api/core/two_factor/mod.rs +++ b/src/api/core/two_factor/mod.rs @@ -1,7 +1,7 @@ use chrono::{Duration, Utc}; use data_encoding::BASE32; +use rocket::serde::json::Json; use rocket::Route; -use rocket_contrib::json::Json; use serde_json::Value; use crate::{ @@ -158,14 +158,14 @@ fn disable_twofactor_put(data: JsonUpcase, headers: Header disable_twofactor(data, headers, conn) } -pub fn send_incomplete_2fa_notifications(pool: DbPool) { +pub async fn send_incomplete_2fa_notifications(pool: DbPool) { debug!("Sending notifications for incomplete 2FA logins"); if CONFIG.incomplete_2fa_time_limit() <= 0 || !CONFIG.mail_enabled() { return; } - let conn = match pool.get() { + let conn = match 
pool.get().await { Ok(conn) => conn, _ => { error!("Failed to get DB connection in send_incomplete_2fa_notifications()"); diff --git a/src/api/core/two_factor/u2f.rs b/src/api/core/two_factor/u2f.rs index f3d42c3e..cb24bcb3 100644 --- a/src/api/core/two_factor/u2f.rs +++ b/src/api/core/two_factor/u2f.rs @@ -1,6 +1,6 @@ use once_cell::sync::Lazy; +use rocket::serde::json::Json; use rocket::Route; -use rocket_contrib::json::Json; use serde_json::Value; use u2f::{ messages::{RegisterResponse, SignResponse, U2fSignRequest}, diff --git a/src/api/core/two_factor/webauthn.rs b/src/api/core/two_factor/webauthn.rs index ecc932ca..4dd0c294 100644 --- a/src/api/core/two_factor/webauthn.rs +++ b/src/api/core/two_factor/webauthn.rs @@ -1,5 +1,5 @@ +use rocket::serde::json::Json; use rocket::Route; -use rocket_contrib::json::Json; use serde_json::Value; use url::Url; use webauthn_rs::{base64_data::Base64UrlSafeData, proto::*, AuthenticationState, RegistrationState, Webauthn}; diff --git a/src/api/core/two_factor/yubikey.rs b/src/api/core/two_factor/yubikey.rs index c088324b..618c755a 100644 --- a/src/api/core/two_factor/yubikey.rs +++ b/src/api/core/two_factor/yubikey.rs @@ -1,5 +1,5 @@ +use rocket::serde::json::Json; use rocket::Route; -use rocket_contrib::json::Json; use serde_json::Value; use yubico::{config::Config, verify}; diff --git a/src/api/icons.rs b/src/api/icons.rs index a2e5cc3a..33656d02 100644 --- a/src/api/icons.rs +++ b/src/api/icons.rs @@ -1,19 +1,19 @@ use std::{ collections::HashMap, - fs::{create_dir_all, remove_file, symlink_metadata, File}, - io::prelude::*, net::{IpAddr, ToSocketAddrs}, sync::{Arc, RwLock}, time::{Duration, SystemTime}, }; +use bytes::{Buf, Bytes, BytesMut}; +use futures::{stream::StreamExt, TryFutureExt}; use once_cell::sync::Lazy; use regex::Regex; -use reqwest::{blocking::Client, blocking::Response, header}; -use rocket::{ - http::ContentType, - response::{Content, Redirect}, - Route, +use reqwest::{header, Client, Response}; +use 
rocket::{http::ContentType, response::Redirect, Route}; +use tokio::{ + fs::{create_dir_all, remove_file, symlink_metadata, File}, + io::{AsyncReadExt, AsyncWriteExt}, }; use crate::{ @@ -104,27 +104,23 @@ fn icon_google(domain: String) -> Option { } #[get("//icon.png")] -fn icon_internal(domain: String) -> Cached>> { +async fn icon_internal(domain: String) -> Cached<(ContentType, Vec)> { const FALLBACK_ICON: &[u8] = include_bytes!("../static/images/fallback-icon.png"); if !is_valid_domain(&domain) { warn!("Invalid domain: {}", domain); return Cached::ttl( - Content(ContentType::new("image", "png"), FALLBACK_ICON.to_vec()), + (ContentType::new("image", "png"), FALLBACK_ICON.to_vec()), CONFIG.icon_cache_negttl(), true, ); } - match get_icon(&domain) { + match get_icon(&domain).await { Some((icon, icon_type)) => { - Cached::ttl(Content(ContentType::new("image", icon_type), icon), CONFIG.icon_cache_ttl(), true) + Cached::ttl((ContentType::new("image", icon_type), icon), CONFIG.icon_cache_ttl(), true) } - _ => Cached::ttl( - Content(ContentType::new("image", "png"), FALLBACK_ICON.to_vec()), - CONFIG.icon_cache_negttl(), - true, - ), + _ => Cached::ttl((ContentType::new("image", "png"), FALLBACK_ICON.to_vec()), CONFIG.icon_cache_negttl(), true), } } @@ -317,15 +313,15 @@ fn is_domain_blacklisted(domain: &str) -> bool { is_blacklisted } -fn get_icon(domain: &str) -> Option<(Vec, String)> { +async fn get_icon(domain: &str) -> Option<(Vec, String)> { let path = format!("{}/{}.png", CONFIG.icon_cache_folder(), domain); // Check for expiration of negatively cached copy - if icon_is_negcached(&path) { + if icon_is_negcached(&path).await { return None; } - if let Some(icon) = get_cached_icon(&path) { + if let Some(icon) = get_cached_icon(&path).await { let icon_type = match get_icon_type(&icon) { Some(x) => x, _ => "x-icon", @@ -338,31 +334,31 @@ fn get_icon(domain: &str) -> Option<(Vec, String)> { } // Get the icon, or None in case of error - match download_icon(domain) { + 
match download_icon(domain).await { Ok((icon, icon_type)) => { - save_icon(&path, &icon); - Some((icon, icon_type.unwrap_or("x-icon").to_string())) + save_icon(&path, &icon).await; + Some((icon.to_vec(), icon_type.unwrap_or("x-icon").to_string())) } Err(e) => { warn!("Unable to download icon: {:?}", e); let miss_indicator = path + ".miss"; - save_icon(&miss_indicator, &[]); + save_icon(&miss_indicator, &[]).await; None } } } -fn get_cached_icon(path: &str) -> Option> { +async fn get_cached_icon(path: &str) -> Option> { // Check for expiration of successfully cached copy - if icon_is_expired(path) { + if icon_is_expired(path).await { return None; } // Try to read the cached icon, and return it if it exists - if let Ok(mut f) = File::open(path) { + if let Ok(mut f) = File::open(path).await { let mut buffer = Vec::new(); - if f.read_to_end(&mut buffer).is_ok() { + if f.read_to_end(&mut buffer).await.is_ok() { return Some(buffer); } } @@ -370,22 +366,22 @@ fn get_cached_icon(path: &str) -> Option> { None } -fn file_is_expired(path: &str, ttl: u64) -> Result { - let meta = symlink_metadata(path)?; +async fn file_is_expired(path: &str, ttl: u64) -> Result { + let meta = symlink_metadata(path).await?; let modified = meta.modified()?; let age = SystemTime::now().duration_since(modified)?; Ok(ttl > 0 && ttl <= age.as_secs()) } -fn icon_is_negcached(path: &str) -> bool { +async fn icon_is_negcached(path: &str) -> bool { let miss_indicator = path.to_owned() + ".miss"; - let expired = file_is_expired(&miss_indicator, CONFIG.icon_cache_negttl()); + let expired = file_is_expired(&miss_indicator, CONFIG.icon_cache_negttl()).await; match expired { // No longer negatively cached, drop the marker Ok(true) => { - if let Err(e) = remove_file(&miss_indicator) { + if let Err(e) = remove_file(&miss_indicator).await { error!("Could not remove negative cache indicator for icon {:?}: {:?}", path, e); } false @@ -397,8 +393,8 @@ fn icon_is_negcached(path: &str) -> bool { } } -fn 
icon_is_expired(path: &str) -> bool { - let expired = file_is_expired(path, CONFIG.icon_cache_ttl()); +async fn icon_is_expired(path: &str) -> bool { + let expired = file_is_expired(path, CONFIG.icon_cache_ttl()).await; expired.unwrap_or(true) } @@ -521,13 +517,13 @@ struct IconUrlResult { /// let icon_result = get_icon_url("github.com")?; /// let icon_result = get_icon_url("vaultwarden.discourse.group")?; /// ``` -fn get_icon_url(domain: &str) -> Result { +async fn get_icon_url(domain: &str) -> Result { // Default URL with secure and insecure schemes let ssldomain = format!("https://{}", domain); let httpdomain = format!("http://{}", domain); // First check the domain as given during the request for both HTTPS and HTTP. - let resp = match get_page(&ssldomain).or_else(|_| get_page(&httpdomain)) { + let resp = match get_page(&ssldomain).or_else(|_| get_page(&httpdomain)).await { Ok(c) => Ok(c), Err(e) => { let mut sub_resp = Err(e); @@ -546,7 +542,7 @@ fn get_icon_url(domain: &str) -> Result { let httpbase = format!("http://{}", base_domain); debug!("[get_icon_url]: Trying without subdomains '{}'", base_domain); - sub_resp = get_page(&sslbase).or_else(|_| get_page(&httpbase)); + sub_resp = get_page(&sslbase).or_else(|_| get_page(&httpbase)).await; } // When the domain is not an IP, and has less then 2 dots, try to add www. infront of it. @@ -557,7 +553,7 @@ fn get_icon_url(domain: &str) -> Result { let httpwww = format!("http://{}", www_domain); debug!("[get_icon_url]: Trying with www. prefix '{}'", www_domain); - sub_resp = get_page(&sslwww).or_else(|_| get_page(&httpwww)); + sub_resp = get_page(&sslwww).or_else(|_| get_page(&httpwww)).await; } } @@ -581,7 +577,7 @@ fn get_icon_url(domain: &str) -> Result { iconlist.push(Icon::new(35, String::from(url.join("/favicon.ico").unwrap()))); // 384KB should be more than enough for the HTML, though as we only really need the HTML header. 
- let mut limited_reader = content.take(384 * 1024); + let mut limited_reader = stream_to_bytes_limit(content, 384 * 1024).await?.reader(); use html5ever::tendril::TendrilSink; let dom = html5ever::parse_document(markup5ever_rcdom::RcDom::default(), Default::default()) @@ -607,11 +603,11 @@ fn get_icon_url(domain: &str) -> Result { }) } -fn get_page(url: &str) -> Result { - get_page_with_referer(url, "") +async fn get_page(url: &str) -> Result { + get_page_with_referer(url, "").await } -fn get_page_with_referer(url: &str, referer: &str) -> Result { +async fn get_page_with_referer(url: &str, referer: &str) -> Result { if is_domain_blacklisted(url::Url::parse(url).unwrap().host_str().unwrap_or_default()) { warn!("Favicon '{}' resolves to a blacklisted domain or IP!", url); } @@ -621,7 +617,7 @@ fn get_page_with_referer(url: &str, referer: &str) -> Result { client = client.header("Referer", referer) } - match client.send() { + match client.send().await { Ok(c) => c.error_for_status().map_err(Into::into), Err(e) => err_silent!(format!("{}", e)), } @@ -706,14 +702,14 @@ fn parse_sizes(sizes: Option<&str>) -> (u16, u16) { (width, height) } -fn download_icon(domain: &str) -> Result<(Vec, Option<&str>), Error> { +async fn download_icon(domain: &str) -> Result<(Bytes, Option<&str>), Error> { if is_domain_blacklisted(domain) { err_silent!("Domain is blacklisted", domain) } - let icon_result = get_icon_url(domain)?; + let icon_result = get_icon_url(domain).await?; - let mut buffer = Vec::new(); + let mut buffer = Bytes::new(); let mut icon_type: Option<&str> = None; use data_url::DataUrl; @@ -722,8 +718,12 @@ fn download_icon(domain: &str) -> Result<(Vec, Option<&str>), Error> { if icon.href.starts_with("data:image") { let datauri = DataUrl::process(&icon.href).unwrap(); // Check if we are able to decode the data uri - match datauri.decode_to_vec() { - Ok((body, _fragment)) => { + let mut body = BytesMut::new(); + match datauri.decode::<_, ()>(|bytes| { + 
body.extend_from_slice(bytes); + Ok(()) + }) { + Ok(_) => { // Also check if the size is atleast 67 bytes, which seems to be the smallest png i could create if body.len() >= 67 { // Check if the icon type is allowed, else try an icon from the list. @@ -733,17 +733,17 @@ fn download_icon(domain: &str) -> Result<(Vec, Option<&str>), Error> { continue; } info!("Extracted icon from data:image uri for {}", domain); - buffer = body; + buffer = body.freeze(); break; } } _ => debug!("Extracted icon from data:image uri is invalid"), }; } else { - match get_page_with_referer(&icon.href, &icon_result.referer) { - Ok(mut res) => { - res.copy_to(&mut buffer)?; - // Check if the icon type is allowed, else try an icon from the list. + match get_page_with_referer(&icon.href, &icon_result.referer).await { + Ok(res) => { + buffer = stream_to_bytes_limit(res, 512 * 1024).await?; // 512 KB for each icon max + // Check if the icon type is allowed, else try an icon from the list. icon_type = get_icon_type(&buffer); if icon_type.is_none() { buffer.clear(); @@ -765,13 +765,13 @@ fn download_icon(domain: &str) -> Result<(Vec, Option<&str>), Error> { Ok((buffer, icon_type)) } -fn save_icon(path: &str, icon: &[u8]) { - match File::create(path) { +async fn save_icon(path: &str, icon: &[u8]) { + match File::create(path).await { Ok(mut f) => { - f.write_all(icon).expect("Error writing icon file"); + f.write_all(icon).await.expect("Error writing icon file"); } Err(ref e) if e.kind() == std::io::ErrorKind::NotFound => { - create_dir_all(&CONFIG.icon_cache_folder()).expect("Error creating icon cache folder"); + create_dir_all(&CONFIG.icon_cache_folder()).await.expect("Error creating icon cache folder"); } Err(e) => { warn!("Unable to save icon: {:?}", e); @@ -820,8 +820,6 @@ impl reqwest::cookie::CookieStore for Jar { } fn cookies(&self, url: &url::Url) -> Option { - use bytes::Bytes; - let cookie_store = self.0.read().unwrap(); let s = cookie_store .get_request_values(url) @@ -836,3 +834,12 @@ 
impl reqwest::cookie::CookieStore for Jar { header::HeaderValue::from_maybe_shared(Bytes::from(s)).ok() } } + +async fn stream_to_bytes_limit(res: Response, max_size: usize) -> Result { + let mut stream = res.bytes_stream().take(max_size); + let mut buf = BytesMut::new(); + while let Some(chunk) = stream.next().await { + buf.extend(chunk?); + } + Ok(buf.freeze()) +} diff --git a/src/api/identity.rs b/src/api/identity.rs index 0adc542f..0ad8a1b5 100644 --- a/src/api/identity.rs +++ b/src/api/identity.rs @@ -1,10 +1,10 @@ use chrono::Utc; use num_traits::FromPrimitive; +use rocket::serde::json::Json; use rocket::{ - request::{Form, FormItems, FromForm}, + form::{Form, FromForm}, Route, }; -use rocket_contrib::json::Json; use serde_json::Value; use crate::{ @@ -455,66 +455,57 @@ fn _json_err_twofactor(providers: &[i32], user_uuid: &str, conn: &DbConn) -> Api // https://github.com/bitwarden/jslib/blob/master/common/src/models/request/tokenRequest.ts // https://github.com/bitwarden/mobile/blob/master/src/Core/Models/Request/TokenRequest.cs -#[derive(Debug, Clone, Default)] +#[derive(Debug, Clone, Default, FromForm)] #[allow(non_snake_case)] struct ConnectData { - // refresh_token, password, client_credentials (API key) - grant_type: String, + #[field(name = uncased("grant_type"))] + #[field(name = uncased("granttype"))] + grant_type: String, // refresh_token, password, client_credentials (API key) // Needed for grant_type="refresh_token" + #[field(name = uncased("refresh_token"))] + #[field(name = uncased("refreshtoken"))] refresh_token: Option, // Needed for grant_type = "password" | "client_credentials" - client_id: Option, // web, cli, desktop, browser, mobile - client_secret: Option, // API key login (cli only) + #[field(name = uncased("client_id"))] + #[field(name = uncased("clientid"))] + client_id: Option, // web, cli, desktop, browser, mobile + #[field(name = uncased("client_secret"))] + #[field(name = uncased("clientsecret"))] + client_secret: Option, + 
#[field(name = uncased("password"))] password: Option, + #[field(name = uncased("scope"))] scope: Option, + #[field(name = uncased("username"))] username: Option, + #[field(name = uncased("device_identifier"))] + #[field(name = uncased("deviceidentifier"))] device_identifier: Option, + #[field(name = uncased("device_name"))] + #[field(name = uncased("devicename"))] device_name: Option, + #[field(name = uncased("device_type"))] + #[field(name = uncased("devicetype"))] device_type: Option, + #[field(name = uncased("device_push_token"))] + #[field(name = uncased("devicepushtoken"))] device_push_token: Option, // Unused; mobile device push not yet supported. // Needed for two-factor auth + #[field(name = uncased("two_factor_provider"))] + #[field(name = uncased("twofactorprovider"))] two_factor_provider: Option, + #[field(name = uncased("two_factor_token"))] + #[field(name = uncased("twofactortoken"))] two_factor_token: Option, + #[field(name = uncased("two_factor_remember"))] + #[field(name = uncased("twofactorremember"))] two_factor_remember: Option, } -impl<'f> FromForm<'f> for ConnectData { - type Error = String; - - fn from_form(items: &mut FormItems<'f>, _strict: bool) -> Result { - let mut form = Self::default(); - for item in items { - let (key, value) = item.key_value_decoded(); - let mut normalized_key = key.to_lowercase(); - normalized_key.retain(|c| c != '_'); // Remove '_' - - match normalized_key.as_ref() { - "granttype" => form.grant_type = value, - "refreshtoken" => form.refresh_token = Some(value), - "clientid" => form.client_id = Some(value), - "clientsecret" => form.client_secret = Some(value), - "password" => form.password = Some(value), - "scope" => form.scope = Some(value), - "username" => form.username = Some(value), - "deviceidentifier" => form.device_identifier = Some(value), - "devicename" => form.device_name = Some(value), - "devicetype" => form.device_type = Some(value), - "devicepushtoken" => form.device_push_token = Some(value), - 
"twofactorprovider" => form.two_factor_provider = value.parse().ok(), - "twofactortoken" => form.two_factor_token = Some(value), - "twofactorremember" => form.two_factor_remember = value.parse().ok(), - key => warn!("Detected unexpected parameter during login: {}", key), - } - } - - Ok(form) - } -} - fn _check_is_some(value: &Option, msg: &str) -> EmptyResult { if value.is_none() { err!(msg) diff --git a/src/api/mod.rs b/src/api/mod.rs index 3546acd7..99fb98be 100644 --- a/src/api/mod.rs +++ b/src/api/mod.rs @@ -5,7 +5,7 @@ mod identity; mod notifications; mod web; -use rocket_contrib::json::Json; +use rocket::serde::json::Json; use serde_json::Value; pub use crate::api::{ diff --git a/src/api/notifications.rs b/src/api/notifications.rs index 77539969..20bbee96 100644 --- a/src/api/notifications.rs +++ b/src/api/notifications.rs @@ -1,7 +1,7 @@ use std::sync::atomic::{AtomicBool, Ordering}; +use rocket::serde::json::Json; use rocket::Route; -use rocket_contrib::json::Json; use serde_json::Value as JsonValue; use crate::{api::EmptyResult, auth::Headers, Error, CONFIG}; @@ -417,7 +417,7 @@ pub enum UpdateType { } use rocket::State; -pub type Notify<'a> = State<'a, WebSocketUsers>; +pub type Notify<'a> = &'a State; pub fn start_notification_server() -> WebSocketUsers { let factory = WsFactory::init(); @@ -430,12 +430,11 @@ pub fn start_notification_server() -> WebSocketUsers { settings.queue_size = 2; settings.panic_on_internal = false; - ws::Builder::new() - .with_settings(settings) - .build(factory) - .unwrap() - .listen((CONFIG.websocket_address().as_str(), CONFIG.websocket_port())) - .unwrap(); + let ws = ws::Builder::new().with_settings(settings).build(factory).unwrap(); + CONFIG.set_ws_shutdown_handle(ws.broadcaster()); + ws.listen((CONFIG.websocket_address().as_str(), CONFIG.websocket_port())).unwrap(); + + warn!("WS Server stopped!"); }); } diff --git a/src/api/web.rs b/src/api/web.rs index 9a5f74cc..f911436a 100644 --- a/src/api/web.rs +++ b/src/api/web.rs @@ 
-1,7 +1,7 @@ use std::path::{Path, PathBuf}; -use rocket::{http::ContentType, response::content::Content, response::NamedFile, Route}; -use rocket_contrib::json::Json; +use rocket::serde::json::Json; +use rocket::{fs::NamedFile, http::ContentType, Route}; use serde_json::Value; use crate::{ @@ -21,16 +21,16 @@ pub fn routes() -> Vec { } #[get("/")] -fn web_index() -> Cached> { - Cached::short(NamedFile::open(Path::new(&CONFIG.web_vault_folder()).join("index.html")).ok(), false) +async fn web_index() -> Cached> { + Cached::short(NamedFile::open(Path::new(&CONFIG.web_vault_folder()).join("index.html")).await.ok(), false) } #[get("/app-id.json")] -fn app_id() -> Cached>> { +fn app_id() -> Cached<(ContentType, Json)> { let content_type = ContentType::new("application", "fido.trusted-apps+json"); Cached::long( - Content( + ( content_type, Json(json!({ "trustedFacets": [ @@ -58,13 +58,13 @@ fn app_id() -> Cached>> { } #[get("/", rank = 10)] // Only match this if the other routes don't match -fn web_files(p: PathBuf) -> Cached> { - Cached::long(NamedFile::open(Path::new(&CONFIG.web_vault_folder()).join(p)).ok(), true) +async fn web_files(p: PathBuf) -> Cached> { + Cached::long(NamedFile::open(Path::new(&CONFIG.web_vault_folder()).join(p)).await.ok(), true) } #[get("/attachments//")] -fn attachments(uuid: SafeString, file_id: SafeString) -> Option { - NamedFile::open(Path::new(&CONFIG.attachments_folder()).join(uuid).join(file_id)).ok() +async fn attachments(uuid: SafeString, file_id: SafeString) -> Option { + NamedFile::open(Path::new(&CONFIG.attachments_folder()).join(uuid).join(file_id)).await.ok() } // We use DbConn here to let the alive healthcheck also verify the database connection. 
@@ -78,25 +78,20 @@ fn alive(_conn: DbConn) -> Json { } #[get("/vw_static/")] -fn static_files(filename: String) -> Result, Error> { +fn static_files(filename: String) -> Result<(ContentType, &'static [u8]), Error> { match filename.as_ref() { - "mail-github.png" => Ok(Content(ContentType::PNG, include_bytes!("../static/images/mail-github.png"))), - "logo-gray.png" => Ok(Content(ContentType::PNG, include_bytes!("../static/images/logo-gray.png"))), - "error-x.svg" => Ok(Content(ContentType::SVG, include_bytes!("../static/images/error-x.svg"))), - "hibp.png" => Ok(Content(ContentType::PNG, include_bytes!("../static/images/hibp.png"))), - "vaultwarden-icon.png" => { - Ok(Content(ContentType::PNG, include_bytes!("../static/images/vaultwarden-icon.png"))) - } - - "bootstrap.css" => Ok(Content(ContentType::CSS, include_bytes!("../static/scripts/bootstrap.css"))), - "bootstrap-native.js" => { - Ok(Content(ContentType::JavaScript, include_bytes!("../static/scripts/bootstrap-native.js"))) - } - "identicon.js" => Ok(Content(ContentType::JavaScript, include_bytes!("../static/scripts/identicon.js"))), - "datatables.js" => Ok(Content(ContentType::JavaScript, include_bytes!("../static/scripts/datatables.js"))), - "datatables.css" => Ok(Content(ContentType::CSS, include_bytes!("../static/scripts/datatables.css"))), + "mail-github.png" => Ok((ContentType::PNG, include_bytes!("../static/images/mail-github.png"))), + "logo-gray.png" => Ok((ContentType::PNG, include_bytes!("../static/images/logo-gray.png"))), + "error-x.svg" => Ok((ContentType::SVG, include_bytes!("../static/images/error-x.svg"))), + "hibp.png" => Ok((ContentType::PNG, include_bytes!("../static/images/hibp.png"))), + "vaultwarden-icon.png" => Ok((ContentType::PNG, include_bytes!("../static/images/vaultwarden-icon.png"))), + "bootstrap.css" => Ok((ContentType::CSS, include_bytes!("../static/scripts/bootstrap.css"))), + "bootstrap-native.js" => Ok((ContentType::JavaScript, 
include_bytes!("../static/scripts/bootstrap-native.js"))), + "identicon.js" => Ok((ContentType::JavaScript, include_bytes!("../static/scripts/identicon.js"))), + "datatables.js" => Ok((ContentType::JavaScript, include_bytes!("../static/scripts/datatables.js"))), + "datatables.css" => Ok((ContentType::CSS, include_bytes!("../static/scripts/datatables.css"))), "jquery-3.6.0.slim.js" => { - Ok(Content(ContentType::JavaScript, include_bytes!("../static/scripts/jquery-3.6.0.slim.js"))) + Ok((ContentType::JavaScript, include_bytes!("../static/scripts/jquery-3.6.0.slim.js"))) } _ => err!(format!("Static file not found: {}", filename)), } diff --git a/src/auth.rs b/src/auth.rs index 741d4e95..6aedae81 100644 --- a/src/auth.rs +++ b/src/auth.rs @@ -257,7 +257,10 @@ pub fn generate_send_claims(send_id: &str, file_id: &str) -> BasicJwtClaims { // // Bearer token authentication // -use rocket::request::{FromRequest, Outcome, Request}; +use rocket::{ + outcome::try_outcome, + request::{FromRequest, Outcome, Request}, +}; use crate::db::{ models::{CollectionUser, Device, User, UserOrgStatus, UserOrgType, UserOrganization, UserStampException}, @@ -268,10 +271,11 @@ pub struct Host { pub host: String, } -impl<'a, 'r> FromRequest<'a, 'r> for Host { +#[rocket::async_trait] +impl<'r> FromRequest<'r> for Host { type Error = &'static str; - fn from_request(request: &'a Request<'r>) -> Outcome { + async fn from_request(request: &'r Request<'_>) -> Outcome { let headers = request.headers(); // Get host @@ -314,17 +318,14 @@ pub struct Headers { pub user: User, } -impl<'a, 'r> FromRequest<'a, 'r> for Headers { +#[rocket::async_trait] +impl<'r> FromRequest<'r> for Headers { type Error = &'static str; - fn from_request(request: &'a Request<'r>) -> Outcome { + async fn from_request(request: &'r Request<'_>) -> Outcome { let headers = request.headers(); - let host = match Host::from_request(request) { - Outcome::Forward(_) => return Outcome::Forward(()), - Outcome::Failure(f) => return 
Outcome::Failure(f), - Outcome::Success(host) => host.host, - }; + let host = try_outcome!(Host::from_request(request).await).host; // Get access_token let access_token: &str = match headers.get_one("Authorization") { @@ -344,7 +345,7 @@ impl<'a, 'r> FromRequest<'a, 'r> for Headers { let device_uuid = claims.device; let user_uuid = claims.sub; - let conn = match request.guard::() { + let conn = match DbConn::from_request(request).await { Outcome::Success(conn) => conn, _ => err_handler!("Error getting DB"), }; @@ -363,7 +364,7 @@ impl<'a, 'r> FromRequest<'a, 'r> for Headers { if let Some(stamp_exception) = user.stamp_exception.as_deref().and_then(|s| serde_json::from_str::(s).ok()) { - let current_route = match request.route().and_then(|r| r.name) { + let current_route = match request.route().and_then(|r| r.name.as_deref()) { Some(name) => name, _ => err_handler!("Error getting current route for stamp exception"), }; @@ -411,13 +412,13 @@ pub struct OrgHeaders { // but there are cases where it is a query value. // First check the path, if this is not a valid uuid, try the query values. 
fn get_org_id(request: &Request) -> Option { - if let Some(Ok(org_id)) = request.get_param::(1) { + if let Some(Ok(org_id)) = request.param::(1) { if uuid::Uuid::parse_str(&org_id).is_ok() { return Some(org_id); } } - if let Some(Ok(org_id)) = request.get_query_value::("organizationId") { + if let Some(Ok(org_id)) = request.query_value::("organizationId") { if uuid::Uuid::parse_str(&org_id).is_ok() { return Some(org_id); } @@ -426,52 +427,48 @@ fn get_org_id(request: &Request) -> Option { None } -impl<'a, 'r> FromRequest<'a, 'r> for OrgHeaders { +#[rocket::async_trait] +impl<'r> FromRequest<'r> for OrgHeaders { type Error = &'static str; - fn from_request(request: &'a Request<'r>) -> Outcome { - match request.guard::() { - Outcome::Forward(_) => Outcome::Forward(()), - Outcome::Failure(f) => Outcome::Failure(f), - Outcome::Success(headers) => { - match get_org_id(request) { - Some(org_id) => { - let conn = match request.guard::() { - Outcome::Success(conn) => conn, - _ => err_handler!("Error getting DB"), - }; + async fn from_request(request: &'r Request<'_>) -> Outcome { + let headers = try_outcome!(Headers::from_request(request).await); + match get_org_id(request) { + Some(org_id) => { + let conn = match DbConn::from_request(request).await { + Outcome::Success(conn) => conn, + _ => err_handler!("Error getting DB"), + }; - let user = headers.user; - let org_user = match UserOrganization::find_by_user_and_org(&user.uuid, &org_id, &conn) { - Some(user) => { - if user.status == UserOrgStatus::Confirmed as i32 { - user - } else { - err_handler!("The current user isn't confirmed member of the organization") - } - } - None => err_handler!("The current user isn't member of the organization"), - }; - - Outcome::Success(Self { - host: headers.host, - device: headers.device, - user, - org_user_type: { - if let Some(org_usr_type) = UserOrgType::from_i32(org_user.atype) { - org_usr_type - } else { - // This should only happen if the DB is corrupted - err_handler!("Unknown 
user type in the database") - } - }, - org_user, - org_id, - }) + let user = headers.user; + let org_user = match UserOrganization::find_by_user_and_org(&user.uuid, &org_id, &conn) { + Some(user) => { + if user.status == UserOrgStatus::Confirmed as i32 { + user + } else { + err_handler!("The current user isn't confirmed member of the organization") + } } - _ => err_handler!("Error getting the organization id"), - } + None => err_handler!("The current user isn't member of the organization"), + }; + + Outcome::Success(Self { + host: headers.host, + device: headers.device, + user, + org_user_type: { + if let Some(org_usr_type) = UserOrgType::from_i32(org_user.atype) { + org_usr_type + } else { + // This should only happen if the DB is corrupted + err_handler!("Unknown user type in the database") + } + }, + org_user, + org_id, + }) } + _ => err_handler!("Error getting the organization id"), } } } @@ -483,25 +480,21 @@ pub struct AdminHeaders { pub org_user_type: UserOrgType, } -impl<'a, 'r> FromRequest<'a, 'r> for AdminHeaders { +#[rocket::async_trait] +impl<'r> FromRequest<'r> for AdminHeaders { type Error = &'static str; - fn from_request(request: &'a Request<'r>) -> Outcome { - match request.guard::() { - Outcome::Forward(_) => Outcome::Forward(()), - Outcome::Failure(f) => Outcome::Failure(f), - Outcome::Success(headers) => { - if headers.org_user_type >= UserOrgType::Admin { - Outcome::Success(Self { - host: headers.host, - device: headers.device, - user: headers.user, - org_user_type: headers.org_user_type, - }) - } else { - err_handler!("You need to be Admin or Owner to call this endpoint") - } - } + async fn from_request(request: &'r Request<'_>) -> Outcome { + let headers = try_outcome!(OrgHeaders::from_request(request).await); + if headers.org_user_type >= UserOrgType::Admin { + Outcome::Success(Self { + host: headers.host, + device: headers.device, + user: headers.user, + org_user_type: headers.org_user_type, + }) + } else { + err_handler!("You need to be 
Admin or Owner to call this endpoint") } } } @@ -520,13 +513,13 @@ impl From for Headers { // but there could be cases where it is a query value. // First check the path, if this is not a valid uuid, try the query values. fn get_col_id(request: &Request) -> Option { - if let Some(Ok(col_id)) = request.get_param::(3) { + if let Some(Ok(col_id)) = request.param::(3) { if uuid::Uuid::parse_str(&col_id).is_ok() { return Some(col_id); } } - if let Some(Ok(col_id)) = request.get_query_value::("collectionId") { + if let Some(Ok(col_id)) = request.query_value::("collectionId") { if uuid::Uuid::parse_str(&col_id).is_ok() { return Some(col_id); } @@ -545,46 +538,38 @@ pub struct ManagerHeaders { pub org_user_type: UserOrgType, } -impl<'a, 'r> FromRequest<'a, 'r> for ManagerHeaders { +#[rocket::async_trait] +impl<'r> FromRequest<'r> for ManagerHeaders { type Error = &'static str; - fn from_request(request: &'a Request<'r>) -> Outcome { - match request.guard::() { - Outcome::Forward(_) => Outcome::Forward(()), - Outcome::Failure(f) => Outcome::Failure(f), - Outcome::Success(headers) => { - if headers.org_user_type >= UserOrgType::Manager { - match get_col_id(request) { - Some(col_id) => { - let conn = match request.guard::() { - Outcome::Success(conn) => conn, - _ => err_handler!("Error getting DB"), - }; + async fn from_request(request: &'r Request<'_>) -> Outcome { + let headers = try_outcome!(OrgHeaders::from_request(request).await); + if headers.org_user_type >= UserOrgType::Manager { + match get_col_id(request) { + Some(col_id) => { + let conn = match DbConn::from_request(request).await { + Outcome::Success(conn) => conn, + _ => err_handler!("Error getting DB"), + }; - if !headers.org_user.has_full_access() { - match CollectionUser::find_by_collection_and_user( - &col_id, - &headers.org_user.user_uuid, - &conn, - ) { - Some(_) => (), - None => err_handler!("The current user isn't a manager for this collection"), - } - } + if !headers.org_user.has_full_access() { + match 
CollectionUser::find_by_collection_and_user(&col_id, &headers.org_user.user_uuid, &conn) { + Some(_) => (), + None => err_handler!("The current user isn't a manager for this collection"), } - _ => err_handler!("Error getting the collection id"), } - - Outcome::Success(Self { - host: headers.host, - device: headers.device, - user: headers.user, - org_user_type: headers.org_user_type, - }) - } else { - err_handler!("You need to be a Manager, Admin or Owner to call this endpoint") } + _ => err_handler!("Error getting the collection id"), } + + Outcome::Success(Self { + host: headers.host, + device: headers.device, + user: headers.user, + org_user_type: headers.org_user_type, + }) + } else { + err_handler!("You need to be a Manager, Admin or Owner to call this endpoint") } } } @@ -608,25 +593,21 @@ pub struct ManagerHeadersLoose { pub org_user_type: UserOrgType, } -impl<'a, 'r> FromRequest<'a, 'r> for ManagerHeadersLoose { +#[rocket::async_trait] +impl<'r> FromRequest<'r> for ManagerHeadersLoose { type Error = &'static str; - fn from_request(request: &'a Request<'r>) -> Outcome { - match request.guard::() { - Outcome::Forward(_) => Outcome::Forward(()), - Outcome::Failure(f) => Outcome::Failure(f), - Outcome::Success(headers) => { - if headers.org_user_type >= UserOrgType::Manager { - Outcome::Success(Self { - host: headers.host, - device: headers.device, - user: headers.user, - org_user_type: headers.org_user_type, - }) - } else { - err_handler!("You need to be a Manager, Admin or Owner to call this endpoint") - } - } + async fn from_request(request: &'r Request<'_>) -> Outcome { + let headers = try_outcome!(OrgHeaders::from_request(request).await); + if headers.org_user_type >= UserOrgType::Manager { + Outcome::Success(Self { + host: headers.host, + device: headers.device, + user: headers.user, + org_user_type: headers.org_user_type, + }) + } else { + err_handler!("You need to be a Manager, Admin or Owner to call this endpoint") } } } @@ -647,24 +628,20 @@ pub struct 
OwnerHeaders { pub user: User, } -impl<'a, 'r> FromRequest<'a, 'r> for OwnerHeaders { +#[rocket::async_trait] +impl<'r> FromRequest<'r> for OwnerHeaders { type Error = &'static str; - fn from_request(request: &'a Request<'r>) -> Outcome { - match request.guard::() { - Outcome::Forward(_) => Outcome::Forward(()), - Outcome::Failure(f) => Outcome::Failure(f), - Outcome::Success(headers) => { - if headers.org_user_type == UserOrgType::Owner { - Outcome::Success(Self { - host: headers.host, - device: headers.device, - user: headers.user, - }) - } else { - err_handler!("You need to be Owner to call this endpoint") - } - } + async fn from_request(request: &'r Request<'_>) -> Outcome { + let headers = try_outcome!(OrgHeaders::from_request(request).await); + if headers.org_user_type == UserOrgType::Owner { + Outcome::Success(Self { + host: headers.host, + device: headers.device, + user: headers.user, + }) + } else { + err_handler!("You need to be Owner to call this endpoint") } } } @@ -678,10 +655,11 @@ pub struct ClientIp { pub ip: IpAddr, } -impl<'a, 'r> FromRequest<'a, 'r> for ClientIp { +#[rocket::async_trait] +impl<'r> FromRequest<'r> for ClientIp { type Error = (); - fn from_request(req: &'a Request<'r>) -> Outcome { + async fn from_request(req: &'r Request<'_>) -> Outcome { let ip = if CONFIG._ip_header_enabled() { req.headers().get_one(&CONFIG.ip_header()).and_then(|ip| { match ip.find(',') { diff --git a/src/config.rs b/src/config.rs index 9e1f5e56..efe38754 100644 --- a/src/config.rs +++ b/src/config.rs @@ -36,6 +36,9 @@ macro_rules! make_config { pub struct Config { inner: RwLock } struct Inner { + rocket_shutdown_handle: Option, + ws_shutdown_handle: Option, + templates: Handlebars<'static>, config: ConfigItems, @@ -332,6 +335,8 @@ make_config! 
{ attachments_folder: String, false, auto, |c| format!("{}/{}", c.data_folder, "attachments"); /// Sends folder sends_folder: String, false, auto, |c| format!("{}/{}", c.data_folder, "sends"); + /// Temp folder |> Used for storing temporary file uploads + tmp_folder: String, false, auto, |c| format!("{}/{}", c.data_folder, "tmp"); /// Templates folder templates_folder: String, false, auto, |c| format!("{}/{}", c.data_folder, "templates"); /// Session JWT key @@ -509,6 +514,9 @@ make_config! { /// Max database connection retries |> Number of times to retry the database connection during startup, with 1 second between each retry, set to 0 to retry indefinitely db_connection_retries: u32, false, def, 15; + /// Timeout when acquiring database connection + database_timeout: u64, false, def, 30; + /// Database connection pool size database_max_conns: u32, false, def, 10; @@ -743,6 +751,8 @@ impl Config { Ok(Config { inner: RwLock::new(Inner { + rocket_shutdown_handle: None, + ws_shutdown_handle: None, templates: load_templates(&config.templates_folder), config, _env, @@ -907,6 +917,27 @@ impl Config { hb.render(name, data).map_err(Into::into) } } + + pub fn set_rocket_shutdown_handle(&self, handle: rocket::Shutdown) { + self.inner.write().unwrap().rocket_shutdown_handle = Some(handle); + } + + pub fn set_ws_shutdown_handle(&self, handle: ws::Sender) { + self.inner.write().unwrap().ws_shutdown_handle = Some(handle); + } + + pub fn shutdown(&self) { + if let Ok(c) = self.inner.read() { + if let Some(handle) = c.ws_shutdown_handle.clone() { + handle.shutdown().ok(); + } + // Wait a bit before stopping the web server + std::thread::sleep(std::time::Duration::from_secs(1)); + if let Some(handle) = c.rocket_shutdown_handle.clone() { + handle.notify(); + } + } + } } use handlebars::{Context, Handlebars, Helper, HelperResult, Output, RenderContext, RenderError, Renderable}; diff --git a/src/db/mod.rs b/src/db/mod.rs index bcbb7ce4..db52d513 100644 --- a/src/db/mod.rs +++
b/src/db/mod.rs @@ -1,8 +1,16 @@ +use std::{sync::Arc, time::Duration}; + use diesel::r2d2::{ConnectionManager, Pool, PooledConnection}; use rocket::{ http::Status, + outcome::IntoOutcome, request::{FromRequest, Outcome}, - Request, State, + Request, +}; + +use tokio::{ + sync::{Mutex, OwnedSemaphorePermit, Semaphore}, + time::timeout, }; use crate::{ @@ -22,6 +30,23 @@ pub mod __mysql_schema; #[path = "schemas/postgresql/schema.rs"] pub mod __postgresql_schema; +// These changes are based on Rocket 0.5-rc wrapper of Diesel: https://github.com/SergioBenitez/Rocket/blob/v0.5-rc/contrib/sync_db_pools + +// A wrapper around spawn_blocking that propagates panics to the calling code. +pub async fn run_blocking(job: F) -> R +where + F: FnOnce() -> R + Send + 'static, + R: Send + 'static, +{ + match tokio::task::spawn_blocking(job).await { + Ok(ret) => ret, + Err(e) => match e.try_into_panic() { + Ok(panic) => std::panic::resume_unwind(panic), + Err(_) => unreachable!("spawn_blocking tasks are never cancelled"), + }, + } +} + // This is used to generate the main DbConn and DbPool enums, which contain one variant for each database supported macro_rules! generate_connections { ( $( $name:ident: $ty:ty ),+ ) => { @@ -29,12 +54,53 @@ macro_rules! generate_connections { #[derive(Eq, PartialEq)] pub enum DbConnType { $( $name, )+ } + pub struct DbConn { + conn: Arc>>, + permit: Option, + } + #[allow(non_camel_case_types)] - pub enum DbConn { $( #[cfg($name)] $name(PooledConnection>), )+ } + pub enum DbConnInner { $( #[cfg($name)] $name(PooledConnection>), )+ } + + + #[derive(Clone)] + pub struct DbPool { + // This is an 'Option' so that we can drop the pool in a 'spawn_blocking'.
+ pool: Option, + semaphore: Arc + } #[allow(non_camel_case_types)] #[derive(Clone)] - pub enum DbPool { $( #[cfg($name)] $name(Pool>), )+ } + pub enum DbPoolInner { $( #[cfg($name)] $name(Pool>), )+ } + + impl Drop for DbConn { + fn drop(&mut self) { + let conn = self.conn.clone(); + let permit = self.permit.take(); + + // Since connection can't be on the stack in an async fn during an + // await, we have to spawn a new blocking-safe thread... + tokio::task::spawn_blocking(move || { + // And then re-enter the runtime to wait on the async mutex, but in a blocking fashion. + let mut conn = tokio::runtime::Handle::current().block_on(conn.lock_owned()); + + if let Some(conn) = conn.take() { + drop(conn); + } + + // Drop permit after the connection is dropped + drop(permit); + }); + } + } + + impl Drop for DbPool { + fn drop(&mut self) { + let pool = self.pool.take(); + tokio::task::spawn_blocking(move || drop(pool)); + } + } impl DbPool { // For the given database URL, guess it's type, run migrations create pool and return it @@ -50,9 +116,13 @@ macro_rules! generate_connections { let manager = ConnectionManager::new(&url); let pool = Pool::builder() .max_size(CONFIG.database_max_conns()) + .connection_timeout(Duration::from_secs(CONFIG.database_timeout())) .build(manager) .map_res("Failed to create pool")?; - return Ok(Self::$name(pool)); + return Ok(DbPool { + pool: Some(DbPoolInner::$name(pool)), + semaphore: Arc::new(Semaphore::new(CONFIG.database_max_conns() as usize)), + }); } #[cfg(not($name))] #[allow(unreachable_code)] @@ -61,10 +131,26 @@ macro_rules! 
generate_connections { )+ } } // Get a connection from the pool - pub fn get(&self) -> Result { - match self { $( + pub async fn get(&self) -> Result { + let duration = Duration::from_secs(CONFIG.database_timeout()); + let permit = match timeout(duration, self.semaphore.clone().acquire_owned()).await { + Ok(p) => p.expect("Semaphore should be open"), + Err(_) => { + err!("Timeout waiting for database connection"); + } + }; + + match self.pool.as_ref().expect("DbPool.pool should always be Some()") { $( #[cfg($name)] - Self::$name(p) => Ok(DbConn::$name(p.get().map_res("Error retrieving connection from pool")?)), + DbPoolInner::$name(p) => { + let pool = p.clone(); + let c = run_blocking(move || pool.get_timeout(duration)).await.map_res("Error retrieving connection from pool")?; + + return Ok(DbConn { + conn: Arc::new(Mutex::new(Some(DbConnInner::$name(c)))), + permit: Some(permit) + }); + }, )+ } } } @@ -113,42 +199,95 @@ macro_rules! db_run { db_run! { $conn: sqlite, mysql, postgresql $body } }; - // Different code for each db - ( $conn:ident: $( $($db:ident),+ $body:block )+ ) => {{ - #[allow(unused)] use diesel::prelude::*; - match $conn { - $($( - #[cfg($db)] - crate::db::DbConn::$db(ref $conn) => { - paste::paste! { - #[allow(unused)] use crate::db::[<__ $db _schema>]::{self as schema, *}; - #[allow(unused)] use [<__ $db _model>]::*; - #[allow(unused)] use crate::db::FromDb; - } - $body - }, - )+)+ - }} - }; - - // Same for all dbs ( @raw $conn:ident: $body:block ) => { db_run! 
{ @raw $conn: sqlite, mysql, postgresql $body } }; // Different code for each db - ( @raw $conn:ident: $( $($db:ident),+ $body:block )+ ) => { + ( $conn:ident: $( $($db:ident),+ $body:block )+ ) => {{ #[allow(unused)] use diesel::prelude::*; - #[allow(unused_variables)] - match $conn { - $($( - #[cfg($db)] - crate::db::DbConn::$db(ref $conn) => { - $body - }, - )+)+ - } - }; + + // It is important that this inner Arc> (or the OwnedMutexGuard + // derived from it) never be a variable on the stack at an await point, + // where Drop might be called at any time. This causes (synchronous) + // Drop to be called from asynchronous code, which some database + // wrappers do not or can not handle. + let conn = $conn.conn.clone(); + + // Since connection can't be on the stack in an async fn during an + // await, we have to spawn a new blocking-safe thread... + /* + run_blocking(move || { + // And then re-enter the runtime to wait on the async mutex, but in + // a blocking fashion. + let mut conn = tokio::runtime::Handle::current().block_on(conn.lock_owned()); + let conn = conn.as_mut().expect("internal invariant broken: self.connection is Some"); + */ + let mut __conn_mutex = conn.try_lock_owned().unwrap(); + let conn = __conn_mutex.as_mut().unwrap(); + match conn { + $($( + #[cfg($db)] + crate::db::DbConnInner::$db($conn) => { + paste::paste! { + #[allow(unused)] use crate::db::[<__ $db _schema>]::{self as schema, *}; + #[allow(unused)] use [<__ $db _model>]::*; + #[allow(unused)] use crate::db::FromDb; + } + + /* + // Since connection can't be on the stack in an async fn during an + // await, we have to spawn a new blocking-safe thread... + run_blocking(move || { + // And then re-enter the runtime to wait on the async mutex, but in + // a blocking fashion. 
+ let mut conn = tokio::runtime::Handle::current().block_on(async { + conn.lock_owned().await + }); + + let conn = conn.as_mut().expect("internal invariant broken: self.connection is Some"); + f(conn) + }).await;*/ + + $body + }, + )+)+ + } + // }).await + }}; + + ( @raw $conn:ident: $( $($db:ident),+ $body:block )+ ) => {{ + #[allow(unused)] use diesel::prelude::*; + + // It is important that this inner Arc> (or the OwnedMutexGuard + // derived from it) never be a variable on the stack at an await point, + // where Drop might be called at any time. This causes (synchronous) + // Drop to be called from asynchronous code, which some database + // wrappers do not or can not handle. + let conn = $conn.conn.clone(); + + // Since connection can't be on the stack in an async fn during an + // await, we have to spawn a new blocking-safe thread... + run_blocking(move || { + // And then re-enter the runtime to wait on the async mutex, but in + // a blocking fashion. + let mut conn = tokio::runtime::Handle::current().block_on(conn.lock_owned()); + match conn.as_mut().expect("internal invariant broken: self.connection is Some") { + $($( + #[cfg($db)] + crate::db::DbConnInner::$db($conn) => { + paste::paste! { + #[allow(unused)] use crate::db::[<__ $db _schema>]::{self as schema, *}; + // @RAW: #[allow(unused)] use [<__ $db _model>]::*; + #[allow(unused)] use crate::db::FromDb; + } + + $body + }, + )+)+ + } + }).await + }}; } pub trait FromDb { @@ -227,9 +366,10 @@ pub mod models; /// Creates a back-up of the sqlite database /// MySQL/MariaDB and PostgreSQL are not supported. -pub fn backup_database(conn: &DbConn) -> Result<(), Error> { +pub async fn backup_database(conn: &DbConn) -> Result<(), Error> { db_run! 
{@raw conn: postgresql, mysql { + let _ = conn; err!("PostgreSQL and MySQL/MariaDB do not support this backup feature"); } sqlite { @@ -244,7 +384,7 @@ pub fn backup_database(conn: &DbConn) -> Result<(), Error> { } /// Get the SQL Server version -pub fn get_sql_server_version(conn: &DbConn) -> String { +pub async fn get_sql_server_version(conn: &DbConn) -> String { db_run! {@raw conn: postgresql, mysql { no_arg_sql_function!(version, diesel::sql_types::Text); @@ -260,15 +400,14 @@ pub fn get_sql_server_version(conn: &DbConn) -> String { /// Attempts to retrieve a single connection from the managed database pool. If /// no pool is currently managed, fails with an `InternalServerError` status. If /// no connections are available, fails with a `ServiceUnavailable` status. -impl<'a, 'r> FromRequest<'a, 'r> for DbConn { +#[rocket::async_trait] +impl<'r> FromRequest<'r> for DbConn { type Error = (); - fn from_request(request: &'a Request<'r>) -> Outcome { - // https://github.com/SergioBenitez/Rocket/commit/e3c1a4ad3ab9b840482ec6de4200d30df43e357c - let pool = try_outcome!(request.guard::>()); - match pool.get() { - Ok(conn) => Outcome::Success(conn), - Err(_) => Outcome::Failure((Status::ServiceUnavailable, ())), + async fn from_request(request: &'r Request<'_>) -> Outcome { + match request.rocket().state::() { + Some(p) => p.get().await.map_err(|_| ()).into_outcome(Status::ServiceUnavailable), + None => Outcome::Failure((Status::InternalServerError, ())), } } } diff --git a/src/error.rs b/src/error.rs index 8b0adace..babe82ad 100644 --- a/src/error.rs +++ b/src/error.rs @@ -45,6 +45,7 @@ use lettre::transport::smtp::Error as SmtpErr; use openssl::error::ErrorStack as SSLErr; use regex::Error as RegexErr; use reqwest::Error as ReqErr; +use rocket::error::Error as RocketErr; use serde_json::{Error as SerdeErr, Value}; use std::io::Error as IoErr; use std::time::SystemTimeError as TimeErr; @@ -84,6 +85,7 @@ make_error! 
{ Address(AddrErr): _has_source, _api_error, Smtp(SmtpErr): _has_source, _api_error, OpenSSL(SSLErr): _has_source, _api_error, + Rocket(RocketErr): _has_source, _api_error, DieselCon(DieselConErr): _has_source, _api_error, DieselMig(DieselMigErr): _has_source, _api_error, @@ -193,8 +195,8 @@ use rocket::http::{ContentType, Status}; use rocket::request::Request; use rocket::response::{self, Responder, Response}; -impl<'r> Responder<'r> for Error { - fn respond_to(self, _: &Request) -> response::Result<'r> { +impl<'r> Responder<'r, 'static> for Error { + fn respond_to(self, _: &Request) -> response::Result<'static> { match self.error { ErrorKind::Empty(_) => {} // Don't print the error in this situation ErrorKind::Simple(_) => {} // Don't print the error in this situation @@ -202,8 +204,8 @@ impl<'r> Responder<'r> for Error { }; let code = Status::from_code(self.error_code).unwrap_or(Status::BadRequest); - - Response::build().status(code).header(ContentType::JSON).sized_body(Cursor::new(format!("{}", self))).ok() + let body = self.to_string(); + Response::build().status(code).header(ContentType::JSON).sized_body(Some(body.len()), Cursor::new(body)).ok() } } diff --git a/src/main.rs b/src/main.rs index d7bef292..bcbf1d29 100644 --- a/src/main.rs +++ b/src/main.rs @@ -20,8 +20,15 @@ extern crate diesel; #[macro_use] extern crate diesel_migrations; -use job_scheduler::{Job, JobScheduler}; -use std::{fs::create_dir_all, panic, path::Path, process::exit, str::FromStr, thread, time::Duration}; +use std::{ + fs::{canonicalize, create_dir_all}, + panic, + path::Path, + process::exit, + str::FromStr, + thread, + time::Duration, +}; #[macro_use] mod error; @@ -37,9 +44,11 @@ mod util; pub use config::CONFIG; pub use error::{Error, MapResult}; +use rocket::data::{Limits, ToByteUnit}; pub use util::is_running_in_docker; -fn main() { +#[rocket::main] +async fn main() -> Result<(), Error> { parse_args(); launch_info(); @@ -56,13 +65,16 @@ fn main() { }); check_web_vault(); - 
create_icon_cache_folder(); + create_dir(&CONFIG.icon_cache_folder(), "icon cache"); + create_dir(&CONFIG.tmp_folder(), "tmp folder"); + create_dir(&CONFIG.sends_folder(), "sends folder"); + create_dir(&CONFIG.attachments_folder(), "attachments folder"); let pool = create_db_pool(); - schedule_jobs(pool.clone()); - crate::db::models::TwoFactor::migrate_u2f_to_webauthn(&pool.get().unwrap()).unwrap(); + schedule_jobs(pool.clone()).await; + crate::db::models::TwoFactor::migrate_u2f_to_webauthn(&pool.get().await.unwrap()).unwrap(); - launch_rocket(pool, extra_debug); // Blocks until program termination. + launch_rocket(pool, extra_debug).await // Blocks until program termination. } const HELP: &str = "\ @@ -127,10 +139,12 @@ fn init_logging(level: log::LevelFilter) -> Result<(), fern::InitError> { .level_for("hyper::server", log::LevelFilter::Warn) // Silence rocket logs .level_for("_", log::LevelFilter::Off) - .level_for("launch", log::LevelFilter::Off) - .level_for("launch_", log::LevelFilter::Off) - .level_for("rocket::rocket", log::LevelFilter::Off) - .level_for("rocket::fairing", log::LevelFilter::Off) + .level_for("rocket::launch", log::LevelFilter::Error) + .level_for("rocket::launch_", log::LevelFilter::Error) + .level_for("rocket::rocket", log::LevelFilter::Warn) + .level_for("rocket::server", log::LevelFilter::Warn) + .level_for("rocket::fairing::fairings", log::LevelFilter::Warn) + .level_for("rocket::shield::shield", log::LevelFilter::Warn) // Never show html5ever and hyper::proto logs, too noisy .level_for("html5ever", log::LevelFilter::Off) .level_for("hyper::proto", log::LevelFilter::Off) @@ -243,10 +257,6 @@ fn create_dir(path: &str, description: &str) { create_dir_all(path).expect(&err_msg); } -fn create_icon_cache_folder() { - create_dir(&CONFIG.icon_cache_folder(), "icon cache"); -} - fn check_data_folder() { let data_folder = &CONFIG.data_folder(); let path = Path::new(data_folder); @@ -314,51 +324,73 @@ fn create_db_pool() -> db::DbPool { } } -fn 
launch_rocket(pool: db::DbPool, extra_debug: bool) { +async fn launch_rocket(pool: db::DbPool, extra_debug: bool) -> Result<(), Error> { let basepath = &CONFIG.domain_path(); + let mut config = rocket::Config::from(rocket::Config::figment()); + config.address = std::net::IpAddr::V4(std::net::Ipv4Addr::UNSPECIFIED); // TODO: Allow this to be changed, keep ROCKET_ADDRESS for compat + config.temp_dir = canonicalize(CONFIG.tmp_folder()).unwrap().into(); + config.limits = Limits::new() // + .limit("json", 10.megabytes()) + .limit("data-form", 150.megabytes()) + .limit("file", 150.megabytes()); + // If adding more paths here, consider also adding them to // crate::utils::LOGGED_ROUTES to make sure they appear in the log - let result = rocket::ignite() - .mount(&[basepath, "/"].concat(), api::web_routes()) - .mount(&[basepath, "/api"].concat(), api::core_routes()) - .mount(&[basepath, "/admin"].concat(), api::admin_routes()) - .mount(&[basepath, "/identity"].concat(), api::identity_routes()) - .mount(&[basepath, "/icons"].concat(), api::icons_routes()) - .mount(&[basepath, "/notifications"].concat(), api::notifications_routes()) + let instance = rocket::custom(config) + .mount([basepath, "/"].concat(), api::web_routes()) + .mount([basepath, "/api"].concat(), api::core_routes()) + .mount([basepath, "/admin"].concat(), api::admin_routes()) + .mount([basepath, "/identity"].concat(), api::identity_routes()) + .mount([basepath, "/icons"].concat(), api::icons_routes()) + .mount([basepath, "/notifications"].concat(), api::notifications_routes()) .manage(pool) .manage(api::start_notification_server()) .attach(util::AppHeaders()) .attach(util::Cors()) .attach(util::BetterLogging(extra_debug)) - .launch(); + .ignite() + .await?; - // Launch and print error if there is one - // The launch will restore the original logging level - error!("Launch error {:#?}", result); + CONFIG.set_rocket_shutdown_handle(instance.shutdown()); + ctrlc::set_handler(move || { + info!("Exiting 
vaultwarden!"); + CONFIG.shutdown(); + }) + .expect("Error setting Ctrl-C handler"); + + instance.launch().await?; + + info!("Vaultwarden process exited!"); + Ok(()) } -fn schedule_jobs(pool: db::DbPool) { +async fn schedule_jobs(pool: db::DbPool) { if CONFIG.job_poll_interval_ms() == 0 { info!("Job scheduler disabled."); return; } + + let runtime = tokio::runtime::Handle::current(); + thread::Builder::new() .name("job-scheduler".to_string()) .spawn(move || { + use job_scheduler::{Job, JobScheduler}; + let mut sched = JobScheduler::new(); // Purge sends that are past their deletion date. if !CONFIG.send_purge_schedule().is_empty() { sched.add(Job::new(CONFIG.send_purge_schedule().parse().unwrap(), || { - api::purge_sends(pool.clone()); + runtime.spawn(api::purge_sends(pool.clone())); })); } // Purge trashed items that are old enough to be auto-deleted. if !CONFIG.trash_purge_schedule().is_empty() { sched.add(Job::new(CONFIG.trash_purge_schedule().parse().unwrap(), || { - api::purge_trashed_ciphers(pool.clone()); + runtime.spawn(api::purge_trashed_ciphers(pool.clone())); })); } @@ -366,7 +398,7 @@ fn schedule_jobs(pool: db::DbPool) { // indicates that a user's master password has been compromised. if !CONFIG.incomplete_2fa_schedule().is_empty() { sched.add(Job::new(CONFIG.incomplete_2fa_schedule().parse().unwrap(), || { - api::send_incomplete_2fa_notifications(pool.clone()); + runtime.spawn(api::send_incomplete_2fa_notifications(pool.clone())); })); } @@ -375,7 +407,7 @@ fn schedule_jobs(pool: db::DbPool) { // sending reminders for requests that are about to be granted anyway. if !CONFIG.emergency_request_timeout_schedule().is_empty() { sched.add(Job::new(CONFIG.emergency_request_timeout_schedule().parse().unwrap(), || { - api::emergency_request_timeout_job(pool.clone()); + runtime.spawn(api::emergency_request_timeout_job(pool.clone())); })); } @@ -383,7 +415,7 @@ fn schedule_jobs(pool: db::DbPool) { // emergency access requests. 
if !CONFIG.emergency_notification_reminder_schedule().is_empty() { sched.add(Job::new(CONFIG.emergency_notification_reminder_schedule().parse().unwrap(), || { - api::emergency_notification_reminder_job(pool.clone()); + runtime.spawn(api::emergency_notification_reminder_job(pool.clone())); })); } diff --git a/src/util.rs b/src/util.rs index 4defc63f..323df413 100644 --- a/src/util.rs +++ b/src/util.rs @@ -5,10 +5,10 @@ use std::io::Cursor; use rocket::{ fairing::{Fairing, Info, Kind}, - http::{ContentType, Header, HeaderMap, Method, RawStr, Status}, + http::{ContentType, Header, HeaderMap, Method, Status}, request::FromParam, response::{self, Responder}, - Data, Request, Response, Rocket, + Data, Orbit, Request, Response, Rocket, }; use std::thread::sleep; @@ -18,6 +18,7 @@ use crate::CONFIG; pub struct AppHeaders(); +#[rocket::async_trait] impl Fairing for AppHeaders { fn info(&self) -> Info { Info { @@ -26,7 +27,7 @@ impl Fairing for AppHeaders { } } - fn on_response(&self, _req: &Request, res: &mut Response) { + async fn on_response<'r>(&self, _req: &'r Request<'_>, res: &mut Response<'r>) { res.set_raw_header("Permissions-Policy", "accelerometer=(), ambient-light-sensor=(), autoplay=(), camera=(), encrypted-media=(), fullscreen=(), geolocation=(), gyroscope=(), magnetometer=(), microphone=(), midi=(), payment=(), picture-in-picture=(), sync-xhr=(self \"https://haveibeenpwned.com\" \"https://2fa.directory\"), usb=(), vr=()"); res.set_raw_header("Referrer-Policy", "same-origin"); res.set_raw_header("X-Frame-Options", "SAMEORIGIN"); @@ -72,6 +73,7 @@ impl Cors { } } +#[rocket::async_trait] impl Fairing for Cors { fn info(&self) -> Info { Info { @@ -80,7 +82,7 @@ impl Fairing for Cors { } } - fn on_response(&self, request: &Request, response: &mut Response) { + async fn on_response<'r>(&self, request: &'r Request<'_>, response: &mut Response<'r>) { let req_headers = request.headers(); if let Some(origin) = Cors::get_allowed_origin(req_headers) { @@ -97,7 +99,7 @@ 
impl Fairing for Cors { response.set_header(Header::new("Access-Control-Allow-Credentials", "true")); response.set_status(Status::Ok); response.set_header(ContentType::Plain); - response.set_sized_body(Cursor::new("")); + response.set_sized_body(Some(0), Cursor::new("")); } } } @@ -134,25 +136,21 @@ impl Cached { } } -impl<'r, R: Responder<'r>> Responder<'r> for Cached { - fn respond_to(self, req: &Request) -> response::Result<'r> { +impl<'r, R: 'r + Responder<'r, 'static> + Send> Responder<'r, 'static> for Cached { + fn respond_to(self, request: &'r Request<'_>) -> response::Result<'static> { + let mut res = self.response.respond_to(request)?; + let cache_control_header = if self.is_immutable { format!("public, immutable, max-age={}", self.ttl) } else { format!("public, max-age={}", self.ttl) }; + res.set_raw_header("Cache-Control", cache_control_header); let time_now = chrono::Local::now(); - - match self.response.respond_to(req) { - Ok(mut res) => { - res.set_raw_header("Cache-Control", cache_control_header); - let expiry_time = time_now + chrono::Duration::seconds(self.ttl.try_into().unwrap()); - res.set_raw_header("Expires", format_datetime_http(&expiry_time)); - Ok(res) - } - e @ Err(_) => e, - } + let expiry_time = time_now + chrono::Duration::seconds(self.ttl.try_into().unwrap()); + res.set_raw_header("Expires", format_datetime_http(&expiry_time)); + Ok(res) } } @@ -175,11 +173,9 @@ impl<'r> FromParam<'r> for SafeString { type Error = (); #[inline(always)] - fn from_param(param: &'r RawStr) -> Result { - let s = param.percent_decode().map(|cow| cow.into_owned()).map_err(|_| ())?; - - if s.chars().all(|c| matches!(c, 'a'..='z' | 'A'..='Z' |'0'..='9' | '-')) { - Ok(SafeString(s)) + fn from_param(param: &'r str) -> Result { + if param.chars().all(|c| matches!(c, 'a'..='z' | 'A'..='Z' |'0'..='9' | '-')) { + Ok(SafeString(param.to_string())) } else { Err(()) } @@ -193,15 +189,16 @@ const LOGGED_ROUTES: [&str; 6] = // Boolean is extra debug, when true, we ignore 
the whitelist above and also print the mounts pub struct BetterLogging(pub bool); +#[rocket::async_trait] impl Fairing for BetterLogging { fn info(&self) -> Info { Info { name: "Better Logging", - kind: Kind::Launch | Kind::Request | Kind::Response, + kind: Kind::Liftoff | Kind::Request | Kind::Response, } } - fn on_launch(&self, rocket: &Rocket) { + async fn on_liftoff(&self, rocket: &Rocket) { if self.0 { info!(target: "routes", "Routes loaded:"); let mut routes: Vec<_> = rocket.routes().collect(); @@ -225,34 +222,36 @@ impl Fairing for BetterLogging { info!(target: "start", "Rocket has launched from {}", addr); } - fn on_request(&self, request: &mut Request<'_>, _data: &Data) { + async fn on_request(&self, request: &mut Request<'_>, _data: &mut Data<'_>) { let method = request.method(); if !self.0 && method == Method::Options { return; } let uri = request.uri(); let uri_path = uri.path(); - let uri_subpath = uri_path.strip_prefix(&CONFIG.domain_path()).unwrap_or(uri_path); + let uri_path_str = uri_path.url_decode_lossy(); + let uri_subpath = uri_path_str.strip_prefix(&CONFIG.domain_path()).unwrap_or(&uri_path_str); if self.0 || LOGGED_ROUTES.iter().any(|r| uri_subpath.starts_with(r)) { match uri.query() { - Some(q) => info!(target: "request", "{} {}?{}", method, uri_path, &q[..q.len().min(30)]), - None => info!(target: "request", "{} {}", method, uri_path), + Some(q) => info!(target: "request", "{} {}?{}", method, uri_path_str, &q[..q.len().min(30)]), + None => info!(target: "request", "{} {}", method, uri_path_str), }; } } - fn on_response(&self, request: &Request, response: &mut Response) { + async fn on_response<'r>(&self, request: &'r Request<'_>, response: &mut Response<'r>) { if !self.0 && request.method() == Method::Options { return; } let uri_path = request.uri().path(); - let uri_subpath = uri_path.strip_prefix(&CONFIG.domain_path()).unwrap_or(uri_path); + let uri_path_str = uri_path.url_decode_lossy(); + let uri_subpath = 
uri_path_str.strip_prefix(&CONFIG.domain_path()).unwrap_or(&uri_path_str); if self.0 || LOGGED_ROUTES.iter().any(|r| uri_subpath.starts_with(r)) { let status = response.status(); - if let Some(route) = request.route() { - info!(target: "response", "{} => {} {}", route, status.code, status.reason) + if let Some(ref route) = request.route() { + info!(target: "response", "{} => {}", route, status) } else { - info!(target: "response", "{} {}", status.code, status.reason) + info!(target: "response", "{}", status) } } } @@ -614,10 +613,7 @@ where } } -use reqwest::{ - blocking::{Client, ClientBuilder}, - header, -}; +use reqwest::{header, Client, ClientBuilder}; pub fn get_reqwest_client() -> Client { get_reqwest_client_builder().build().expect("Failed to build client") From 775d07e9a0d6e33f5af63abb571cae4e434795d9 Mon Sep 17 00:00:00 2001 From: BlackDex Date: Tue, 16 Nov 2021 17:07:55 +0100 Subject: [PATCH 2/8] Async/Awaited all db methods This is a rather large PR which updates the async branch to have all the database methods as an async fn. Some iter/map logic needed to be changed to a stream::iter().then(), but besides that most changes were just adding async/await where needed. 
--- rustfmt.toml | 8 +- src/api/admin.rs | 147 +++---- src/api/core/accounts.rs | 124 +++--- src/api/core/ciphers.rs | 496 +++++++++++++---------- src/api/core/emergency_access.rs | 216 +++++----- src/api/core/folders.rs | 45 +- src/api/core/mod.rs | 8 +- src/api/core/organizations.rs | 419 ++++++++++--------- src/api/core/sends.rs | 96 +++-- src/api/core/two_factor/authenticator.rs | 37 +- src/api/core/two_factor/duo.rs | 31 +- src/api/core/two_factor/email.rs | 54 +-- src/api/core/two_factor/mod.rs | 48 +-- src/api/core/two_factor/u2f.rs | 61 +-- src/api/core/two_factor/webauthn.rs | 66 +-- src/api/core/two_factor/yubikey.rs | 23 +- src/api/icons.rs | 1 + src/api/identity.rs | 91 +++-- src/auth.rs | 12 +- src/db/mod.rs | 2 +- src/db/models/attachment.rs | 22 +- src/db/models/cipher.rs | 123 +++--- src/db/models/collection.rs | 98 ++--- src/db/models/device.rs | 18 +- src/db/models/emergency_access.rs | 42 +- src/db/models/favorite.rs | 14 +- src/db/models/folder.rs | 32 +- src/db/models/org_policy.rs | 34 +- src/db/models/organization.rs | 87 ++-- src/db/models/send.rs | 46 +-- src/db/models/two_factor.rs | 19 +- src/db/models/two_factor_incomplete.rs | 20 +- src/db/models/user.rs | 78 ++-- src/main.rs | 4 +- 34 files changed, 1424 insertions(+), 1198 deletions(-) diff --git a/rustfmt.toml b/rustfmt.toml index 630b42b2..0b46f6cb 100644 --- a/rustfmt.toml +++ b/rustfmt.toml @@ -1,7 +1,7 @@ -version = "Two" -edition = "2018" +#version = "One" +edition = "2021" max_width = 120 newline_style = "Unix" use_small_heuristics = "Off" -struct_lit_single_line = false -overflow_delimited_expr = true +#struct_lit_single_line = false +#overflow_delimited_expr = true diff --git a/src/api/admin.rs b/src/api/admin.rs index c25587d0..7d81ec7b 100644 --- a/src/api/admin.rs +++ b/src/api/admin.rs @@ -25,6 +25,8 @@ use crate::{ CONFIG, VERSION, }; +use futures::{stream, stream::StreamExt}; + pub fn routes() -> Vec { if !CONFIG.disable_admin_token() && !CONFIG.is_admin_token_set() { 
return routes![admin_disabled]; @@ -253,8 +255,8 @@ struct InviteData { email: String, } -fn get_user_or_404(uuid: &str, conn: &DbConn) -> ApiResult { - if let Some(user) = User::find_by_uuid(uuid, conn) { +async fn get_user_or_404(uuid: &str, conn: &DbConn) -> ApiResult { + if let Some(user) = User::find_by_uuid(uuid, conn).await { Ok(user) } else { err_code!("User doesn't exist", Status::NotFound.code); @@ -262,30 +264,28 @@ fn get_user_or_404(uuid: &str, conn: &DbConn) -> ApiResult { } #[post("/invite", data = "")] -fn invite_user(data: Json, _token: AdminToken, conn: DbConn) -> JsonResult { +async fn invite_user(data: Json, _token: AdminToken, conn: DbConn) -> JsonResult { let data: InviteData = data.into_inner(); let email = data.email.clone(); - if User::find_by_mail(&data.email, &conn).is_some() { + if User::find_by_mail(&data.email, &conn).await.is_some() { err_code!("User already exists", Status::Conflict.code) } let mut user = User::new(email); - // TODO: After try_blocks is stabilized, this can be made more readable - // See: https://github.com/rust-lang/rust/issues/31436 - (|| { + async fn _generate_invite(user: &User, conn: &DbConn) -> EmptyResult { if CONFIG.mail_enabled() { - mail::send_invite(&user.email, &user.uuid, None, None, &CONFIG.invitation_org_name(), None)?; + mail::send_invite(&user.email, &user.uuid, None, None, &CONFIG.invitation_org_name(), None) } else { let invitation = Invitation::new(user.email.clone()); - invitation.save(&conn)?; + invitation.save(conn).await } + } - user.save(&conn) - })() - .map_err(|e| e.with_code(Status::InternalServerError.code))?; + _generate_invite(&user, &conn).await.map_err(|e| e.with_code(Status::InternalServerError.code))?; + user.save(&conn).await.map_err(|e| e.with_code(Status::InternalServerError.code))?; - Ok(Json(user.to_json(&conn))) + Ok(Json(user.to_json(&conn).await)) } #[post("/test/smtp", data = "")] @@ -306,84 +306,90 @@ fn logout(cookies: &CookieJar, referer: Referer) -> Redirect { } 
#[get("/users")] -fn get_users_json(_token: AdminToken, conn: DbConn) -> Json { - let users = User::get_all(&conn); - let users_json: Vec = users.iter().map(|u| u.to_json(&conn)).collect(); +async fn get_users_json(_token: AdminToken, conn: DbConn) -> Json { + let users_json = stream::iter(User::get_all(&conn).await) + .then(|u| async { + let u = u; // Move out this single variable + u.to_json(&conn).await + }) + .collect::>() + .await; Json(Value::Array(users_json)) } #[get("/users/overview")] -fn users_overview(_token: AdminToken, conn: DbConn) -> ApiResult> { - let users = User::get_all(&conn); - let dt_fmt = "%Y-%m-%d %H:%M:%S %Z"; - let users_json: Vec = users - .iter() - .map(|u| { - let mut usr = u.to_json(&conn); - usr["cipher_count"] = json!(Cipher::count_owned_by_user(&u.uuid, &conn)); - usr["attachment_count"] = json!(Attachment::count_by_user(&u.uuid, &conn)); - usr["attachment_size"] = json!(get_display_size(Attachment::size_by_user(&u.uuid, &conn) as i32)); +async fn users_overview(_token: AdminToken, conn: DbConn) -> ApiResult> { + const DT_FMT: &str = "%Y-%m-%d %H:%M:%S %Z"; + + let users_json = stream::iter(User::get_all(&conn).await) + .then(|u| async { + let u = u; // Move out this single variable + let mut usr = u.to_json(&conn).await; + usr["cipher_count"] = json!(Cipher::count_owned_by_user(&u.uuid, &conn).await); + usr["attachment_count"] = json!(Attachment::count_by_user(&u.uuid, &conn).await); + usr["attachment_size"] = json!(get_display_size(Attachment::size_by_user(&u.uuid, &conn).await as i32)); usr["user_enabled"] = json!(u.enabled); - usr["created_at"] = json!(format_naive_datetime_local(&u.created_at, dt_fmt)); - usr["last_active"] = match u.last_active(&conn) { - Some(dt) => json!(format_naive_datetime_local(&dt, dt_fmt)), + usr["created_at"] = json!(format_naive_datetime_local(&u.created_at, DT_FMT)); + usr["last_active"] = match u.last_active(&conn).await { + Some(dt) => json!(format_naive_datetime_local(&dt, DT_FMT)), None => 
json!("Never"), }; usr }) - .collect(); + .collect::>() + .await; let text = AdminTemplateData::with_data("admin/users", json!(users_json)).render()?; Ok(Html(text)) } #[get("/users/")] -fn get_user_json(uuid: String, _token: AdminToken, conn: DbConn) -> JsonResult { - let user = get_user_or_404(&uuid, &conn)?; +async fn get_user_json(uuid: String, _token: AdminToken, conn: DbConn) -> JsonResult { + let user = get_user_or_404(&uuid, &conn).await?; - Ok(Json(user.to_json(&conn))) + Ok(Json(user.to_json(&conn).await)) } #[post("/users//delete")] -fn delete_user(uuid: String, _token: AdminToken, conn: DbConn) -> EmptyResult { - let user = get_user_or_404(&uuid, &conn)?; - user.delete(&conn) +async fn delete_user(uuid: String, _token: AdminToken, conn: DbConn) -> EmptyResult { + let user = get_user_or_404(&uuid, &conn).await?; + user.delete(&conn).await } #[post("/users//deauth")] -fn deauth_user(uuid: String, _token: AdminToken, conn: DbConn) -> EmptyResult { - let mut user = get_user_or_404(&uuid, &conn)?; - Device::delete_all_by_user(&user.uuid, &conn)?; +async fn deauth_user(uuid: String, _token: AdminToken, conn: DbConn) -> EmptyResult { + let mut user = get_user_or_404(&uuid, &conn).await?; + Device::delete_all_by_user(&user.uuid, &conn).await?; user.reset_security_stamp(); - user.save(&conn) + user.save(&conn).await } #[post("/users//disable")] -fn disable_user(uuid: String, _token: AdminToken, conn: DbConn) -> EmptyResult { - let mut user = get_user_or_404(&uuid, &conn)?; - Device::delete_all_by_user(&user.uuid, &conn)?; +async fn disable_user(uuid: String, _token: AdminToken, conn: DbConn) -> EmptyResult { + let mut user = get_user_or_404(&uuid, &conn).await?; + Device::delete_all_by_user(&user.uuid, &conn).await?; user.reset_security_stamp(); user.enabled = false; - user.save(&conn) + user.save(&conn).await } #[post("/users//enable")] -fn enable_user(uuid: String, _token: AdminToken, conn: DbConn) -> EmptyResult { - let mut user = get_user_or_404(&uuid, 
&conn)?; +async fn enable_user(uuid: String, _token: AdminToken, conn: DbConn) -> EmptyResult { + let mut user = get_user_or_404(&uuid, &conn).await?; user.enabled = true; - user.save(&conn) + user.save(&conn).await } #[post("/users//remove-2fa")] -fn remove_2fa(uuid: String, _token: AdminToken, conn: DbConn) -> EmptyResult { - let mut user = get_user_or_404(&uuid, &conn)?; - TwoFactor::delete_all_by_user(&user.uuid, &conn)?; +async fn remove_2fa(uuid: String, _token: AdminToken, conn: DbConn) -> EmptyResult { + let mut user = get_user_or_404(&uuid, &conn).await?; + TwoFactor::delete_all_by_user(&user.uuid, &conn).await?; user.totp_recover = None; - user.save(&conn) + user.save(&conn).await } #[derive(Deserialize, Debug)] @@ -394,10 +400,10 @@ struct UserOrgTypeData { } #[post("/users/org_type", data = "")] -fn update_user_org_type(data: Json, _token: AdminToken, conn: DbConn) -> EmptyResult { +async fn update_user_org_type(data: Json, _token: AdminToken, conn: DbConn) -> EmptyResult { let data: UserOrgTypeData = data.into_inner(); - let mut user_to_edit = match UserOrganization::find_by_user_and_org(&data.user_uuid, &data.org_uuid, &conn) { + let mut user_to_edit = match UserOrganization::find_by_user_and_org(&data.user_uuid, &data.org_uuid, &conn).await { Some(user) => user, None => err!("The specified user isn't member of the organization"), }; @@ -409,7 +415,8 @@ fn update_user_org_type(data: Json, _token: AdminToken, conn: D if user_to_edit.atype == UserOrgType::Owner && new_type != UserOrgType::Owner { // Removing owner permmission, check that there are at least another owner - let num_owners = UserOrganization::find_by_org_and_type(&data.org_uuid, UserOrgType::Owner as i32, &conn).len(); + let num_owners = + UserOrganization::find_by_org_and_type(&data.org_uuid, UserOrgType::Owner as i32, &conn).await.len(); if num_owners <= 1 { err!("Can't change the type of the last owner") @@ -417,37 +424,37 @@ fn update_user_org_type(data: Json, _token: AdminToken, conn: 
D } user_to_edit.atype = new_type as i32; - user_to_edit.save(&conn) + user_to_edit.save(&conn).await } #[post("/users/update_revision")] -fn update_revision_users(_token: AdminToken, conn: DbConn) -> EmptyResult { - User::update_all_revisions(&conn) +async fn update_revision_users(_token: AdminToken, conn: DbConn) -> EmptyResult { + User::update_all_revisions(&conn).await } #[get("/organizations/overview")] -fn organizations_overview(_token: AdminToken, conn: DbConn) -> ApiResult> { - let organizations = Organization::get_all(&conn); - let organizations_json: Vec = organizations - .iter() - .map(|o| { +async fn organizations_overview(_token: AdminToken, conn: DbConn) -> ApiResult> { + let organizations_json = stream::iter(Organization::get_all(&conn).await) + .then(|o| async { + let o = o; //Move out this single variable let mut org = o.to_json(); - org["user_count"] = json!(UserOrganization::count_by_org(&o.uuid, &conn)); - org["cipher_count"] = json!(Cipher::count_by_org(&o.uuid, &conn)); - org["attachment_count"] = json!(Attachment::count_by_org(&o.uuid, &conn)); - org["attachment_size"] = json!(get_display_size(Attachment::size_by_org(&o.uuid, &conn) as i32)); + org["user_count"] = json!(UserOrganization::count_by_org(&o.uuid, &conn).await); + org["cipher_count"] = json!(Cipher::count_by_org(&o.uuid, &conn).await); + org["attachment_count"] = json!(Attachment::count_by_org(&o.uuid, &conn).await); + org["attachment_size"] = json!(get_display_size(Attachment::size_by_org(&o.uuid, &conn).await as i32)); org }) - .collect(); + .collect::>() + .await; let text = AdminTemplateData::with_data("admin/organizations", json!(organizations_json)).render()?; Ok(Html(text)) } #[post("/organizations//delete")] -fn delete_organization(uuid: String, _token: AdminToken, conn: DbConn) -> EmptyResult { - let org = Organization::find_by_uuid(&uuid, &conn).map_res("Organization doesn't exist")?; - org.delete(&conn) +async fn delete_organization(uuid: String, _token: AdminToken, 
conn: DbConn) -> EmptyResult { + let org = Organization::find_by_uuid(&uuid, &conn).await.map_res("Organization doesn't exist")?; + org.delete(&conn).await } #[derive(Deserialize)] diff --git a/src/api/core/accounts.rs b/src/api/core/accounts.rs index d790f67d..af4e0796 100644 --- a/src/api/core/accounts.rs +++ b/src/api/core/accounts.rs @@ -63,11 +63,11 @@ struct KeysData { } #[post("/accounts/register", data = "")] -fn register(data: JsonUpcase, conn: DbConn) -> EmptyResult { +async fn register(data: JsonUpcase, conn: DbConn) -> EmptyResult { let data: RegisterData = data.into_inner().data; let email = data.Email.to_lowercase(); - let mut user = match User::find_by_mail(&email, &conn) { + let mut user = match User::find_by_mail(&email, &conn).await { Some(user) => { if !user.password_hash.is_empty() { if CONFIG.is_signup_allowed(&email) { @@ -84,13 +84,13 @@ fn register(data: JsonUpcase, conn: DbConn) -> EmptyResult { } else { err!("Registration email does not match invite email") } - } else if Invitation::take(&email, &conn) { - for mut user_org in UserOrganization::find_invited_by_user(&user.uuid, &conn).iter_mut() { + } else if Invitation::take(&email, &conn).await { + for mut user_org in UserOrganization::find_invited_by_user(&user.uuid, &conn).await.iter_mut() { user_org.status = UserOrgStatus::Accepted as i32; - user_org.save(&conn)?; + user_org.save(&conn).await?; } user - } else if EmergencyAccess::find_invited_by_grantee_email(&email, &conn).is_some() { + } else if EmergencyAccess::find_invited_by_grantee_email(&email, &conn).await.is_some() { user } else if CONFIG.is_signup_allowed(&email) { err!("Account with this email already exists") @@ -102,7 +102,7 @@ fn register(data: JsonUpcase, conn: DbConn) -> EmptyResult { // Order is important here; the invitation check must come first // because the vaultwarden admin can invite anyone, regardless // of other signup restrictions. 
- if Invitation::take(&email, &conn) || CONFIG.is_signup_allowed(&email) { + if Invitation::take(&email, &conn).await || CONFIG.is_signup_allowed(&email) { User::new(email.clone()) } else { err!("Registration not allowed or user already exists") @@ -111,7 +111,7 @@ fn register(data: JsonUpcase, conn: DbConn) -> EmptyResult { }; // Make sure we don't leave a lingering invitation. - Invitation::take(&email, &conn); + Invitation::take(&email, &conn).await; if let Some(client_kdf_iter) = data.KdfIterations { user.client_kdf_iter = client_kdf_iter; @@ -150,12 +150,12 @@ fn register(data: JsonUpcase, conn: DbConn) -> EmptyResult { } } - user.save(&conn) + user.save(&conn).await } #[get("/accounts/profile")] -fn profile(headers: Headers, conn: DbConn) -> Json { - Json(headers.user.to_json(&conn)) +async fn profile(headers: Headers, conn: DbConn) -> Json { + Json(headers.user.to_json(&conn).await) } #[derive(Deserialize, Debug)] @@ -168,12 +168,12 @@ struct ProfileData { } #[put("/accounts/profile", data = "")] -fn put_profile(data: JsonUpcase, headers: Headers, conn: DbConn) -> JsonResult { - post_profile(data, headers, conn) +async fn put_profile(data: JsonUpcase, headers: Headers, conn: DbConn) -> JsonResult { + post_profile(data, headers, conn).await } #[post("/accounts/profile", data = "")] -fn post_profile(data: JsonUpcase, headers: Headers, conn: DbConn) -> JsonResult { +async fn post_profile(data: JsonUpcase, headers: Headers, conn: DbConn) -> JsonResult { let data: ProfileData = data.into_inner().data; let mut user = headers.user; @@ -183,13 +183,13 @@ fn post_profile(data: JsonUpcase, headers: Headers, conn: DbConn) - Some(ref h) if h.is_empty() => None, _ => data.MasterPasswordHint, }; - user.save(&conn)?; - Ok(Json(user.to_json(&conn))) + user.save(&conn).await?; + Ok(Json(user.to_json(&conn).await)) } #[get("/users//public-key")] -fn get_public_keys(uuid: String, _headers: Headers, conn: DbConn) -> JsonResult { - let user = match User::find_by_uuid(&uuid, 
&conn) { +async fn get_public_keys(uuid: String, _headers: Headers, conn: DbConn) -> JsonResult { + let user = match User::find_by_uuid(&uuid, &conn).await { Some(user) => user, None => err!("User doesn't exist"), }; @@ -202,7 +202,7 @@ fn get_public_keys(uuid: String, _headers: Headers, conn: DbConn) -> JsonResult } #[post("/accounts/keys", data = "")] -fn post_keys(data: JsonUpcase, headers: Headers, conn: DbConn) -> JsonResult { +async fn post_keys(data: JsonUpcase, headers: Headers, conn: DbConn) -> JsonResult { let data: KeysData = data.into_inner().data; let mut user = headers.user; @@ -210,7 +210,7 @@ fn post_keys(data: JsonUpcase, headers: Headers, conn: DbConn) -> Json user.private_key = Some(data.EncryptedPrivateKey); user.public_key = Some(data.PublicKey); - user.save(&conn)?; + user.save(&conn).await?; Ok(Json(json!({ "PrivateKey": user.private_key, @@ -228,7 +228,7 @@ struct ChangePassData { } #[post("/accounts/password", data = "")] -fn post_password(data: JsonUpcase, headers: Headers, conn: DbConn) -> EmptyResult { +async fn post_password(data: JsonUpcase, headers: Headers, conn: DbConn) -> EmptyResult { let data: ChangePassData = data.into_inner().data; let mut user = headers.user; @@ -241,7 +241,7 @@ fn post_password(data: JsonUpcase, headers: Headers, conn: DbCon Some(vec![String::from("post_rotatekey"), String::from("get_contacts"), String::from("get_public_keys")]), ); user.akey = data.Key; - user.save(&conn) + user.save(&conn).await } #[derive(Deserialize)] @@ -256,7 +256,7 @@ struct ChangeKdfData { } #[post("/accounts/kdf", data = "")] -fn post_kdf(data: JsonUpcase, headers: Headers, conn: DbConn) -> EmptyResult { +async fn post_kdf(data: JsonUpcase, headers: Headers, conn: DbConn) -> EmptyResult { let data: ChangeKdfData = data.into_inner().data; let mut user = headers.user; @@ -268,7 +268,7 @@ fn post_kdf(data: JsonUpcase, headers: Headers, conn: DbConn) -> user.client_kdf_type = data.Kdf; user.set_password(&data.NewMasterPasswordHash, 
None); user.akey = data.Key; - user.save(&conn) + user.save(&conn).await } #[derive(Deserialize)] @@ -291,7 +291,7 @@ struct KeyData { } #[post("/accounts/key", data = "")] -fn post_rotatekey(data: JsonUpcase, headers: Headers, conn: DbConn, nt: Notify) -> EmptyResult { +async fn post_rotatekey(data: JsonUpcase, headers: Headers, conn: DbConn, nt: Notify<'_>) -> EmptyResult { let data: KeyData = data.into_inner().data; if !headers.user.check_valid_password(&data.MasterPasswordHash) { @@ -302,7 +302,7 @@ fn post_rotatekey(data: JsonUpcase, headers: Headers, conn: DbConn, nt: // Update folder data for folder_data in data.Folders { - let mut saved_folder = match Folder::find_by_uuid(&folder_data.Id, &conn) { + let mut saved_folder = match Folder::find_by_uuid(&folder_data.Id, &conn).await { Some(folder) => folder, None => err!("Folder doesn't exist"), }; @@ -312,14 +312,14 @@ fn post_rotatekey(data: JsonUpcase, headers: Headers, conn: DbConn, nt: } saved_folder.name = folder_data.Name; - saved_folder.save(&conn)? + saved_folder.save(&conn).await? } // Update cipher data use super::ciphers::update_cipher_from_data; for cipher_data in data.Ciphers { - let mut saved_cipher = match Cipher::find_by_uuid(cipher_data.Id.as_ref().unwrap(), &conn) { + let mut saved_cipher = match Cipher::find_by_uuid(cipher_data.Id.as_ref().unwrap(), &conn).await { Some(cipher) => cipher, None => err!("Cipher doesn't exist"), }; @@ -330,7 +330,7 @@ fn post_rotatekey(data: JsonUpcase, headers: Headers, conn: DbConn, nt: // Prevent triggering cipher updates via WebSockets by settings UpdateType::None // The user sessions are invalidated because all the ciphers were re-encrypted and thus triggering an update could cause issues. - update_cipher_from_data(&mut saved_cipher, cipher_data, &headers, false, &conn, &nt, UpdateType::None)? + update_cipher_from_data(&mut saved_cipher, cipher_data, &headers, false, &conn, &nt, UpdateType::None).await? 
} // Update user data @@ -340,11 +340,11 @@ fn post_rotatekey(data: JsonUpcase, headers: Headers, conn: DbConn, nt: user.private_key = Some(data.PrivateKey); user.reset_security_stamp(); - user.save(&conn) + user.save(&conn).await } #[post("/accounts/security-stamp", data = "")] -fn post_sstamp(data: JsonUpcase, headers: Headers, conn: DbConn) -> EmptyResult { +async fn post_sstamp(data: JsonUpcase, headers: Headers, conn: DbConn) -> EmptyResult { let data: PasswordData = data.into_inner().data; let mut user = headers.user; @@ -352,9 +352,9 @@ fn post_sstamp(data: JsonUpcase, headers: Headers, conn: DbConn) - err!("Invalid password") } - Device::delete_all_by_user(&user.uuid, &conn)?; + Device::delete_all_by_user(&user.uuid, &conn).await?; user.reset_security_stamp(); - user.save(&conn) + user.save(&conn).await } #[derive(Deserialize)] @@ -365,7 +365,7 @@ struct EmailTokenData { } #[post("/accounts/email-token", data = "")] -fn post_email_token(data: JsonUpcase, headers: Headers, conn: DbConn) -> EmptyResult { +async fn post_email_token(data: JsonUpcase, headers: Headers, conn: DbConn) -> EmptyResult { let data: EmailTokenData = data.into_inner().data; let mut user = headers.user; @@ -373,7 +373,7 @@ fn post_email_token(data: JsonUpcase, headers: Headers, conn: Db err!("Invalid password") } - if User::find_by_mail(&data.NewEmail, &conn).is_some() { + if User::find_by_mail(&data.NewEmail, &conn).await.is_some() { err!("Email already in use"); } @@ -391,7 +391,7 @@ fn post_email_token(data: JsonUpcase, headers: Headers, conn: Db user.email_new = Some(data.NewEmail); user.email_new_token = Some(token); - user.save(&conn) + user.save(&conn).await } #[derive(Deserialize)] @@ -406,7 +406,7 @@ struct ChangeEmailData { } #[post("/accounts/email", data = "")] -fn post_email(data: JsonUpcase, headers: Headers, conn: DbConn) -> EmptyResult { +async fn post_email(data: JsonUpcase, headers: Headers, conn: DbConn) -> EmptyResult { let data: ChangeEmailData = 
data.into_inner().data; let mut user = headers.user; @@ -414,7 +414,7 @@ fn post_email(data: JsonUpcase, headers: Headers, conn: DbConn) err!("Invalid password") } - if User::find_by_mail(&data.NewEmail, &conn).is_some() { + if User::find_by_mail(&data.NewEmail, &conn).await.is_some() { err!("Email already in use"); } @@ -449,7 +449,7 @@ fn post_email(data: JsonUpcase, headers: Headers, conn: DbConn) user.set_password(&data.NewMasterPasswordHash, None); user.akey = data.Key; - user.save(&conn) + user.save(&conn).await } #[post("/accounts/verify-email")] @@ -475,10 +475,10 @@ struct VerifyEmailTokenData { } #[post("/accounts/verify-email-token", data = "")] -fn post_verify_email_token(data: JsonUpcase, conn: DbConn) -> EmptyResult { +async fn post_verify_email_token(data: JsonUpcase, conn: DbConn) -> EmptyResult { let data: VerifyEmailTokenData = data.into_inner().data; - let mut user = match User::find_by_uuid(&data.UserId, &conn) { + let mut user = match User::find_by_uuid(&data.UserId, &conn).await { Some(user) => user, None => err!("User doesn't exist"), }; @@ -493,7 +493,7 @@ fn post_verify_email_token(data: JsonUpcase, conn: DbConn) user.verified_at = Some(Utc::now().naive_utc()); user.last_verifying_at = None; user.login_verify_count = 0; - if let Err(e) = user.save(&conn) { + if let Err(e) = user.save(&conn).await { error!("Error saving email verification: {:#?}", e); } @@ -507,13 +507,11 @@ struct DeleteRecoverData { } #[post("/accounts/delete-recover", data = "")] -fn post_delete_recover(data: JsonUpcase, conn: DbConn) -> EmptyResult { +async fn post_delete_recover(data: JsonUpcase, conn: DbConn) -> EmptyResult { let data: DeleteRecoverData = data.into_inner().data; - let user = User::find_by_mail(&data.Email, &conn); - if CONFIG.mail_enabled() { - if let Some(user) = user { + if let Some(user) = User::find_by_mail(&data.Email, &conn).await { if let Err(e) = mail::send_delete_account(&user.email, &user.uuid) { error!("Error sending delete account email: 
{:#?}", e); } @@ -536,10 +534,10 @@ struct DeleteRecoverTokenData { } #[post("/accounts/delete-recover-token", data = "")] -fn post_delete_recover_token(data: JsonUpcase, conn: DbConn) -> EmptyResult { +async fn post_delete_recover_token(data: JsonUpcase, conn: DbConn) -> EmptyResult { let data: DeleteRecoverTokenData = data.into_inner().data; - let user = match User::find_by_uuid(&data.UserId, &conn) { + let user = match User::find_by_uuid(&data.UserId, &conn).await { Some(user) => user, None => err!("User doesn't exist"), }; @@ -551,16 +549,16 @@ fn post_delete_recover_token(data: JsonUpcase, conn: DbC if claims.sub != user.uuid { err!("Invalid claim"); } - user.delete(&conn) + user.delete(&conn).await } #[post("/accounts/delete", data = "")] -fn post_delete_account(data: JsonUpcase, headers: Headers, conn: DbConn) -> EmptyResult { - delete_account(data, headers, conn) +async fn post_delete_account(data: JsonUpcase, headers: Headers, conn: DbConn) -> EmptyResult { + delete_account(data, headers, conn).await } #[delete("/accounts", data = "")] -fn delete_account(data: JsonUpcase, headers: Headers, conn: DbConn) -> EmptyResult { +async fn delete_account(data: JsonUpcase, headers: Headers, conn: DbConn) -> EmptyResult { let data: PasswordData = data.into_inner().data; let user = headers.user; @@ -568,7 +566,7 @@ fn delete_account(data: JsonUpcase, headers: Headers, conn: DbConn err!("Invalid password") } - user.delete(&conn) + user.delete(&conn).await } #[get("/accounts/revision-date")] @@ -584,7 +582,7 @@ struct PasswordHintData { } #[post("/accounts/password-hint", data = "")] -fn password_hint(data: JsonUpcase, conn: DbConn) -> EmptyResult { +async fn password_hint(data: JsonUpcase, conn: DbConn) -> EmptyResult { if !CONFIG.mail_enabled() && !CONFIG.show_password_hint() { err!("This server is not configured to provide password hints."); } @@ -594,7 +592,7 @@ fn password_hint(data: JsonUpcase, conn: DbConn) -> EmptyResul let data: PasswordHintData = 
data.into_inner().data; let email = &data.Email; - match User::find_by_mail(email, &conn) { + match User::find_by_mail(email, &conn).await { None => { // To prevent user enumeration, act as if the user exists. if CONFIG.mail_enabled() { @@ -633,10 +631,10 @@ struct PreloginData { } #[post("/accounts/prelogin", data = "")] -fn prelogin(data: JsonUpcase, conn: DbConn) -> Json { +async fn prelogin(data: JsonUpcase, conn: DbConn) -> Json { let data: PreloginData = data.into_inner().data; - let (kdf_type, kdf_iter) = match User::find_by_mail(&data.Email, &conn) { + let (kdf_type, kdf_iter) = match User::find_by_mail(&data.Email, &conn).await { Some(user) => (user.client_kdf_type, user.client_kdf_iter), None => (User::CLIENT_KDF_TYPE_DEFAULT, User::CLIENT_KDF_ITER_DEFAULT), }; @@ -666,7 +664,7 @@ fn verify_password(data: JsonUpcase, headers: Headers Ok(()) } -fn _api_key(data: JsonUpcase, rotate: bool, headers: Headers, conn: DbConn) -> JsonResult { +async fn _api_key(data: JsonUpcase, rotate: bool, headers: Headers, conn: DbConn) -> JsonResult { let data: SecretVerificationRequest = data.into_inner().data; let mut user = headers.user; @@ -676,7 +674,7 @@ fn _api_key(data: JsonUpcase, rotate: bool, headers: if rotate || user.api_key.is_none() { user.api_key = Some(crypto::generate_api_key()); - user.save(&conn).expect("Error saving API key"); + user.save(&conn).await.expect("Error saving API key"); } Ok(Json(json!({ @@ -686,11 +684,11 @@ fn _api_key(data: JsonUpcase, rotate: bool, headers: } #[post("/accounts/api-key", data = "")] -fn api_key(data: JsonUpcase, headers: Headers, conn: DbConn) -> JsonResult { - _api_key(data, false, headers, conn) +async fn api_key(data: JsonUpcase, headers: Headers, conn: DbConn) -> JsonResult { + _api_key(data, false, headers, conn).await } #[post("/accounts/rotate-api-key", data = "")] -fn rotate_api_key(data: JsonUpcase, headers: Headers, conn: DbConn) -> JsonResult { - _api_key(data, true, headers, conn) +async fn rotate_api_key(data: 
JsonUpcase, headers: Headers, conn: DbConn) -> JsonResult { + _api_key(data, true, headers, conn).await } diff --git a/src/api/core/ciphers.rs b/src/api/core/ciphers.rs index 1e6d6b1b..b84002aa 100644 --- a/src/api/core/ciphers.rs +++ b/src/api/core/ciphers.rs @@ -17,6 +17,8 @@ use crate::{ CONFIG, }; +use futures::{stream, stream::StreamExt}; + pub fn routes() -> Vec { // Note that many routes have an `admin` variant; this seems to be // because the stored procedure that upstream Bitwarden uses to determine @@ -83,7 +85,7 @@ pub fn routes() -> Vec { pub async fn purge_trashed_ciphers(pool: DbPool) { debug!("Purging trashed ciphers"); if let Ok(conn) = pool.get().await { - Cipher::purge_trash(&conn); + Cipher::purge_trash(&conn).await; } else { error!("Failed to get DB connection while purging trashed ciphers") } @@ -96,25 +98,33 @@ struct SyncData { } #[get("/sync?")] -fn sync(data: SyncData, headers: Headers, conn: DbConn) -> Json { - let user_json = headers.user.to_json(&conn); +async fn sync(data: SyncData, headers: Headers, conn: DbConn) -> Json { + let user_json = headers.user.to_json(&conn).await; - let folders = Folder::find_by_user(&headers.user.uuid, &conn); + let folders = Folder::find_by_user(&headers.user.uuid, &conn).await; let folders_json: Vec = folders.iter().map(Folder::to_json).collect(); - let collections = Collection::find_by_user_uuid(&headers.user.uuid, &conn); - let collections_json: Vec = - collections.iter().map(|c| c.to_json_details(&headers.user.uuid, &conn)).collect(); + let collections_json = stream::iter(Collection::find_by_user_uuid(&headers.user.uuid, &conn).await) + .then(|c| async { + let c = c; // Move out this single variable + c.to_json_details(&headers.user.uuid, &conn).await + }) + .collect::>() + .await; let policies = OrgPolicy::find_confirmed_by_user(&headers.user.uuid, &conn); - let policies_json: Vec = policies.iter().map(OrgPolicy::to_json).collect(); + let policies_json: Vec = 
policies.await.iter().map(OrgPolicy::to_json).collect(); - let ciphers = Cipher::find_by_user_visible(&headers.user.uuid, &conn); - let ciphers_json: Vec = - ciphers.iter().map(|c| c.to_json(&headers.host, &headers.user.uuid, &conn)).collect(); + let ciphers_json = stream::iter(Cipher::find_by_user_visible(&headers.user.uuid, &conn).await) + .then(|c| async { + let c = c; // Move out this single variable + c.to_json(&headers.host, &headers.user.uuid, &conn).await + }) + .collect::>() + .await; let sends = Send::find_by_user(&headers.user.uuid, &conn); - let sends_json: Vec = sends.iter().map(|s| s.to_json()).collect(); + let sends_json: Vec = sends.await.iter().map(|s| s.to_json()).collect(); let domains_json = if data.exclude_domains { Value::Null @@ -136,11 +146,14 @@ fn sync(data: SyncData, headers: Headers, conn: DbConn) -> Json { } #[get("/ciphers")] -fn get_ciphers(headers: Headers, conn: DbConn) -> Json { - let ciphers = Cipher::find_by_user_visible(&headers.user.uuid, &conn); - - let ciphers_json: Vec = - ciphers.iter().map(|c| c.to_json(&headers.host, &headers.user.uuid, &conn)).collect(); +async fn get_ciphers(headers: Headers, conn: DbConn) -> Json { + let ciphers_json = stream::iter(Cipher::find_by_user_visible(&headers.user.uuid, &conn).await) + .then(|c| async { + let c = c; // Move out this single variable + c.to_json(&headers.host, &headers.user.uuid, &conn).await + }) + .collect::>() + .await; Json(json!({ "Data": ciphers_json, @@ -150,28 +163,28 @@ fn get_ciphers(headers: Headers, conn: DbConn) -> Json { } #[get("/ciphers/")] -fn get_cipher(uuid: String, headers: Headers, conn: DbConn) -> JsonResult { - let cipher = match Cipher::find_by_uuid(&uuid, &conn) { +async fn get_cipher(uuid: String, headers: Headers, conn: DbConn) -> JsonResult { + let cipher = match Cipher::find_by_uuid(&uuid, &conn).await { Some(cipher) => cipher, None => err!("Cipher doesn't exist"), }; - if !cipher.is_accessible_to_user(&headers.user.uuid, &conn) { + if 
!cipher.is_accessible_to_user(&headers.user.uuid, &conn).await { err!("Cipher is not owned by user") } - Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, &conn))) + Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, &conn).await)) } #[get("/ciphers//admin")] -fn get_cipher_admin(uuid: String, headers: Headers, conn: DbConn) -> JsonResult { +async fn get_cipher_admin(uuid: String, headers: Headers, conn: DbConn) -> JsonResult { // TODO: Implement this correctly - get_cipher(uuid, headers, conn) + get_cipher(uuid, headers, conn).await } #[get("/ciphers//details")] -fn get_cipher_details(uuid: String, headers: Headers, conn: DbConn) -> JsonResult { - get_cipher(uuid, headers, conn) +async fn get_cipher_details(uuid: String, headers: Headers, conn: DbConn) -> JsonResult { + get_cipher(uuid, headers, conn).await } #[derive(Deserialize, Debug)] @@ -229,15 +242,25 @@ pub struct Attachments2Data { /// Called when an org admin clones an org cipher. #[post("/ciphers/admin", data = "")] -fn post_ciphers_admin(data: JsonUpcase, headers: Headers, conn: DbConn, nt: Notify) -> JsonResult { - post_ciphers_create(data, headers, conn, nt) +async fn post_ciphers_admin( + data: JsonUpcase, + headers: Headers, + conn: DbConn, + nt: Notify<'_>, +) -> JsonResult { + post_ciphers_create(data, headers, conn, nt).await } /// Called when creating a new org-owned cipher, or cloning a cipher (whether /// user- or org-owned). When cloning a cipher to a user-owned cipher, /// `organizationId` is null. #[post("/ciphers/create", data = "")] -fn post_ciphers_create(data: JsonUpcase, headers: Headers, conn: DbConn, nt: Notify) -> JsonResult { +async fn post_ciphers_create( + data: JsonUpcase, + headers: Headers, + conn: DbConn, + nt: Notify<'_>, +) -> JsonResult { let mut data: ShareCipherData = data.into_inner().data; // Check if there are one more more collections selected when this cipher is part of an organization. 
@@ -249,11 +272,11 @@ fn post_ciphers_create(data: JsonUpcase, headers: Headers, conn // This check is usually only needed in update_cipher_from_data(), but we // need it here as well to avoid creating an empty cipher in the call to // cipher.save() below. - enforce_personal_ownership_policy(Some(&data.Cipher), &headers, &conn)?; + enforce_personal_ownership_policy(Some(&data.Cipher), &headers, &conn).await?; let mut cipher = Cipher::new(data.Cipher.Type, data.Cipher.Name.clone()); cipher.user_uuid = Some(headers.user.uuid.clone()); - cipher.save(&conn)?; + cipher.save(&conn).await?; // When cloning a cipher, the Bitwarden clients seem to set this field // based on the cipher being cloned (when creating a new cipher, it's set @@ -263,12 +286,12 @@ fn post_ciphers_create(data: JsonUpcase, headers: Headers, conn // or otherwise), we can just ignore this field entirely. data.Cipher.LastKnownRevisionDate = None; - share_cipher_by_uuid(&cipher.uuid, data, &headers, &conn, &nt) + share_cipher_by_uuid(&cipher.uuid, data, &headers, &conn, &nt).await } /// Called when creating a new user-owned cipher. 
#[post("/ciphers", data = "")] -fn post_ciphers(data: JsonUpcase, headers: Headers, conn: DbConn, nt: Notify) -> JsonResult { +async fn post_ciphers(data: JsonUpcase, headers: Headers, conn: DbConn, nt: Notify<'_>) -> JsonResult { let mut data: CipherData = data.into_inner().data; // The web/browser clients set this field to null as expected, but the @@ -278,9 +301,9 @@ fn post_ciphers(data: JsonUpcase, headers: Headers, conn: DbConn, nt data.LastKnownRevisionDate = None; let mut cipher = Cipher::new(data.Type, data.Name.clone()); - update_cipher_from_data(&mut cipher, data, &headers, false, &conn, &nt, UpdateType::CipherCreate)?; + update_cipher_from_data(&mut cipher, data, &headers, false, &conn, &nt, UpdateType::CipherCreate).await?; - Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, &conn))) + Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, &conn).await)) } /// Enforces the personal ownership policy on user-owned ciphers, if applicable. @@ -290,27 +313,27 @@ fn post_ciphers(data: JsonUpcase, headers: Headers, conn: DbConn, nt /// allowed to delete or share such ciphers to an org, however. 
/// /// Ref: https://bitwarden.com/help/article/policies/#personal-ownership -fn enforce_personal_ownership_policy(data: Option<&CipherData>, headers: &Headers, conn: &DbConn) -> EmptyResult { +async fn enforce_personal_ownership_policy(data: Option<&CipherData>, headers: &Headers, conn: &DbConn) -> EmptyResult { if data.is_none() || data.unwrap().OrganizationId.is_none() { let user_uuid = &headers.user.uuid; let policy_type = OrgPolicyType::PersonalOwnership; - if OrgPolicy::is_applicable_to_user(user_uuid, policy_type, conn) { + if OrgPolicy::is_applicable_to_user(user_uuid, policy_type, conn).await { err!("Due to an Enterprise Policy, you are restricted from saving items to your personal vault.") } } Ok(()) } -pub fn update_cipher_from_data( +pub async fn update_cipher_from_data( cipher: &mut Cipher, data: CipherData, headers: &Headers, shared_to_collection: bool, conn: &DbConn, - nt: &Notify, + nt: &Notify<'_>, ut: UpdateType, ) -> EmptyResult { - enforce_personal_ownership_policy(Some(&data), headers, conn)?; + enforce_personal_ownership_policy(Some(&data), headers, conn).await?; // Check that the client isn't updating an existing cipher with stale data. if let Some(dt) = data.LastKnownRevisionDate { @@ -329,12 +352,12 @@ pub fn update_cipher_from_data( } if let Some(org_id) = data.OrganizationId { - match UserOrganization::find_by_user_and_org(&headers.user.uuid, &org_id, conn) { + match UserOrganization::find_by_user_and_org(&headers.user.uuid, &org_id, conn).await { None => err!("You don't have permission to add item to organization"), Some(org_user) => { if shared_to_collection || org_user.has_full_access() - || cipher.is_write_accessible_to_user(&headers.user.uuid, conn) + || cipher.is_write_accessible_to_user(&headers.user.uuid, conn).await { cipher.organization_uuid = Some(org_id); // After some discussion in PR #1329 re-added the user_uuid = None again. 
@@ -353,7 +376,7 @@ pub fn update_cipher_from_data( } if let Some(ref folder_id) = data.FolderId { - match Folder::find_by_uuid(folder_id, conn) { + match Folder::find_by_uuid(folder_id, conn).await { Some(folder) => { if folder.user_uuid != headers.user.uuid { err!("Folder is not owned by user") @@ -366,7 +389,7 @@ pub fn update_cipher_from_data( // Modify attachments name and keys when rotating if let Some(attachments) = data.Attachments2 { for (id, attachment) in attachments { - let mut saved_att = match Attachment::find_by_id(&id, conn) { + let mut saved_att = match Attachment::find_by_id(&id, conn).await { Some(att) => att, None => err!("Attachment doesn't exist"), }; @@ -381,7 +404,7 @@ pub fn update_cipher_from_data( saved_att.akey = Some(attachment.Key); saved_att.file_name = attachment.FileName; - saved_att.save(conn)?; + saved_att.save(conn).await?; } } @@ -427,12 +450,12 @@ pub fn update_cipher_from_data( cipher.password_history = data.PasswordHistory.map(|f| f.to_string()); cipher.reprompt = data.Reprompt; - cipher.save(conn)?; - cipher.move_to_folder(data.FolderId, &headers.user.uuid, conn)?; - cipher.set_favorite(data.Favorite, &headers.user.uuid, conn)?; + cipher.save(conn).await?; + cipher.move_to_folder(data.FolderId, &headers.user.uuid, conn).await?; + cipher.set_favorite(data.Favorite, &headers.user.uuid, conn).await?; if ut != UpdateType::None { - nt.send_cipher_update(ut, cipher, &cipher.update_users_revision(conn)); + nt.send_cipher_update(ut, cipher, &cipher.update_users_revision(conn).await); } Ok(()) @@ -458,8 +481,13 @@ struct RelationsData { } #[post("/ciphers/import", data = "")] -fn post_ciphers_import(data: JsonUpcase, headers: Headers, conn: DbConn, nt: Notify) -> EmptyResult { - enforce_personal_ownership_policy(None, &headers, &conn)?; +async fn post_ciphers_import( + data: JsonUpcase, + headers: Headers, + conn: DbConn, + nt: Notify<'_>, +) -> EmptyResult { + enforce_personal_ownership_policy(None, &headers, &conn).await?; let 
data: ImportData = data.into_inner().data; @@ -467,7 +495,7 @@ fn post_ciphers_import(data: JsonUpcase, headers: Headers, conn: DbC let mut folders: Vec<_> = Vec::new(); for folder in data.Folders.into_iter() { let mut new_folder = Folder::new(headers.user.uuid.clone(), folder.Name); - new_folder.save(&conn)?; + new_folder.save(&conn).await?; folders.push(new_folder); } @@ -485,48 +513,60 @@ fn post_ciphers_import(data: JsonUpcase, headers: Headers, conn: DbC cipher_data.FolderId = folder_uuid; let mut cipher = Cipher::new(cipher_data.Type, cipher_data.Name.clone()); - update_cipher_from_data(&mut cipher, cipher_data, &headers, false, &conn, &nt, UpdateType::None)?; + update_cipher_from_data(&mut cipher, cipher_data, &headers, false, &conn, &nt, UpdateType::None).await?; } let mut user = headers.user; - user.update_revision(&conn)?; + user.update_revision(&conn).await?; nt.send_user_update(UpdateType::Vault, &user); Ok(()) } /// Called when an org admin modifies an existing org cipher. #[put("/ciphers//admin", data = "")] -fn put_cipher_admin( +async fn put_cipher_admin( uuid: String, data: JsonUpcase, headers: Headers, conn: DbConn, - nt: Notify, + nt: Notify<'_>, ) -> JsonResult { - put_cipher(uuid, data, headers, conn, nt) + put_cipher(uuid, data, headers, conn, nt).await } #[post("/ciphers//admin", data = "")] -fn post_cipher_admin( +async fn post_cipher_admin( uuid: String, data: JsonUpcase, headers: Headers, conn: DbConn, - nt: Notify, + nt: Notify<'_>, ) -> JsonResult { - post_cipher(uuid, data, headers, conn, nt) + post_cipher(uuid, data, headers, conn, nt).await } #[post("/ciphers/", data = "")] -fn post_cipher(uuid: String, data: JsonUpcase, headers: Headers, conn: DbConn, nt: Notify) -> JsonResult { - put_cipher(uuid, data, headers, conn, nt) +async fn post_cipher( + uuid: String, + data: JsonUpcase, + headers: Headers, + conn: DbConn, + nt: Notify<'_>, +) -> JsonResult { + put_cipher(uuid, data, headers, conn, nt).await } #[put("/ciphers/", data = "")] 
-fn put_cipher(uuid: String, data: JsonUpcase, headers: Headers, conn: DbConn, nt: Notify) -> JsonResult { +async fn put_cipher( + uuid: String, + data: JsonUpcase, + headers: Headers, + conn: DbConn, + nt: Notify<'_>, +) -> JsonResult { let data: CipherData = data.into_inner().data; - let mut cipher = match Cipher::find_by_uuid(&uuid, &conn) { + let mut cipher = match Cipher::find_by_uuid(&uuid, &conn).await { Some(cipher) => cipher, None => err!("Cipher doesn't exist"), }; @@ -536,13 +576,13 @@ fn put_cipher(uuid: String, data: JsonUpcase, headers: Headers, conn // cipher itself, so the user shouldn't need write access to change these. // Interestingly, upstream Bitwarden doesn't properly handle this either. - if !cipher.is_write_accessible_to_user(&headers.user.uuid, &conn) { + if !cipher.is_write_accessible_to_user(&headers.user.uuid, &conn).await { err!("Cipher is not write accessible") } - update_cipher_from_data(&mut cipher, data, &headers, false, &conn, &nt, UpdateType::CipherUpdate)?; + update_cipher_from_data(&mut cipher, data, &headers, false, &conn, &nt, UpdateType::CipherUpdate).await?; - Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, &conn))) + Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, &conn).await)) } #[derive(Deserialize)] @@ -552,37 +592,37 @@ struct CollectionsAdminData { } #[put("/ciphers//collections", data = "")] -fn put_collections_update( +async fn put_collections_update( uuid: String, data: JsonUpcase, headers: Headers, conn: DbConn, ) -> EmptyResult { - post_collections_admin(uuid, data, headers, conn) + post_collections_admin(uuid, data, headers, conn).await } #[post("/ciphers//collections", data = "")] -fn post_collections_update( +async fn post_collections_update( uuid: String, data: JsonUpcase, headers: Headers, conn: DbConn, ) -> EmptyResult { - post_collections_admin(uuid, data, headers, conn) + post_collections_admin(uuid, data, headers, conn).await } #[put("/ciphers//collections-admin", data = "")] -fn 
put_collections_admin( +async fn put_collections_admin( uuid: String, data: JsonUpcase, headers: Headers, conn: DbConn, ) -> EmptyResult { - post_collections_admin(uuid, data, headers, conn) + post_collections_admin(uuid, data, headers, conn).await } #[post("/ciphers//collections-admin", data = "")] -fn post_collections_admin( +async fn post_collections_admin( uuid: String, data: JsonUpcase, headers: Headers, @@ -590,30 +630,30 @@ fn post_collections_admin( ) -> EmptyResult { let data: CollectionsAdminData = data.into_inner().data; - let cipher = match Cipher::find_by_uuid(&uuid, &conn) { + let cipher = match Cipher::find_by_uuid(&uuid, &conn).await { Some(cipher) => cipher, None => err!("Cipher doesn't exist"), }; - if !cipher.is_write_accessible_to_user(&headers.user.uuid, &conn) { + if !cipher.is_write_accessible_to_user(&headers.user.uuid, &conn).await { err!("Cipher is not write accessible") } let posted_collections: HashSet = data.CollectionIds.iter().cloned().collect(); let current_collections: HashSet = - cipher.get_collections(&headers.user.uuid, &conn).iter().cloned().collect(); + cipher.get_collections(&headers.user.uuid, &conn).await.iter().cloned().collect(); for collection in posted_collections.symmetric_difference(¤t_collections) { - match Collection::find_by_uuid(collection, &conn) { + match Collection::find_by_uuid(collection, &conn).await { None => err!("Invalid collection ID provided"), Some(collection) => { - if collection.is_writable_by_user(&headers.user.uuid, &conn) { + if collection.is_writable_by_user(&headers.user.uuid, &conn).await { if posted_collections.contains(&collection.uuid) { // Add to collection - CollectionCipher::save(&cipher.uuid, &collection.uuid, &conn)?; + CollectionCipher::save(&cipher.uuid, &collection.uuid, &conn).await?; } else { // Remove from collection - CollectionCipher::delete(&cipher.uuid, &collection.uuid, &conn)?; + CollectionCipher::delete(&cipher.uuid, &collection.uuid, &conn).await?; } } else { err!("No 
rights to modify the collection") @@ -633,29 +673,29 @@ struct ShareCipherData { } #[post("/ciphers//share", data = "")] -fn post_cipher_share( +async fn post_cipher_share( uuid: String, data: JsonUpcase, headers: Headers, conn: DbConn, - nt: Notify, + nt: Notify<'_>, ) -> JsonResult { let data: ShareCipherData = data.into_inner().data; - share_cipher_by_uuid(&uuid, data, &headers, &conn, &nt) + share_cipher_by_uuid(&uuid, data, &headers, &conn, &nt).await } #[put("/ciphers//share", data = "")] -fn put_cipher_share( +async fn put_cipher_share( uuid: String, data: JsonUpcase, headers: Headers, conn: DbConn, - nt: Notify, + nt: Notify<'_>, ) -> JsonResult { let data: ShareCipherData = data.into_inner().data; - share_cipher_by_uuid(&uuid, data, &headers, &conn, &nt) + share_cipher_by_uuid(&uuid, data, &headers, &conn, &nt).await } #[derive(Deserialize)] @@ -666,11 +706,11 @@ struct ShareSelectedCipherData { } #[put("/ciphers/share", data = "")] -fn put_cipher_share_selected( +async fn put_cipher_share_selected( data: JsonUpcase, headers: Headers, conn: DbConn, - nt: Notify, + nt: Notify<'_>, ) -> EmptyResult { let mut data: ShareSelectedCipherData = data.into_inner().data; let mut cipher_ids: Vec = Vec::new(); @@ -697,7 +737,7 @@ fn put_cipher_share_selected( }; match shared_cipher_data.Cipher.Id.take() { - Some(id) => share_cipher_by_uuid(&id, shared_cipher_data, &headers, &conn, &nt)?, + Some(id) => share_cipher_by_uuid(&id, shared_cipher_data, &headers, &conn, &nt).await?, None => err!("Request missing ids field"), }; } @@ -705,16 +745,16 @@ fn put_cipher_share_selected( Ok(()) } -fn share_cipher_by_uuid( +async fn share_cipher_by_uuid( uuid: &str, data: ShareCipherData, headers: &Headers, conn: &DbConn, - nt: &Notify, + nt: &Notify<'_>, ) -> JsonResult { - let mut cipher = match Cipher::find_by_uuid(uuid, conn) { + let mut cipher = match Cipher::find_by_uuid(uuid, conn).await { Some(cipher) => { - if cipher.is_write_accessible_to_user(&headers.user.uuid, conn) { + 
if cipher.is_write_accessible_to_user(&headers.user.uuid, conn).await { cipher } else { err!("Cipher is not write accessible") @@ -731,11 +771,11 @@ fn share_cipher_by_uuid( None => {} Some(organization_uuid) => { for uuid in &data.CollectionIds { - match Collection::find_by_uuid_and_org(uuid, &organization_uuid, conn) { + match Collection::find_by_uuid_and_org(uuid, &organization_uuid, conn).await { None => err!("Invalid collection ID provided"), Some(collection) => { - if collection.is_writable_by_user(&headers.user.uuid, conn) { - CollectionCipher::save(&cipher.uuid, &collection.uuid, conn)?; + if collection.is_writable_by_user(&headers.user.uuid, conn).await { + CollectionCipher::save(&cipher.uuid, &collection.uuid, conn).await?; shared_to_collection = true; } else { err!("No rights to modify the collection") @@ -754,9 +794,10 @@ fn share_cipher_by_uuid( conn, nt, UpdateType::CipherUpdate, - )?; + ) + .await?; - Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, conn))) + Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, conn).await)) } /// v2 API for downloading an attachment. This just redirects the client to @@ -766,8 +807,8 @@ fn share_cipher_by_uuid( /// their object storage service. For self-hosted instances, it basically just /// redirects to the same location as before the v2 API. 
#[get("/ciphers//attachment/")] -fn get_attachment(uuid: String, attachment_id: String, headers: Headers, conn: DbConn) -> JsonResult { - match Attachment::find_by_id(&attachment_id, &conn) { +async fn get_attachment(uuid: String, attachment_id: String, headers: Headers, conn: DbConn) -> JsonResult { + match Attachment::find_by_id(&attachment_id, &conn).await { Some(attachment) if uuid == attachment.cipher_uuid => Ok(Json(attachment.to_json(&headers.host))), Some(_) => err!("Attachment doesn't belong to cipher"), None => err!("Attachment doesn't exist"), @@ -793,18 +834,18 @@ enum FileUploadType { /// For upstream's cloud-hosted service, it's an Azure object storage API. /// For self-hosted instances, it's another API on the local instance. #[post("/ciphers//attachment/v2", data = "")] -fn post_attachment_v2( +async fn post_attachment_v2( uuid: String, data: JsonUpcase, headers: Headers, conn: DbConn, ) -> JsonResult { - let cipher = match Cipher::find_by_uuid(&uuid, &conn) { + let cipher = match Cipher::find_by_uuid(&uuid, &conn).await { Some(cipher) => cipher, None => err!("Cipher doesn't exist"), }; - if !cipher.is_write_accessible_to_user(&headers.user.uuid, &conn) { + if !cipher.is_write_accessible_to_user(&headers.user.uuid, &conn).await { err!("Cipher is not write accessible") } @@ -812,7 +853,7 @@ fn post_attachment_v2( let data: AttachmentRequestData = data.into_inner().data; let attachment = Attachment::new(attachment_id.clone(), cipher.uuid.clone(), data.FileName, data.FileSize, Some(data.Key)); - attachment.save(&conn).expect("Error saving attachment"); + attachment.save(&conn).await.expect("Error saving attachment"); let url = format!("/ciphers/{}/attachment/{}", cipher.uuid, attachment_id); let response_key = match data.AdminRequest { @@ -825,7 +866,7 @@ fn post_attachment_v2( "AttachmentId": attachment_id, "Url": url, "FileUploadType": FileUploadType::Direct as i32, - response_key: cipher.to_json(&headers.host, &headers.user.uuid, &conn), + 
response_key: cipher.to_json(&headers.host, &headers.user.uuid, &conn).await, }))) } @@ -851,12 +892,12 @@ async fn save_attachment( conn: DbConn, nt: Notify<'_>, ) -> Result<(Cipher, DbConn), crate::error::Error> { - let cipher = match Cipher::find_by_uuid(&cipher_uuid, &conn) { + let cipher = match Cipher::find_by_uuid(&cipher_uuid, &conn).await { Some(cipher) => cipher, None => err!("Cipher doesn't exist"), }; - if !cipher.is_write_accessible_to_user(&headers.user.uuid, &conn) { + if !cipher.is_write_accessible_to_user(&headers.user.uuid, &conn).await { err!("Cipher is not write accessible") } @@ -871,7 +912,7 @@ async fn save_attachment( match CONFIG.user_attachment_limit() { Some(0) => err!("Attachments are disabled"), Some(limit_kb) => { - let left = (limit_kb * 1024) - Attachment::size_by_user(user_uuid, &conn) + size_adjust; + let left = (limit_kb * 1024) - Attachment::size_by_user(user_uuid, &conn).await + size_adjust; if left <= 0 { err!("Attachment storage limit reached! Delete some attachments to free up space") } @@ -883,7 +924,7 @@ async fn save_attachment( match CONFIG.org_attachment_limit() { Some(0) => err!("Attachments are disabled"), Some(limit_kb) => { - let left = (limit_kb * 1024) - Attachment::size_by_org(org_uuid, &conn) + size_adjust; + let left = (limit_kb * 1024) - Attachment::size_by_org(org_uuid, &conn).await + size_adjust; if left <= 0 { err!("Attachment storage limit reached! Delete some attachments to free up space") } @@ -927,10 +968,10 @@ async fn save_attachment( if size != attachment.file_size { // Update the attachment with the actual file size. 
attachment.file_size = size; - attachment.save(&conn).expect("Error updating attachment"); + attachment.save(&conn).await.expect("Error updating attachment"); } } else { - attachment.delete(&conn).ok(); + attachment.delete(&conn).await.ok(); err!(format!("Attachment size mismatch (expected within [{}, {}], got {})", min_size, max_size, size)); } @@ -945,12 +986,12 @@ async fn save_attachment( err!("No attachment key provided") } let attachment = Attachment::new(file_id, cipher_uuid.clone(), encrypted_filename.unwrap(), size, data.key); - attachment.save(&conn).expect("Error saving attachment"); + attachment.save(&conn).await.expect("Error saving attachment"); } data.data.persist_to(file_path).await?; - nt.send_cipher_update(UpdateType::CipherUpdate, &cipher, &cipher.update_users_revision(&conn)); + nt.send_cipher_update(UpdateType::CipherUpdate, &cipher, &cipher.update_users_revision(&conn).await); Ok((cipher, conn)) } @@ -968,7 +1009,7 @@ async fn post_attachment_v2_data( conn: DbConn, nt: Notify<'_>, ) -> EmptyResult { - let attachment = match Attachment::find_by_id(&attachment_id, &conn) { + let attachment = match Attachment::find_by_id(&attachment_id, &conn).await { Some(attachment) if uuid == attachment.cipher_uuid => Some(attachment), Some(_) => err!("Attachment doesn't belong to cipher"), None => err!("Attachment doesn't exist"), @@ -994,7 +1035,7 @@ async fn post_attachment( let (cipher, conn) = save_attachment(attachment, uuid, data, &headers, conn, nt).await?; - Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, &conn))) + Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, &conn).await)) } #[post("/ciphers//attachment-admin", format = "multipart/form-data", data = "")] @@ -1017,131 +1058,162 @@ async fn post_attachment_share( conn: DbConn, nt: Notify<'_>, ) -> JsonResult { - _delete_cipher_attachment_by_id(&uuid, &attachment_id, &headers, &conn, &nt)?; + _delete_cipher_attachment_by_id(&uuid, &attachment_id, &headers, &conn, &nt).await?; 
post_attachment(uuid, data, headers, conn, nt).await } #[post("/ciphers//attachment//delete-admin")] -fn delete_attachment_post_admin( +async fn delete_attachment_post_admin( uuid: String, attachment_id: String, headers: Headers, conn: DbConn, - nt: Notify, + nt: Notify<'_>, ) -> EmptyResult { - delete_attachment(uuid, attachment_id, headers, conn, nt) + delete_attachment(uuid, attachment_id, headers, conn, nt).await } #[post("/ciphers//attachment//delete")] -fn delete_attachment_post( +async fn delete_attachment_post( uuid: String, attachment_id: String, headers: Headers, conn: DbConn, - nt: Notify, + nt: Notify<'_>, ) -> EmptyResult { - delete_attachment(uuid, attachment_id, headers, conn, nt) + delete_attachment(uuid, attachment_id, headers, conn, nt).await } #[delete("/ciphers//attachment/")] -fn delete_attachment(uuid: String, attachment_id: String, headers: Headers, conn: DbConn, nt: Notify) -> EmptyResult { - _delete_cipher_attachment_by_id(&uuid, &attachment_id, &headers, &conn, &nt) -} - -#[delete("/ciphers//attachment//admin")] -fn delete_attachment_admin( +async fn delete_attachment( uuid: String, attachment_id: String, headers: Headers, conn: DbConn, - nt: Notify, + nt: Notify<'_>, ) -> EmptyResult { - _delete_cipher_attachment_by_id(&uuid, &attachment_id, &headers, &conn, &nt) + _delete_cipher_attachment_by_id(&uuid, &attachment_id, &headers, &conn, &nt).await +} + +#[delete("/ciphers//attachment//admin")] +async fn delete_attachment_admin( + uuid: String, + attachment_id: String, + headers: Headers, + conn: DbConn, + nt: Notify<'_>, +) -> EmptyResult { + _delete_cipher_attachment_by_id(&uuid, &attachment_id, &headers, &conn, &nt).await } #[post("/ciphers//delete")] -fn delete_cipher_post(uuid: String, headers: Headers, conn: DbConn, nt: Notify) -> EmptyResult { - _delete_cipher_by_uuid(&uuid, &headers, &conn, false, &nt) +async fn delete_cipher_post(uuid: String, headers: Headers, conn: DbConn, nt: Notify<'_>) -> EmptyResult { + 
_delete_cipher_by_uuid(&uuid, &headers, &conn, false, &nt).await } #[post("/ciphers//delete-admin")] -fn delete_cipher_post_admin(uuid: String, headers: Headers, conn: DbConn, nt: Notify) -> EmptyResult { - _delete_cipher_by_uuid(&uuid, &headers, &conn, false, &nt) +async fn delete_cipher_post_admin(uuid: String, headers: Headers, conn: DbConn, nt: Notify<'_>) -> EmptyResult { + _delete_cipher_by_uuid(&uuid, &headers, &conn, false, &nt).await } #[put("/ciphers//delete")] -fn delete_cipher_put(uuid: String, headers: Headers, conn: DbConn, nt: Notify) -> EmptyResult { - _delete_cipher_by_uuid(&uuid, &headers, &conn, true, &nt) +async fn delete_cipher_put(uuid: String, headers: Headers, conn: DbConn, nt: Notify<'_>) -> EmptyResult { + _delete_cipher_by_uuid(&uuid, &headers, &conn, true, &nt).await } #[put("/ciphers//delete-admin")] -fn delete_cipher_put_admin(uuid: String, headers: Headers, conn: DbConn, nt: Notify) -> EmptyResult { - _delete_cipher_by_uuid(&uuid, &headers, &conn, true, &nt) +async fn delete_cipher_put_admin(uuid: String, headers: Headers, conn: DbConn, nt: Notify<'_>) -> EmptyResult { + _delete_cipher_by_uuid(&uuid, &headers, &conn, true, &nt).await } #[delete("/ciphers/")] -fn delete_cipher(uuid: String, headers: Headers, conn: DbConn, nt: Notify) -> EmptyResult { - _delete_cipher_by_uuid(&uuid, &headers, &conn, false, &nt) +async fn delete_cipher(uuid: String, headers: Headers, conn: DbConn, nt: Notify<'_>) -> EmptyResult { + _delete_cipher_by_uuid(&uuid, &headers, &conn, false, &nt).await } #[delete("/ciphers//admin")] -fn delete_cipher_admin(uuid: String, headers: Headers, conn: DbConn, nt: Notify) -> EmptyResult { - _delete_cipher_by_uuid(&uuid, &headers, &conn, false, &nt) +async fn delete_cipher_admin(uuid: String, headers: Headers, conn: DbConn, nt: Notify<'_>) -> EmptyResult { + _delete_cipher_by_uuid(&uuid, &headers, &conn, false, &nt).await } #[delete("/ciphers", data = "")] -fn delete_cipher_selected(data: JsonUpcase, headers: Headers, 
conn: DbConn, nt: Notify) -> EmptyResult { - _delete_multiple_ciphers(data, headers, conn, false, nt) +async fn delete_cipher_selected( + data: JsonUpcase, + headers: Headers, + conn: DbConn, + nt: Notify<'_>, +) -> EmptyResult { + _delete_multiple_ciphers(data, headers, conn, false, nt).await } #[post("/ciphers/delete", data = "")] -fn delete_cipher_selected_post(data: JsonUpcase, headers: Headers, conn: DbConn, nt: Notify) -> EmptyResult { - _delete_multiple_ciphers(data, headers, conn, false, nt) +async fn delete_cipher_selected_post( + data: JsonUpcase, + headers: Headers, + conn: DbConn, + nt: Notify<'_>, +) -> EmptyResult { + _delete_multiple_ciphers(data, headers, conn, false, nt).await } #[put("/ciphers/delete", data = "")] -fn delete_cipher_selected_put(data: JsonUpcase, headers: Headers, conn: DbConn, nt: Notify) -> EmptyResult { - _delete_multiple_ciphers(data, headers, conn, true, nt) // soft delete +async fn delete_cipher_selected_put( + data: JsonUpcase, + headers: Headers, + conn: DbConn, + nt: Notify<'_>, +) -> EmptyResult { + _delete_multiple_ciphers(data, headers, conn, true, nt).await // soft delete } #[delete("/ciphers/admin", data = "")] -fn delete_cipher_selected_admin(data: JsonUpcase, headers: Headers, conn: DbConn, nt: Notify) -> EmptyResult { - delete_cipher_selected(data, headers, conn, nt) +async fn delete_cipher_selected_admin( + data: JsonUpcase, + headers: Headers, + conn: DbConn, + nt: Notify<'_>, +) -> EmptyResult { + delete_cipher_selected(data, headers, conn, nt).await } #[post("/ciphers/delete-admin", data = "")] -fn delete_cipher_selected_post_admin( +async fn delete_cipher_selected_post_admin( data: JsonUpcase, headers: Headers, conn: DbConn, - nt: Notify, + nt: Notify<'_>, ) -> EmptyResult { - delete_cipher_selected_post(data, headers, conn, nt) + delete_cipher_selected_post(data, headers, conn, nt).await } #[put("/ciphers/delete-admin", data = "")] -fn delete_cipher_selected_put_admin( +async fn 
delete_cipher_selected_put_admin( data: JsonUpcase, headers: Headers, conn: DbConn, - nt: Notify, + nt: Notify<'_>, ) -> EmptyResult { - delete_cipher_selected_put(data, headers, conn, nt) + delete_cipher_selected_put(data, headers, conn, nt).await } #[put("/ciphers//restore")] -fn restore_cipher_put(uuid: String, headers: Headers, conn: DbConn, nt: Notify) -> JsonResult { - _restore_cipher_by_uuid(&uuid, &headers, &conn, &nt) +async fn restore_cipher_put(uuid: String, headers: Headers, conn: DbConn, nt: Notify<'_>) -> JsonResult { + _restore_cipher_by_uuid(&uuid, &headers, &conn, &nt).await } #[put("/ciphers//restore-admin")] -fn restore_cipher_put_admin(uuid: String, headers: Headers, conn: DbConn, nt: Notify) -> JsonResult { - _restore_cipher_by_uuid(&uuid, &headers, &conn, &nt) +async fn restore_cipher_put_admin(uuid: String, headers: Headers, conn: DbConn, nt: Notify<'_>) -> JsonResult { + _restore_cipher_by_uuid(&uuid, &headers, &conn, &nt).await } #[put("/ciphers/restore", data = "")] -fn restore_cipher_selected(data: JsonUpcase, headers: Headers, conn: DbConn, nt: Notify) -> JsonResult { - _restore_multiple_ciphers(data, &headers, &conn, &nt) +async fn restore_cipher_selected( + data: JsonUpcase, + headers: Headers, + conn: DbConn, + nt: Notify<'_>, +) -> JsonResult { + _restore_multiple_ciphers(data, &headers, &conn, &nt).await } #[derive(Deserialize)] @@ -1152,12 +1224,17 @@ struct MoveCipherData { } #[post("/ciphers/move", data = "")] -fn move_cipher_selected(data: JsonUpcase, headers: Headers, conn: DbConn, nt: Notify) -> EmptyResult { +async fn move_cipher_selected( + data: JsonUpcase, + headers: Headers, + conn: DbConn, + nt: Notify<'_>, +) -> EmptyResult { let data = data.into_inner().data; let user_uuid = headers.user.uuid; if let Some(ref folder_id) = data.FolderId { - match Folder::find_by_uuid(folder_id, &conn) { + match Folder::find_by_uuid(folder_id, &conn).await { Some(folder) => { if folder.user_uuid != user_uuid { err!("Folder is not owned 
by user") @@ -1168,17 +1245,17 @@ fn move_cipher_selected(data: JsonUpcase, headers: Headers, conn } for uuid in data.Ids { - let cipher = match Cipher::find_by_uuid(&uuid, &conn) { + let cipher = match Cipher::find_by_uuid(&uuid, &conn).await { Some(cipher) => cipher, None => err!("Cipher doesn't exist"), }; - if !cipher.is_accessible_to_user(&user_uuid, &conn) { + if !cipher.is_accessible_to_user(&user_uuid, &conn).await { err!("Cipher is not accessible by user") } // Move cipher - cipher.move_to_folder(data.FolderId.clone(), &user_uuid, &conn)?; + cipher.move_to_folder(data.FolderId.clone(), &user_uuid, &conn).await?; nt.send_cipher_update(UpdateType::CipherUpdate, &cipher, &[user_uuid.clone()]); } @@ -1187,13 +1264,13 @@ fn move_cipher_selected(data: JsonUpcase, headers: Headers, conn } #[put("/ciphers/move", data = "")] -fn move_cipher_selected_put( +async fn move_cipher_selected_put( data: JsonUpcase, headers: Headers, conn: DbConn, - nt: Notify, + nt: Notify<'_>, ) -> EmptyResult { - move_cipher_selected(data, headers, conn, nt) + move_cipher_selected(data, headers, conn, nt).await } #[derive(FromForm)] @@ -1203,12 +1280,12 @@ struct OrganizationId { } #[post("/ciphers/purge?", data = "")] -fn delete_all( +async fn delete_all( organization: Option, data: JsonUpcase, headers: Headers, conn: DbConn, - nt: Notify, + nt: Notify<'_>, ) -> EmptyResult { let data: PasswordData = data.into_inner().data; let password_hash = data.MasterPasswordHash; @@ -1222,11 +1299,11 @@ fn delete_all( match organization { Some(org_data) => { // Organization ID in query params, purging organization vault - match UserOrganization::find_by_user_and_org(&user.uuid, &org_data.org_id, &conn) { + match UserOrganization::find_by_user_and_org(&user.uuid, &org_data.org_id, &conn).await { None => err!("You don't have permission to purge the organization vault"), Some(user_org) => { if user_org.atype == UserOrgType::Owner { - Cipher::delete_all_by_organization(&org_data.org_id, &conn)?; + 
Cipher::delete_all_by_organization(&org_data.org_id, &conn).await?; nt.send_user_update(UpdateType::Vault, &user); Ok(()) } else { @@ -1238,50 +1315,56 @@ fn delete_all( None => { // No organization ID in query params, purging user vault // Delete ciphers and their attachments - for cipher in Cipher::find_owned_by_user(&user.uuid, &conn) { - cipher.delete(&conn)?; + for cipher in Cipher::find_owned_by_user(&user.uuid, &conn).await { + cipher.delete(&conn).await?; } // Delete folders - for f in Folder::find_by_user(&user.uuid, &conn) { - f.delete(&conn)?; + for f in Folder::find_by_user(&user.uuid, &conn).await { + f.delete(&conn).await?; } - user.update_revision(&conn)?; + user.update_revision(&conn).await?; nt.send_user_update(UpdateType::Vault, &user); Ok(()) } } } -fn _delete_cipher_by_uuid(uuid: &str, headers: &Headers, conn: &DbConn, soft_delete: bool, nt: &Notify) -> EmptyResult { - let mut cipher = match Cipher::find_by_uuid(uuid, conn) { +async fn _delete_cipher_by_uuid( + uuid: &str, + headers: &Headers, + conn: &DbConn, + soft_delete: bool, + nt: &Notify<'_>, +) -> EmptyResult { + let mut cipher = match Cipher::find_by_uuid(uuid, conn).await { Some(cipher) => cipher, None => err!("Cipher doesn't exist"), }; - if !cipher.is_write_accessible_to_user(&headers.user.uuid, conn) { + if !cipher.is_write_accessible_to_user(&headers.user.uuid, conn).await { err!("Cipher can't be deleted by user") } if soft_delete { cipher.deleted_at = Some(Utc::now().naive_utc()); - cipher.save(conn)?; - nt.send_cipher_update(UpdateType::CipherUpdate, &cipher, &cipher.update_users_revision(conn)); + cipher.save(conn).await?; + nt.send_cipher_update(UpdateType::CipherUpdate, &cipher, &cipher.update_users_revision(conn).await); } else { - cipher.delete(conn)?; - nt.send_cipher_update(UpdateType::CipherDelete, &cipher, &cipher.update_users_revision(conn)); + cipher.delete(conn).await?; + nt.send_cipher_update(UpdateType::CipherDelete, &cipher, 
&cipher.update_users_revision(conn).await); } Ok(()) } -fn _delete_multiple_ciphers( +async fn _delete_multiple_ciphers( data: JsonUpcase, headers: Headers, conn: DbConn, soft_delete: bool, - nt: Notify, + nt: Notify<'_>, ) -> EmptyResult { let data: Value = data.into_inner().data; @@ -1294,7 +1377,7 @@ fn _delete_multiple_ciphers( }; for uuid in uuids { - if let error @ Err(_) = _delete_cipher_by_uuid(uuid, &headers, &conn, soft_delete, &nt) { + if let error @ Err(_) = _delete_cipher_by_uuid(uuid, &headers, &conn, soft_delete, &nt).await { return error; }; } @@ -1302,24 +1385,29 @@ fn _delete_multiple_ciphers( Ok(()) } -fn _restore_cipher_by_uuid(uuid: &str, headers: &Headers, conn: &DbConn, nt: &Notify) -> JsonResult { - let mut cipher = match Cipher::find_by_uuid(uuid, conn) { +async fn _restore_cipher_by_uuid(uuid: &str, headers: &Headers, conn: &DbConn, nt: &Notify<'_>) -> JsonResult { + let mut cipher = match Cipher::find_by_uuid(uuid, conn).await { Some(cipher) => cipher, None => err!("Cipher doesn't exist"), }; - if !cipher.is_write_accessible_to_user(&headers.user.uuid, conn) { + if !cipher.is_write_accessible_to_user(&headers.user.uuid, conn).await { err!("Cipher can't be restored by user") } cipher.deleted_at = None; - cipher.save(conn)?; + cipher.save(conn).await?; - nt.send_cipher_update(UpdateType::CipherUpdate, &cipher, &cipher.update_users_revision(conn)); - Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, conn))) + nt.send_cipher_update(UpdateType::CipherUpdate, &cipher, &cipher.update_users_revision(conn).await); + Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, conn).await)) } -fn _restore_multiple_ciphers(data: JsonUpcase, headers: &Headers, conn: &DbConn, nt: &Notify) -> JsonResult { +async fn _restore_multiple_ciphers( + data: JsonUpcase, + headers: &Headers, + conn: &DbConn, + nt: &Notify<'_>, +) -> JsonResult { let data: Value = data.into_inner().data; let uuids = match data.get("Ids") { @@ -1332,7 +1420,7 @@ fn 
_restore_multiple_ciphers(data: JsonUpcase, headers: &Headers, conn: & let mut ciphers: Vec = Vec::new(); for uuid in uuids { - match _restore_cipher_by_uuid(uuid, headers, conn, nt) { + match _restore_cipher_by_uuid(uuid, headers, conn, nt).await { Ok(json) => ciphers.push(json.into_inner()), err => return err, } @@ -1345,14 +1433,14 @@ fn _restore_multiple_ciphers(data: JsonUpcase, headers: &Headers, conn: & }))) } -fn _delete_cipher_attachment_by_id( +async fn _delete_cipher_attachment_by_id( uuid: &str, attachment_id: &str, headers: &Headers, conn: &DbConn, - nt: &Notify, + nt: &Notify<'_>, ) -> EmptyResult { - let attachment = match Attachment::find_by_id(attachment_id, conn) { + let attachment = match Attachment::find_by_id(attachment_id, conn).await { Some(attachment) => attachment, None => err!("Attachment doesn't exist"), }; @@ -1361,17 +1449,17 @@ fn _delete_cipher_attachment_by_id( err!("Attachment from other cipher") } - let cipher = match Cipher::find_by_uuid(uuid, conn) { + let cipher = match Cipher::find_by_uuid(uuid, conn).await { Some(cipher) => cipher, None => err!("Cipher doesn't exist"), }; - if !cipher.is_write_accessible_to_user(&headers.user.uuid, conn) { + if !cipher.is_write_accessible_to_user(&headers.user.uuid, conn).await { err!("Cipher cannot be deleted by user") } // Delete attachment - attachment.delete(conn)?; - nt.send_cipher_update(UpdateType::CipherUpdate, &cipher, &cipher.update_users_revision(conn)); + attachment.delete(conn).await?; + nt.send_cipher_update(UpdateType::CipherUpdate, &cipher, &cipher.update_users_revision(conn).await); Ok(()) } diff --git a/src/api/core/emergency_access.rs b/src/api/core/emergency_access.rs index 3b6d8c08..1ac8ec98 100644 --- a/src/api/core/emergency_access.rs +++ b/src/api/core/emergency_access.rs @@ -11,6 +11,8 @@ use crate::{ mail, CONFIG, }; +use futures::{stream, stream::StreamExt}; + pub fn routes() -> Vec { routes![ get_contacts, @@ -36,13 +38,17 @@ pub fn routes() -> Vec { // region get 
#[get("/emergency-access/trusted")] -fn get_contacts(headers: Headers, conn: DbConn) -> JsonResult { +async fn get_contacts(headers: Headers, conn: DbConn) -> JsonResult { check_emergency_access_allowed()?; - let emergency_access_list = EmergencyAccess::find_all_by_grantor_uuid(&headers.user.uuid, &conn); - - let emergency_access_list_json: Vec = - emergency_access_list.iter().map(|e| e.to_json_grantee_details(&conn)).collect(); + let emergency_access_list_json = + stream::iter(EmergencyAccess::find_all_by_grantor_uuid(&headers.user.uuid, &conn).await) + .then(|e| async { + let e = e; // Move out this single variable + e.to_json_grantee_details(&conn).await + }) + .collect::>() + .await; Ok(Json(json!({ "Data": emergency_access_list_json, @@ -52,13 +58,17 @@ fn get_contacts(headers: Headers, conn: DbConn) -> JsonResult { } #[get("/emergency-access/granted")] -fn get_grantees(headers: Headers, conn: DbConn) -> JsonResult { +async fn get_grantees(headers: Headers, conn: DbConn) -> JsonResult { check_emergency_access_allowed()?; - let emergency_access_list = EmergencyAccess::find_all_by_grantee_uuid(&headers.user.uuid, &conn); - - let emergency_access_list_json: Vec = - emergency_access_list.iter().map(|e| e.to_json_grantor_details(&conn)).collect(); + let emergency_access_list_json = + stream::iter(EmergencyAccess::find_all_by_grantee_uuid(&headers.user.uuid, &conn).await) + .then(|e| async { + let e = e; // Move out this single variable + e.to_json_grantor_details(&conn).await + }) + .collect::>() + .await; Ok(Json(json!({ "Data": emergency_access_list_json, @@ -68,11 +78,11 @@ fn get_grantees(headers: Headers, conn: DbConn) -> JsonResult { } #[get("/emergency-access/")] -fn get_emergency_access(emer_id: String, conn: DbConn) -> JsonResult { +async fn get_emergency_access(emer_id: String, conn: DbConn) -> JsonResult { check_emergency_access_allowed()?; - match EmergencyAccess::find_by_uuid(&emer_id, &conn) { - Some(emergency_access) => 
Ok(Json(emergency_access.to_json_grantee_details(&conn))), + match EmergencyAccess::find_by_uuid(&emer_id, &conn).await { + Some(emergency_access) => Ok(Json(emergency_access.to_json_grantee_details(&conn).await)), None => err!("Emergency access not valid."), } } @@ -90,17 +100,25 @@ struct EmergencyAccessUpdateData { } #[put("/emergency-access/", data = "")] -fn put_emergency_access(emer_id: String, data: JsonUpcase, conn: DbConn) -> JsonResult { - post_emergency_access(emer_id, data, conn) +async fn put_emergency_access( + emer_id: String, + data: JsonUpcase, + conn: DbConn, +) -> JsonResult { + post_emergency_access(emer_id, data, conn).await } #[post("/emergency-access/", data = "")] -fn post_emergency_access(emer_id: String, data: JsonUpcase, conn: DbConn) -> JsonResult { +async fn post_emergency_access( + emer_id: String, + data: JsonUpcase, + conn: DbConn, +) -> JsonResult { check_emergency_access_allowed()?; let data: EmergencyAccessUpdateData = data.into_inner().data; - let mut emergency_access = match EmergencyAccess::find_by_uuid(&emer_id, &conn) { + let mut emergency_access = match EmergencyAccess::find_by_uuid(&emer_id, &conn).await { Some(emergency_access) => emergency_access, None => err!("Emergency access not valid."), }; @@ -114,7 +132,7 @@ fn post_emergency_access(emer_id: String, data: JsonUpcase")] -fn delete_emergency_access(emer_id: String, headers: Headers, conn: DbConn) -> EmptyResult { +async fn delete_emergency_access(emer_id: String, headers: Headers, conn: DbConn) -> EmptyResult { check_emergency_access_allowed()?; let grantor_user = headers.user; - let emergency_access = match EmergencyAccess::find_by_uuid(&emer_id, &conn) { + let emergency_access = match EmergencyAccess::find_by_uuid(&emer_id, &conn).await { Some(emer) => { if emer.grantor_uuid != grantor_user.uuid && emer.grantee_uuid != Some(grantor_user.uuid) { err!("Emergency access not valid.") @@ -137,13 +155,13 @@ fn delete_emergency_access(emer_id: String, headers: Headers, 
conn: DbConn) -> E } None => err!("Emergency access not valid."), }; - emergency_access.delete(&conn)?; + emergency_access.delete(&conn).await?; Ok(()) } #[post("/emergency-access//delete")] -fn post_delete_emergency_access(emer_id: String, headers: Headers, conn: DbConn) -> EmptyResult { - delete_emergency_access(emer_id, headers, conn) +async fn post_delete_emergency_access(emer_id: String, headers: Headers, conn: DbConn) -> EmptyResult { + delete_emergency_access(emer_id, headers, conn).await } // endregion @@ -159,7 +177,7 @@ struct EmergencyAccessInviteData { } #[post("/emergency-access/invite", data = "")] -fn send_invite(data: JsonUpcase, headers: Headers, conn: DbConn) -> EmptyResult { +async fn send_invite(data: JsonUpcase, headers: Headers, conn: DbConn) -> EmptyResult { check_emergency_access_allowed()?; let data: EmergencyAccessInviteData = data.into_inner().data; @@ -180,7 +198,7 @@ fn send_invite(data: JsonUpcase, headers: Headers, co err!("You can not set yourself as an emergency contact.") } - let grantee_user = match User::find_by_mail(&email, &conn) { + let grantee_user = match User::find_by_mail(&email, &conn).await { None => { if !CONFIG.invitations_allowed() { err!(format!("Grantee user does not exist: {}", email)) @@ -192,11 +210,11 @@ fn send_invite(data: JsonUpcase, headers: Headers, co if !CONFIG.mail_enabled() { let invitation = Invitation::new(email.clone()); - invitation.save(&conn)?; + invitation.save(&conn).await?; } let mut user = User::new(email.clone()); - user.save(&conn)?; + user.save(&conn).await?; user } Some(user) => user, @@ -208,6 +226,7 @@ fn send_invite(data: JsonUpcase, headers: Headers, co &grantee_user.email, &conn, ) + .await .is_some() { err!(format!("Grantee user already invited: {}", email)) @@ -220,7 +239,7 @@ fn send_invite(data: JsonUpcase, headers: Headers, co new_type, wait_time_days, ); - new_emergency_access.save(&conn)?; + new_emergency_access.save(&conn).await?; if CONFIG.mail_enabled() { 
mail::send_emergency_access_invite( @@ -232,9 +251,9 @@ fn send_invite(data: JsonUpcase, headers: Headers, co )?; } else { // Automatically mark user as accepted if no email invites - match User::find_by_mail(&email, &conn) { + match User::find_by_mail(&email, &conn).await { Some(user) => { - match accept_invite_process(user.uuid, new_emergency_access.uuid, Some(email), conn.borrow()) { + match accept_invite_process(user.uuid, new_emergency_access.uuid, Some(email), conn.borrow()).await { Ok(v) => (v), Err(e) => err!(e.to_string()), } @@ -247,10 +266,10 @@ fn send_invite(data: JsonUpcase, headers: Headers, co } #[post("/emergency-access//reinvite")] -fn resend_invite(emer_id: String, headers: Headers, conn: DbConn) -> EmptyResult { +async fn resend_invite(emer_id: String, headers: Headers, conn: DbConn) -> EmptyResult { check_emergency_access_allowed()?; - let emergency_access = match EmergencyAccess::find_by_uuid(&emer_id, &conn) { + let emergency_access = match EmergencyAccess::find_by_uuid(&emer_id, &conn).await { Some(emer) => emer, None => err!("Emergency access not valid."), }; @@ -268,7 +287,7 @@ fn resend_invite(emer_id: String, headers: Headers, conn: DbConn) -> EmptyResult None => err!("Email not valid."), }; - let grantee_user = match User::find_by_mail(&email, &conn) { + let grantee_user = match User::find_by_mail(&email, &conn).await { Some(user) => user, None => err!("Grantee user not found."), }; @@ -284,13 +303,15 @@ fn resend_invite(emer_id: String, headers: Headers, conn: DbConn) -> EmptyResult Some(grantor_user.email), )?; } else { - if Invitation::find_by_mail(&email, &conn).is_none() { + if Invitation::find_by_mail(&email, &conn).await.is_none() { let invitation = Invitation::new(email); - invitation.save(&conn)?; + invitation.save(&conn).await?; } // Automatically mark user as accepted if no email invites - match accept_invite_process(grantee_user.uuid, emergency_access.uuid, emergency_access.email, conn.borrow()) { + match 
accept_invite_process(grantee_user.uuid, emergency_access.uuid, emergency_access.email, conn.borrow()) + .await + { Ok(v) => (v), Err(e) => err!(e.to_string()), } @@ -306,28 +327,28 @@ struct AcceptData { } #[post("/emergency-access//accept", data = "")] -fn accept_invite(emer_id: String, data: JsonUpcase, conn: DbConn) -> EmptyResult { +async fn accept_invite(emer_id: String, data: JsonUpcase, conn: DbConn) -> EmptyResult { check_emergency_access_allowed()?; let data: AcceptData = data.into_inner().data; let token = &data.Token; let claims = decode_emergency_access_invite(token)?; - let grantee_user = match User::find_by_mail(&claims.email, &conn) { + let grantee_user = match User::find_by_mail(&claims.email, &conn).await { Some(user) => { - Invitation::take(&claims.email, &conn); + Invitation::take(&claims.email, &conn).await; user } None => err!("Invited user not found"), }; - let emergency_access = match EmergencyAccess::find_by_uuid(&emer_id, &conn) { + let emergency_access = match EmergencyAccess::find_by_uuid(&emer_id, &conn).await { Some(emer) => emer, None => err!("Emergency access not valid."), }; // get grantor user to send Accepted email - let grantor_user = match User::find_by_uuid(&emergency_access.grantor_uuid, &conn) { + let grantor_user = match User::find_by_uuid(&emergency_access.grantor_uuid, &conn).await { Some(user) => user, None => err!("Grantor user not found."), }; @@ -336,7 +357,7 @@ fn accept_invite(emer_id: String, data: JsonUpcase, conn: DbConn) -> && (claims.grantor_name.is_some() && grantor_user.name == claims.grantor_name.unwrap()) && (claims.grantor_email.is_some() && grantor_user.email == claims.grantor_email.unwrap()) { - match accept_invite_process(grantee_user.uuid.clone(), emer_id, Some(grantee_user.email.clone()), &conn) { + match accept_invite_process(grantee_user.uuid.clone(), emer_id, Some(grantee_user.email.clone()), &conn).await { Ok(v) => (v), Err(e) => err!(e.to_string()), } @@ -351,8 +372,13 @@ fn accept_invite(emer_id: 
String, data: JsonUpcase, conn: DbConn) -> } } -fn accept_invite_process(grantee_uuid: String, emer_id: String, email: Option, conn: &DbConn) -> EmptyResult { - let mut emergency_access = match EmergencyAccess::find_by_uuid(&emer_id, conn) { +async fn accept_invite_process( + grantee_uuid: String, + emer_id: String, + email: Option, + conn: &DbConn, +) -> EmptyResult { + let mut emergency_access = match EmergencyAccess::find_by_uuid(&emer_id, conn).await { Some(emer) => emer, None => err!("Emergency access not valid."), }; @@ -369,7 +395,7 @@ fn accept_invite_process(grantee_uuid: String, emer_id: String, email: Option/confirm", data = "")] -fn confirm_emergency_access( +async fn confirm_emergency_access( emer_id: String, data: JsonUpcase, headers: Headers, @@ -391,7 +417,7 @@ fn confirm_emergency_access( let data: ConfirmData = data.into_inner().data; let key = data.Key; - let mut emergency_access = match EmergencyAccess::find_by_uuid(&emer_id, &conn) { + let mut emergency_access = match EmergencyAccess::find_by_uuid(&emer_id, &conn).await { Some(emer) => emer, None => err!("Emergency access not valid."), }; @@ -402,13 +428,13 @@ fn confirm_emergency_access( err!("Emergency access not valid.") } - let grantor_user = match User::find_by_uuid(&confirming_user.uuid, &conn) { + let grantor_user = match User::find_by_uuid(&confirming_user.uuid, &conn).await { Some(user) => user, None => err!("Grantor user not found."), }; if let Some(grantee_uuid) = emergency_access.grantee_uuid.as_ref() { - let grantee_user = match User::find_by_uuid(grantee_uuid, &conn) { + let grantee_user = match User::find_by_uuid(grantee_uuid, &conn).await { Some(user) => user, None => err!("Grantee user not found."), }; @@ -417,7 +443,7 @@ fn confirm_emergency_access( emergency_access.key_encrypted = Some(key); emergency_access.email = None; - emergency_access.save(&conn)?; + emergency_access.save(&conn).await?; if CONFIG.mail_enabled() { 
mail::send_emergency_access_invite_confirmed(&grantee_user.email, &grantor_user.name)?; @@ -433,11 +459,11 @@ fn confirm_emergency_access( // region access emergency access #[post("/emergency-access//initiate")] -fn initiate_emergency_access(emer_id: String, headers: Headers, conn: DbConn) -> JsonResult { +async fn initiate_emergency_access(emer_id: String, headers: Headers, conn: DbConn) -> JsonResult { check_emergency_access_allowed()?; let initiating_user = headers.user; - let mut emergency_access = match EmergencyAccess::find_by_uuid(&emer_id, &conn) { + let mut emergency_access = match EmergencyAccess::find_by_uuid(&emer_id, &conn).await { Some(emer) => emer, None => err!("Emergency access not valid."), }; @@ -448,7 +474,7 @@ fn initiate_emergency_access(emer_id: String, headers: Headers, conn: DbConn) -> err!("Emergency access not valid.") } - let grantor_user = match User::find_by_uuid(&emergency_access.grantor_uuid, &conn) { + let grantor_user = match User::find_by_uuid(&emergency_access.grantor_uuid, &conn).await { Some(user) => user, None => err!("Grantor user not found."), }; @@ -458,7 +484,7 @@ fn initiate_emergency_access(emer_id: String, headers: Headers, conn: DbConn) -> emergency_access.updated_at = now; emergency_access.recovery_initiated_at = Some(now); emergency_access.last_notification_at = Some(now); - emergency_access.save(&conn)?; + emergency_access.save(&conn).await?; if CONFIG.mail_enabled() { mail::send_emergency_access_recovery_initiated( @@ -472,11 +498,11 @@ fn initiate_emergency_access(emer_id: String, headers: Headers, conn: DbConn) -> } #[post("/emergency-access//approve")] -fn approve_emergency_access(emer_id: String, headers: Headers, conn: DbConn) -> JsonResult { +async fn approve_emergency_access(emer_id: String, headers: Headers, conn: DbConn) -> JsonResult { check_emergency_access_allowed()?; let approving_user = headers.user; - let mut emergency_access = match EmergencyAccess::find_by_uuid(&emer_id, &conn) { + let mut 
emergency_access = match EmergencyAccess::find_by_uuid(&emer_id, &conn).await { Some(emer) => emer, None => err!("Emergency access not valid."), }; @@ -487,19 +513,19 @@ fn approve_emergency_access(emer_id: String, headers: Headers, conn: DbConn) -> err!("Emergency access not valid.") } - let grantor_user = match User::find_by_uuid(&approving_user.uuid, &conn) { + let grantor_user = match User::find_by_uuid(&approving_user.uuid, &conn).await { Some(user) => user, None => err!("Grantor user not found."), }; if let Some(grantee_uuid) = emergency_access.grantee_uuid.as_ref() { - let grantee_user = match User::find_by_uuid(grantee_uuid, &conn) { + let grantee_user = match User::find_by_uuid(grantee_uuid, &conn).await { Some(user) => user, None => err!("Grantee user not found."), }; emergency_access.status = EmergencyAccessStatus::RecoveryApproved as i32; - emergency_access.save(&conn)?; + emergency_access.save(&conn).await?; if CONFIG.mail_enabled() { mail::send_emergency_access_recovery_approved(&grantee_user.email, &grantor_user.name)?; @@ -511,11 +537,11 @@ fn approve_emergency_access(emer_id: String, headers: Headers, conn: DbConn) -> } #[post("/emergency-access//reject")] -fn reject_emergency_access(emer_id: String, headers: Headers, conn: DbConn) -> JsonResult { +async fn reject_emergency_access(emer_id: String, headers: Headers, conn: DbConn) -> JsonResult { check_emergency_access_allowed()?; let rejecting_user = headers.user; - let mut emergency_access = match EmergencyAccess::find_by_uuid(&emer_id, &conn) { + let mut emergency_access = match EmergencyAccess::find_by_uuid(&emer_id, &conn).await { Some(emer) => emer, None => err!("Emergency access not valid."), }; @@ -527,19 +553,19 @@ fn reject_emergency_access(emer_id: String, headers: Headers, conn: DbConn) -> J err!("Emergency access not valid.") } - let grantor_user = match User::find_by_uuid(&rejecting_user.uuid, &conn) { + let grantor_user = match User::find_by_uuid(&rejecting_user.uuid, &conn).await { 
Some(user) => user, None => err!("Grantor user not found."), }; if let Some(grantee_uuid) = emergency_access.grantee_uuid.as_ref() { - let grantee_user = match User::find_by_uuid(grantee_uuid, &conn) { + let grantee_user = match User::find_by_uuid(grantee_uuid, &conn).await { Some(user) => user, None => err!("Grantee user not found."), }; emergency_access.status = EmergencyAccessStatus::Confirmed as i32; - emergency_access.save(&conn)?; + emergency_access.save(&conn).await?; if CONFIG.mail_enabled() { mail::send_emergency_access_recovery_rejected(&grantee_user.email, &grantor_user.name)?; @@ -555,12 +581,12 @@ fn reject_emergency_access(emer_id: String, headers: Headers, conn: DbConn) -> J // region action #[post("/emergency-access//view")] -fn view_emergency_access(emer_id: String, headers: Headers, conn: DbConn) -> JsonResult { +async fn view_emergency_access(emer_id: String, headers: Headers, conn: DbConn) -> JsonResult { check_emergency_access_allowed()?; let requesting_user = headers.user; let host = headers.host; - let emergency_access = match EmergencyAccess::find_by_uuid(&emer_id, &conn) { + let emergency_access = match EmergencyAccess::find_by_uuid(&emer_id, &conn).await { Some(emer) => emer, None => err!("Emergency access not valid."), }; @@ -569,10 +595,13 @@ fn view_emergency_access(emer_id: String, headers: Headers, conn: DbConn) -> Jso err!("Emergency access not valid.") } - let ciphers = Cipher::find_owned_by_user(&emergency_access.grantor_uuid, &conn); - - let ciphers_json: Vec = - ciphers.iter().map(|c| c.to_json(&host, &emergency_access.grantor_uuid, &conn)).collect(); + let ciphers_json = stream::iter(Cipher::find_owned_by_user(&emergency_access.grantor_uuid, &conn).await) + .then(|c| async { + let c = c; // Move out this single variable + c.to_json(&host, &emergency_access.grantor_uuid, &conn).await + }) + .collect::>() + .await; Ok(Json(json!({ "Ciphers": ciphers_json, @@ -582,11 +611,11 @@ fn view_emergency_access(emer_id: String, headers: 
Headers, conn: DbConn) -> Jso } #[post("/emergency-access//takeover")] -fn takeover_emergency_access(emer_id: String, headers: Headers, conn: DbConn) -> JsonResult { +async fn takeover_emergency_access(emer_id: String, headers: Headers, conn: DbConn) -> JsonResult { check_emergency_access_allowed()?; let requesting_user = headers.user; - let emergency_access = match EmergencyAccess::find_by_uuid(&emer_id, &conn) { + let emergency_access = match EmergencyAccess::find_by_uuid(&emer_id, &conn).await { Some(emer) => emer, None => err!("Emergency access not valid."), }; @@ -595,7 +624,7 @@ fn takeover_emergency_access(emer_id: String, headers: Headers, conn: DbConn) -> err!("Emergency access not valid.") } - let grantor_user = match User::find_by_uuid(&emergency_access.grantor_uuid, &conn) { + let grantor_user = match User::find_by_uuid(&emergency_access.grantor_uuid, &conn).await { Some(user) => user, None => err!("Grantor user not found."), }; @@ -616,7 +645,7 @@ struct EmergencyAccessPasswordData { } #[post("/emergency-access//password", data = "")] -fn password_emergency_access( +async fn password_emergency_access( emer_id: String, data: JsonUpcase, headers: Headers, @@ -629,7 +658,7 @@ fn password_emergency_access( let key = data.Key; let requesting_user = headers.user; - let emergency_access = match EmergencyAccess::find_by_uuid(&emer_id, &conn) { + let emergency_access = match EmergencyAccess::find_by_uuid(&emer_id, &conn).await { Some(emer) => emer, None => err!("Emergency access not valid."), }; @@ -638,7 +667,7 @@ fn password_emergency_access( err!("Emergency access not valid.") } - let mut grantor_user = match User::find_by_uuid(&emergency_access.grantor_uuid, &conn) { + let mut grantor_user = match User::find_by_uuid(&emergency_access.grantor_uuid, &conn).await { Some(user) => user, None => err!("Grantor user not found."), }; @@ -646,18 +675,15 @@ fn password_emergency_access( // change grantor_user password 
grantor_user.set_password(new_master_password_hash, None); grantor_user.akey = key; - grantor_user.save(&conn)?; + grantor_user.save(&conn).await?; // Disable TwoFactor providers since they will otherwise block logins - TwoFactor::delete_all_by_user(&grantor_user.uuid, &conn)?; - - // Removing owner, check that there are at least another owner - let user_org_grantor = UserOrganization::find_any_state_by_user(&grantor_user.uuid, &conn); + TwoFactor::delete_all_by_user(&grantor_user.uuid, &conn).await?; // Remove grantor from all organisations unless Owner - for user_org in user_org_grantor { + for user_org in UserOrganization::find_any_state_by_user(&grantor_user.uuid, &conn).await { if user_org.atype != UserOrgType::Owner as i32 { - user_org.delete(&conn)?; + user_org.delete(&conn).await?; } } Ok(()) @@ -666,9 +692,9 @@ fn password_emergency_access( // endregion #[get("/emergency-access//policies")] -fn policies_emergency_access(emer_id: String, headers: Headers, conn: DbConn) -> JsonResult { +async fn policies_emergency_access(emer_id: String, headers: Headers, conn: DbConn) -> JsonResult { let requesting_user = headers.user; - let emergency_access = match EmergencyAccess::find_by_uuid(&emer_id, &conn) { + let emergency_access = match EmergencyAccess::find_by_uuid(&emer_id, &conn).await { Some(emer) => emer, None => err!("Emergency access not valid."), }; @@ -677,13 +703,13 @@ fn policies_emergency_access(emer_id: String, headers: Headers, conn: DbConn) -> err!("Emergency access not valid.") } - let grantor_user = match User::find_by_uuid(&emergency_access.grantor_uuid, &conn) { + let grantor_user = match User::find_by_uuid(&emergency_access.grantor_uuid, &conn).await { Some(user) => user, None => err!("Grantor user not found."), }; let policies = OrgPolicy::find_confirmed_by_user(&grantor_user.uuid, &conn); - let policies_json: Vec = policies.iter().map(OrgPolicy::to_json).collect(); + let policies_json: Vec = 
policies.await.iter().map(OrgPolicy::to_json).collect(); Ok(Json(json!({ "Data": policies_json, @@ -716,7 +742,7 @@ pub async fn emergency_request_timeout_job(pool: DbPool) { } if let Ok(conn) = pool.get().await { - let emergency_access_list = EmergencyAccess::find_all_recoveries(&conn); + let emergency_access_list = EmergencyAccess::find_all_recoveries(&conn).await; if emergency_access_list.is_empty() { debug!("No emergency request timeout to approve"); @@ -728,15 +754,17 @@ pub async fn emergency_request_timeout_job(pool: DbPool) { >= emer.recovery_initiated_at.unwrap() + Duration::days(emer.wait_time_days as i64) { emer.status = EmergencyAccessStatus::RecoveryApproved as i32; - emer.save(&conn).expect("Cannot save emergency access on job"); + emer.save(&conn).await.expect("Cannot save emergency access on job"); if CONFIG.mail_enabled() { // get grantor user to send Accepted email - let grantor_user = User::find_by_uuid(&emer.grantor_uuid, &conn).expect("Grantor user not found."); + let grantor_user = + User::find_by_uuid(&emer.grantor_uuid, &conn).await.expect("Grantor user not found."); // get grantee user to send Accepted email let grantee_user = User::find_by_uuid(&emer.grantee_uuid.clone().expect("Grantee user invalid."), &conn) + .await .expect("Grantee user not found."); mail::send_emergency_access_recovery_timed_out( @@ -763,7 +791,7 @@ pub async fn emergency_notification_reminder_job(pool: DbPool) { } if let Ok(conn) = pool.get().await { - let emergency_access_list = EmergencyAccess::find_all_recoveries(&conn); + let emergency_access_list = EmergencyAccess::find_all_recoveries(&conn).await; if emergency_access_list.is_empty() { debug!("No emergency request reminder notification to send"); @@ -777,15 +805,17 @@ pub async fn emergency_notification_reminder_job(pool: DbPool) { || (emer.last_notification_at.is_some() && Utc::now().naive_utc() >= emer.last_notification_at.unwrap() + Duration::days(1))) { - emer.save(&conn).expect("Cannot save emergency access 
on job"); + emer.save(&conn).await.expect("Cannot save emergency access on job"); if CONFIG.mail_enabled() { // get grantor user to send Accepted email - let grantor_user = User::find_by_uuid(&emer.grantor_uuid, &conn).expect("Grantor user not found."); + let grantor_user = + User::find_by_uuid(&emer.grantor_uuid, &conn).await.expect("Grantor user not found."); // get grantee user to send Accepted email let grantee_user = User::find_by_uuid(&emer.grantee_uuid.clone().expect("Grantee user invalid."), &conn) + .await .expect("Grantee user not found."); mail::send_emergency_access_recovery_reminder( diff --git a/src/api/core/folders.rs b/src/api/core/folders.rs index e2a32f7b..3b6d2c3a 100644 --- a/src/api/core/folders.rs +++ b/src/api/core/folders.rs @@ -12,9 +12,8 @@ pub fn routes() -> Vec { } #[get("/folders")] -fn get_folders(headers: Headers, conn: DbConn) -> Json { - let folders = Folder::find_by_user(&headers.user.uuid, &conn); - +async fn get_folders(headers: Headers, conn: DbConn) -> Json { + let folders = Folder::find_by_user(&headers.user.uuid, &conn).await; let folders_json: Vec = folders.iter().map(Folder::to_json).collect(); Json(json!({ @@ -25,8 +24,8 @@ fn get_folders(headers: Headers, conn: DbConn) -> Json { } #[get("/folders/")] -fn get_folder(uuid: String, headers: Headers, conn: DbConn) -> JsonResult { - let folder = match Folder::find_by_uuid(&uuid, &conn) { +async fn get_folder(uuid: String, headers: Headers, conn: DbConn) -> JsonResult { + let folder = match Folder::find_by_uuid(&uuid, &conn).await { Some(folder) => folder, _ => err!("Invalid folder"), }; @@ -45,27 +44,39 @@ pub struct FolderData { } #[post("/folders", data = "")] -fn post_folders(data: JsonUpcase, headers: Headers, conn: DbConn, nt: Notify) -> JsonResult { +async fn post_folders(data: JsonUpcase, headers: Headers, conn: DbConn, nt: Notify<'_>) -> JsonResult { let data: FolderData = data.into_inner().data; let mut folder = Folder::new(headers.user.uuid, data.Name); - 
folder.save(&conn)?; + folder.save(&conn).await?; nt.send_folder_update(UpdateType::FolderCreate, &folder); Ok(Json(folder.to_json())) } #[post("/folders/", data = "")] -fn post_folder(uuid: String, data: JsonUpcase, headers: Headers, conn: DbConn, nt: Notify) -> JsonResult { - put_folder(uuid, data, headers, conn, nt) +async fn post_folder( + uuid: String, + data: JsonUpcase, + headers: Headers, + conn: DbConn, + nt: Notify<'_>, +) -> JsonResult { + put_folder(uuid, data, headers, conn, nt).await } #[put("/folders/", data = "")] -fn put_folder(uuid: String, data: JsonUpcase, headers: Headers, conn: DbConn, nt: Notify) -> JsonResult { +async fn put_folder( + uuid: String, + data: JsonUpcase, + headers: Headers, + conn: DbConn, + nt: Notify<'_>, +) -> JsonResult { let data: FolderData = data.into_inner().data; - let mut folder = match Folder::find_by_uuid(&uuid, &conn) { + let mut folder = match Folder::find_by_uuid(&uuid, &conn).await { Some(folder) => folder, _ => err!("Invalid folder"), }; @@ -76,20 +87,20 @@ fn put_folder(uuid: String, data: JsonUpcase, headers: Headers, conn folder.name = data.Name; - folder.save(&conn)?; + folder.save(&conn).await?; nt.send_folder_update(UpdateType::FolderUpdate, &folder); Ok(Json(folder.to_json())) } #[post("/folders//delete")] -fn delete_folder_post(uuid: String, headers: Headers, conn: DbConn, nt: Notify) -> EmptyResult { - delete_folder(uuid, headers, conn, nt) +async fn delete_folder_post(uuid: String, headers: Headers, conn: DbConn, nt: Notify<'_>) -> EmptyResult { + delete_folder(uuid, headers, conn, nt).await } #[delete("/folders/")] -fn delete_folder(uuid: String, headers: Headers, conn: DbConn, nt: Notify) -> EmptyResult { - let folder = match Folder::find_by_uuid(&uuid, &conn) { +async fn delete_folder(uuid: String, headers: Headers, conn: DbConn, nt: Notify<'_>) -> EmptyResult { + let folder = match Folder::find_by_uuid(&uuid, &conn).await { Some(folder) => folder, _ => err!("Invalid folder"), }; @@ -99,7 +110,7 @@ 
fn delete_folder(uuid: String, headers: Headers, conn: DbConn, nt: Notify) -> Em } // Delete the actual folder entry - folder.delete(&conn)?; + folder.delete(&conn).await?; nt.send_folder_update(UpdateType::FolderDelete, &folder); Ok(()) diff --git a/src/api/core/mod.rs b/src/api/core/mod.rs index 79556494..e34343d3 100644 --- a/src/api/core/mod.rs +++ b/src/api/core/mod.rs @@ -121,7 +121,7 @@ struct EquivDomainData { } #[post("/settings/domains", data = "")] -fn post_eq_domains(data: JsonUpcase, headers: Headers, conn: DbConn) -> JsonResult { +async fn post_eq_domains(data: JsonUpcase, headers: Headers, conn: DbConn) -> JsonResult { let data: EquivDomainData = data.into_inner().data; let excluded_globals = data.ExcludedGlobalEquivalentDomains.unwrap_or_default(); @@ -133,14 +133,14 @@ fn post_eq_domains(data: JsonUpcase, headers: Headers, conn: Db user.excluded_globals = to_string(&excluded_globals).unwrap_or_else(|_| "[]".to_string()); user.equivalent_domains = to_string(&equivalent_domains).unwrap_or_else(|_| "[]".to_string()); - user.save(&conn)?; + user.save(&conn).await?; Ok(Json(json!({}))) } #[put("/settings/domains", data = "")] -fn put_eq_domains(data: JsonUpcase, headers: Headers, conn: DbConn) -> JsonResult { - post_eq_domains(data, headers, conn) +async fn put_eq_domains(data: JsonUpcase, headers: Headers, conn: DbConn) -> JsonResult { + post_eq_domains(data, headers, conn).await } #[get("/hibp/breach?")] diff --git a/src/api/core/organizations.rs b/src/api/core/organizations.rs index 5716cbf0..bb6c6634 100644 --- a/src/api/core/organizations.rs +++ b/src/api/core/organizations.rs @@ -10,6 +10,8 @@ use crate::{ mail, CONFIG, }; +use futures::{stream, stream::StreamExt}; + pub fn routes() -> Vec { routes![ get_organization, @@ -98,11 +100,11 @@ struct OrgBulkIds { } #[post("/organizations", data = "")] -fn create_organization(headers: Headers, data: JsonUpcase, conn: DbConn) -> JsonResult { +async fn create_organization(headers: Headers, data: 
JsonUpcase, conn: DbConn) -> JsonResult { if !CONFIG.is_org_creation_allowed(&headers.user.email) { err!("User not allowed to create organizations") } - if OrgPolicy::is_applicable_to_user(&headers.user.uuid, OrgPolicyType::SingleOrg, &conn) { + if OrgPolicy::is_applicable_to_user(&headers.user.uuid, OrgPolicyType::SingleOrg, &conn).await { err!( "You may not create an organization. You belong to an organization which has a policy that prohibits you from being a member of any other organization." ) @@ -125,15 +127,15 @@ fn create_organization(headers: Headers, data: JsonUpcase, conn: DbConn user_org.atype = UserOrgType::Owner as i32; user_org.status = UserOrgStatus::Confirmed as i32; - org.save(&conn)?; - user_org.save(&conn)?; - collection.save(&conn)?; + org.save(&conn).await?; + user_org.save(&conn).await?; + collection.save(&conn).await?; Ok(Json(org.to_json())) } #[delete("/organizations/", data = "")] -fn delete_organization( +async fn delete_organization( org_id: String, data: JsonUpcase, headers: OwnerHeaders, @@ -146,61 +148,61 @@ fn delete_organization( err!("Invalid password") } - match Organization::find_by_uuid(&org_id, &conn) { + match Organization::find_by_uuid(&org_id, &conn).await { None => err!("Organization not found"), - Some(org) => org.delete(&conn), + Some(org) => org.delete(&conn).await, } } #[post("/organizations//delete", data = "")] -fn post_delete_organization( +async fn post_delete_organization( org_id: String, data: JsonUpcase, headers: OwnerHeaders, conn: DbConn, ) -> EmptyResult { - delete_organization(org_id, data, headers, conn) + delete_organization(org_id, data, headers, conn).await } #[post("/organizations//leave")] -fn leave_organization(org_id: String, headers: Headers, conn: DbConn) -> EmptyResult { - match UserOrganization::find_by_user_and_org(&headers.user.uuid, &org_id, &conn) { +async fn leave_organization(org_id: String, headers: Headers, conn: DbConn) -> EmptyResult { + match 
UserOrganization::find_by_user_and_org(&headers.user.uuid, &org_id, &conn).await { None => err!("User not part of organization"), Some(user_org) => { if user_org.atype == UserOrgType::Owner { let num_owners = - UserOrganization::find_by_org_and_type(&org_id, UserOrgType::Owner as i32, &conn).len(); + UserOrganization::find_by_org_and_type(&org_id, UserOrgType::Owner as i32, &conn).await.len(); if num_owners <= 1 { err!("The last owner can't leave") } } - user_org.delete(&conn) + user_org.delete(&conn).await } } } #[get("/organizations/")] -fn get_organization(org_id: String, _headers: OwnerHeaders, conn: DbConn) -> JsonResult { - match Organization::find_by_uuid(&org_id, &conn) { +async fn get_organization(org_id: String, _headers: OwnerHeaders, conn: DbConn) -> JsonResult { + match Organization::find_by_uuid(&org_id, &conn).await { Some(organization) => Ok(Json(organization.to_json())), None => err!("Can't find organization details"), } } #[put("/organizations/", data = "")] -fn put_organization( +async fn put_organization( org_id: String, headers: OwnerHeaders, data: JsonUpcase, conn: DbConn, ) -> JsonResult { - post_organization(org_id, headers, data, conn) + post_organization(org_id, headers, data, conn).await } #[post("/organizations/", data = "")] -fn post_organization( +async fn post_organization( org_id: String, _headers: OwnerHeaders, data: JsonUpcase, @@ -208,7 +210,7 @@ fn post_organization( ) -> JsonResult { let data: OrganizationUpdateData = data.into_inner().data; - let mut org = match Organization::find_by_uuid(&org_id, &conn) { + let mut org = match Organization::find_by_uuid(&org_id, &conn).await { Some(organization) => organization, None => err!("Can't find organization details"), }; @@ -216,16 +218,16 @@ fn post_organization( org.name = data.Name; org.billing_email = data.BillingEmail; - org.save(&conn)?; + org.save(&conn).await?; Ok(Json(org.to_json())) } // GET /api/collections?writeOnly=false #[get("/collections")] -fn 
get_user_collections(headers: Headers, conn: DbConn) -> Json { +async fn get_user_collections(headers: Headers, conn: DbConn) -> Json { Json(json!({ "Data": - Collection::find_by_user_uuid(&headers.user.uuid, &conn) + Collection::find_by_user_uuid(&headers.user.uuid, &conn).await .iter() .map(Collection::to_json) .collect::(), @@ -235,10 +237,10 @@ fn get_user_collections(headers: Headers, conn: DbConn) -> Json { } #[get("/organizations//collections")] -fn get_org_collections(org_id: String, _headers: ManagerHeadersLoose, conn: DbConn) -> Json { +async fn get_org_collections(org_id: String, _headers: ManagerHeadersLoose, conn: DbConn) -> Json { Json(json!({ "Data": - Collection::find_by_organization(&org_id, &conn) + Collection::find_by_organization(&org_id, &conn).await .iter() .map(Collection::to_json) .collect::(), @@ -248,7 +250,7 @@ fn get_org_collections(org_id: String, _headers: ManagerHeadersLoose, conn: DbCo } #[post("/organizations//collections", data = "")] -fn post_organization_collections( +async fn post_organization_collections( org_id: String, headers: ManagerHeadersLoose, data: JsonUpcase, @@ -256,43 +258,43 @@ fn post_organization_collections( ) -> JsonResult { let data: NewCollectionData = data.into_inner().data; - let org = match Organization::find_by_uuid(&org_id, &conn) { + let org = match Organization::find_by_uuid(&org_id, &conn).await { Some(organization) => organization, None => err!("Can't find organization details"), }; // Get the user_organization record so that we can check if the user has access to all collections. 
- let user_org = match UserOrganization::find_by_user_and_org(&headers.user.uuid, &org_id, &conn) { + let user_org = match UserOrganization::find_by_user_and_org(&headers.user.uuid, &org_id, &conn).await { Some(u) => u, None => err!("User is not part of organization"), }; let collection = Collection::new(org.uuid, data.Name); - collection.save(&conn)?; + collection.save(&conn).await?; // If the user doesn't have access to all collections, only in case of a Manger, // then we need to save the creating user uuid (Manager) to the users_collection table. // Else the user will not have access to his own created collection. if !user_org.access_all { - CollectionUser::save(&headers.user.uuid, &collection.uuid, false, false, &conn)?; + CollectionUser::save(&headers.user.uuid, &collection.uuid, false, false, &conn).await?; } Ok(Json(collection.to_json())) } #[put("/organizations//collections/", data = "")] -fn put_organization_collection_update( +async fn put_organization_collection_update( org_id: String, col_id: String, headers: ManagerHeaders, data: JsonUpcase, conn: DbConn, ) -> JsonResult { - post_organization_collection_update(org_id, col_id, headers, data, conn) + post_organization_collection_update(org_id, col_id, headers, data, conn).await } #[post("/organizations//collections/", data = "")] -fn post_organization_collection_update( +async fn post_organization_collection_update( org_id: String, col_id: String, _headers: ManagerHeaders, @@ -301,12 +303,12 @@ fn post_organization_collection_update( ) -> JsonResult { let data: NewCollectionData = data.into_inner().data; - let org = match Organization::find_by_uuid(&org_id, &conn) { + let org = match Organization::find_by_uuid(&org_id, &conn).await { Some(organization) => organization, None => err!("Can't find organization details"), }; - let mut collection = match Collection::find_by_uuid(&col_id, &conn) { + let mut collection = match Collection::find_by_uuid(&col_id, &conn).await { Some(collection) => collection, None 
=> err!("Collection not found"), }; @@ -316,20 +318,20 @@ fn post_organization_collection_update( } collection.name = data.Name; - collection.save(&conn)?; + collection.save(&conn).await?; Ok(Json(collection.to_json())) } #[delete("/organizations//collections//user/")] -fn delete_organization_collection_user( +async fn delete_organization_collection_user( org_id: String, col_id: String, org_user_id: String, _headers: AdminHeaders, conn: DbConn, ) -> EmptyResult { - let collection = match Collection::find_by_uuid(&col_id, &conn) { + let collection = match Collection::find_by_uuid(&col_id, &conn).await { None => err!("Collection not found"), Some(collection) => { if collection.org_uuid == org_id { @@ -340,40 +342,40 @@ fn delete_organization_collection_user( } }; - match UserOrganization::find_by_uuid_and_org(&org_user_id, &org_id, &conn) { + match UserOrganization::find_by_uuid_and_org(&org_user_id, &org_id, &conn).await { None => err!("User not found in organization"), Some(user_org) => { - match CollectionUser::find_by_collection_and_user(&collection.uuid, &user_org.user_uuid, &conn) { + match CollectionUser::find_by_collection_and_user(&collection.uuid, &user_org.user_uuid, &conn).await { None => err!("User not assigned to collection"), - Some(col_user) => col_user.delete(&conn), + Some(col_user) => col_user.delete(&conn).await, } } } } #[post("/organizations//collections//delete-user/")] -fn post_organization_collection_delete_user( +async fn post_organization_collection_delete_user( org_id: String, col_id: String, org_user_id: String, headers: AdminHeaders, conn: DbConn, ) -> EmptyResult { - delete_organization_collection_user(org_id, col_id, org_user_id, headers, conn) + delete_organization_collection_user(org_id, col_id, org_user_id, headers, conn).await } #[delete("/organizations//collections/")] -fn delete_organization_collection( +async fn delete_organization_collection( org_id: String, col_id: String, _headers: ManagerHeaders, conn: DbConn, ) -> 
EmptyResult { - match Collection::find_by_uuid(&col_id, &conn) { + match Collection::find_by_uuid(&col_id, &conn).await { None => err!("Collection not found"), Some(collection) => { if collection.org_uuid == org_id { - collection.delete(&conn) + collection.delete(&conn).await } else { err!("Collection and Organization id do not match") } @@ -389,19 +391,24 @@ struct DeleteCollectionData { } #[post("/organizations//collections//delete", data = "<_data>")] -fn post_organization_collection_delete( +async fn post_organization_collection_delete( org_id: String, col_id: String, headers: ManagerHeaders, _data: JsonUpcase, conn: DbConn, ) -> EmptyResult { - delete_organization_collection(org_id, col_id, headers, conn) + delete_organization_collection(org_id, col_id, headers, conn).await } #[get("/organizations//collections//details")] -fn get_org_collection_detail(org_id: String, coll_id: String, headers: ManagerHeaders, conn: DbConn) -> JsonResult { - match Collection::find_by_uuid_and_user(&coll_id, &headers.user.uuid, &conn) { +async fn get_org_collection_detail( + org_id: String, + coll_id: String, + headers: ManagerHeaders, + conn: DbConn, +) -> JsonResult { + match Collection::find_by_uuid_and_user(&coll_id, &headers.user.uuid, &conn).await { None => err!("Collection not found"), Some(collection) => { if collection.org_uuid != org_id { @@ -414,28 +421,29 @@ fn get_org_collection_detail(org_id: String, coll_id: String, headers: ManagerHe } #[get("/organizations//collections//users")] -fn get_collection_users(org_id: String, coll_id: String, _headers: ManagerHeaders, conn: DbConn) -> JsonResult { +async fn get_collection_users(org_id: String, coll_id: String, _headers: ManagerHeaders, conn: DbConn) -> JsonResult { // Get org and collection, check that collection is from org - let collection = match Collection::find_by_uuid_and_org(&coll_id, &org_id, &conn) { + let collection = match Collection::find_by_uuid_and_org(&coll_id, &org_id, &conn).await { None => 
err!("Collection not found in Organization"), Some(collection) => collection, }; - // Get the users from collection - let user_list: Vec = CollectionUser::find_by_collection(&collection.uuid, &conn) - .iter() - .map(|col_user| { + let user_list = stream::iter(CollectionUser::find_by_collection(&collection.uuid, &conn).await) + .then(|col_user| async { + let col_user = col_user; // Move out this single variable UserOrganization::find_by_user_and_org(&col_user.user_uuid, &org_id, &conn) + .await .unwrap() - .to_json_user_access_restrictions(col_user) + .to_json_user_access_restrictions(&col_user) }) - .collect(); + .collect::>() + .await; Ok(Json(json!(user_list))) } #[put("/organizations//collections//users", data = "")] -fn put_collection_users( +async fn put_collection_users( org_id: String, coll_id: String, data: JsonUpcaseVec, @@ -443,16 +451,16 @@ fn put_collection_users( conn: DbConn, ) -> EmptyResult { // Get org and collection, check that collection is from org - if Collection::find_by_uuid_and_org(&coll_id, &org_id, &conn).is_none() { + if Collection::find_by_uuid_and_org(&coll_id, &org_id, &conn).await.is_none() { err!("Collection not found in Organization") } // Delete all the user-collections - CollectionUser::delete_all_by_collection(&coll_id, &conn)?; + CollectionUser::delete_all_by_collection(&coll_id, &conn).await?; // And then add all the received ones (except if the user has access_all) for d in data.iter().map(|d| &d.data) { - let user = match UserOrganization::find_by_uuid(&d.Id, &conn) { + let user = match UserOrganization::find_by_uuid(&d.Id, &conn).await { Some(u) => u, None => err!("User is not part of organization"), }; @@ -461,7 +469,7 @@ fn put_collection_users( continue; } - CollectionUser::save(&user.user_uuid, &coll_id, d.ReadOnly, d.HidePasswords, &conn)?; + CollectionUser::save(&user.user_uuid, &coll_id, d.ReadOnly, d.HidePasswords, &conn).await?; } Ok(()) @@ -474,10 +482,14 @@ struct OrgIdData { } 
#[get("/ciphers/organization-details?")] -fn get_org_details(data: OrgIdData, headers: Headers, conn: DbConn) -> Json { - let ciphers = Cipher::find_by_org(&data.organization_id, &conn); - let ciphers_json: Vec = - ciphers.iter().map(|c| c.to_json(&headers.host, &headers.user.uuid, &conn)).collect(); +async fn get_org_details(data: OrgIdData, headers: Headers, conn: DbConn) -> Json { + let ciphers_json = stream::iter(Cipher::find_by_org(&data.organization_id, &conn).await) + .then(|c| async { + let c = c; // Move out this single variable + c.to_json(&headers.host, &headers.user.uuid, &conn).await + }) + .collect::>() + .await; Json(json!({ "Data": ciphers_json, @@ -487,9 +499,14 @@ fn get_org_details(data: OrgIdData, headers: Headers, conn: DbConn) -> Json/users")] -fn get_org_users(org_id: String, _headers: ManagerHeadersLoose, conn: DbConn) -> Json { - let users = UserOrganization::find_by_org(&org_id, &conn); - let users_json: Vec = users.iter().map(|c| c.to_json_user_details(&conn)).collect(); +async fn get_org_users(org_id: String, _headers: ManagerHeadersLoose, conn: DbConn) -> Json { + let users_json = stream::iter(UserOrganization::find_by_org(&org_id, &conn).await) + .then(|u| async { + let u = u; // Move out this single variable + u.to_json_user_details(&conn).await + }) + .collect::>() + .await; Json(json!({ "Data": users_json, @@ -499,10 +516,15 @@ fn get_org_users(org_id: String, _headers: ManagerHeadersLoose, conn: DbConn) -> } #[post("/organizations//keys", data = "")] -fn post_org_keys(org_id: String, data: JsonUpcase, _headers: AdminHeaders, conn: DbConn) -> JsonResult { +async fn post_org_keys( + org_id: String, + data: JsonUpcase, + _headers: AdminHeaders, + conn: DbConn, +) -> JsonResult { let data: OrgKeyData = data.into_inner().data; - let mut org = match Organization::find_by_uuid(&org_id, &conn) { + let mut org = match Organization::find_by_uuid(&org_id, &conn).await { Some(organization) => { if organization.private_key.is_some() && 
organization.public_key.is_some() { err!("Organization Keys already exist") @@ -515,7 +537,7 @@ fn post_org_keys(org_id: String, data: JsonUpcase, _headers: AdminHe org.private_key = Some(data.EncryptedPrivateKey); org.public_key = Some(data.PublicKey); - org.save(&conn)?; + org.save(&conn).await?; Ok(Json(json!({ "Object": "organizationKeys", @@ -542,7 +564,7 @@ struct InviteData { } #[post("/organizations//users/invite", data = "")] -fn send_invite(org_id: String, data: JsonUpcase, headers: AdminHeaders, conn: DbConn) -> EmptyResult { +async fn send_invite(org_id: String, data: JsonUpcase, headers: AdminHeaders, conn: DbConn) -> EmptyResult { let data: InviteData = data.into_inner().data; let new_type = match UserOrgType::from_str(&data.Type.into_string()) { @@ -561,7 +583,7 @@ fn send_invite(org_id: String, data: JsonUpcase, headers: AdminHeade } else { UserOrgStatus::Accepted as i32 // Automatically mark user as accepted if no email invites }; - let user = match User::find_by_mail(&email, &conn) { + let user = match User::find_by_mail(&email, &conn).await { None => { if !CONFIG.invitations_allowed() { err!(format!("User does not exist: {}", email)) @@ -573,16 +595,16 @@ fn send_invite(org_id: String, data: JsonUpcase, headers: AdminHeade if !CONFIG.mail_enabled() { let invitation = Invitation::new(email.clone()); - invitation.save(&conn)?; + invitation.save(&conn).await?; } let mut user = User::new(email.clone()); - user.save(&conn)?; + user.save(&conn).await?; user_org_status = UserOrgStatus::Invited as i32; user } Some(user) => { - if UserOrganization::find_by_user_and_org(&user.uuid, &org_id, &conn).is_some() { + if UserOrganization::find_by_user_and_org(&user.uuid, &org_id, &conn).await.is_some() { err!(format!("User already in organization: {}", email)) } else { user @@ -599,19 +621,20 @@ fn send_invite(org_id: String, data: JsonUpcase, headers: AdminHeade // If no accessAll, add the collections received if !access_all { for col in 
data.Collections.iter().flatten() { - match Collection::find_by_uuid_and_org(&col.Id, &org_id, &conn) { + match Collection::find_by_uuid_and_org(&col.Id, &org_id, &conn).await { None => err!("Collection not found in Organization"), Some(collection) => { - CollectionUser::save(&user.uuid, &collection.uuid, col.ReadOnly, col.HidePasswords, &conn)?; + CollectionUser::save(&user.uuid, &collection.uuid, col.ReadOnly, col.HidePasswords, &conn) + .await?; } } } } - new_user.save(&conn)?; + new_user.save(&conn).await?; if CONFIG.mail_enabled() { - let org_name = match Organization::find_by_uuid(&org_id, &conn) { + let org_name = match Organization::find_by_uuid(&org_id, &conn).await { Some(org) => org.name, None => err!("Error looking up organization"), }; @@ -631,7 +654,7 @@ fn send_invite(org_id: String, data: JsonUpcase, headers: AdminHeade } #[post("/organizations//users/reinvite", data = "")] -fn bulk_reinvite_user( +async fn bulk_reinvite_user( org_id: String, data: JsonUpcase, headers: AdminHeaders, @@ -641,7 +664,7 @@ fn bulk_reinvite_user( let mut bulk_response = Vec::new(); for org_user_id in data.Ids { - let err_msg = match _reinvite_user(&org_id, &org_user_id, &headers.user.email, &conn) { + let err_msg = match _reinvite_user(&org_id, &org_user_id, &headers.user.email, &conn).await { Ok(_) => String::from(""), Err(e) => format!("{:?}", e), }; @@ -663,11 +686,11 @@ fn bulk_reinvite_user( } #[post("/organizations//users//reinvite")] -fn reinvite_user(org_id: String, user_org: String, headers: AdminHeaders, conn: DbConn) -> EmptyResult { - _reinvite_user(&org_id, &user_org, &headers.user.email, &conn) +async fn reinvite_user(org_id: String, user_org: String, headers: AdminHeaders, conn: DbConn) -> EmptyResult { + _reinvite_user(&org_id, &user_org, &headers.user.email, &conn).await } -fn _reinvite_user(org_id: &str, user_org: &str, invited_by_email: &str, conn: &DbConn) -> EmptyResult { +async fn _reinvite_user(org_id: &str, user_org: &str, invited_by_email: &str, 
conn: &DbConn) -> EmptyResult { if !CONFIG.invitations_allowed() { err!("Invitations are not allowed.") } @@ -676,7 +699,7 @@ fn _reinvite_user(org_id: &str, user_org: &str, invited_by_email: &str, conn: &D err!("SMTP is not configured.") } - let user_org = match UserOrganization::find_by_uuid(user_org, conn) { + let user_org = match UserOrganization::find_by_uuid(user_org, conn).await { Some(user_org) => user_org, None => err!("The user hasn't been invited to the organization."), }; @@ -685,12 +708,12 @@ fn _reinvite_user(org_id: &str, user_org: &str, invited_by_email: &str, conn: &D err!("The user is already accepted or confirmed to the organization") } - let user = match User::find_by_uuid(&user_org.user_uuid, conn) { + let user = match User::find_by_uuid(&user_org.user_uuid, conn).await { Some(user) => user, None => err!("User not found."), }; - let org_name = match Organization::find_by_uuid(org_id, conn) { + let org_name = match Organization::find_by_uuid(org_id, conn).await { Some(org) => org.name, None => err!("Error looking up organization."), }; @@ -706,7 +729,7 @@ fn _reinvite_user(org_id: &str, user_org: &str, invited_by_email: &str, conn: &D )?; } else { let invitation = Invitation::new(user.email); - invitation.save(conn)?; + invitation.save(conn).await?; } Ok(()) @@ -719,18 +742,23 @@ struct AcceptData { } #[post("/organizations/<_org_id>/users/<_org_user_id>/accept", data = "")] -fn accept_invite(_org_id: String, _org_user_id: String, data: JsonUpcase, conn: DbConn) -> EmptyResult { +async fn accept_invite( + _org_id: String, + _org_user_id: String, + data: JsonUpcase, + conn: DbConn, +) -> EmptyResult { // The web-vault passes org_id and org_user_id in the URL, but we are just reading them from the JWT instead let data: AcceptData = data.into_inner().data; let token = &data.Token; let claims = decode_invite(token)?; - match User::find_by_mail(&claims.email, &conn) { + match User::find_by_mail(&claims.email, &conn).await { Some(_) => { - 
Invitation::take(&claims.email, &conn); + Invitation::take(&claims.email, &conn).await; if let (Some(user_org), Some(org)) = (&claims.user_org_id, &claims.org_id) { - let mut user_org = match UserOrganization::find_by_uuid_and_org(user_org, org, &conn) { + let mut user_org = match UserOrganization::find_by_uuid_and_org(user_org, org, &conn).await { Some(user_org) => user_org, None => err!("Error accepting the invitation"), }; @@ -739,11 +767,11 @@ fn accept_invite(_org_id: String, _org_user_id: String, data: JsonUpcase p.enabled, None => false, }; @@ -754,12 +782,15 @@ fn accept_invite(_org_id: String, _org_user_id: String, data: JsonUpcase p.enabled, None => false, }; if single_org_policy_enabled && user_org.atype < UserOrgType::Admin { let is_member_of_another_org = UserOrganization::find_any_state_by_user(&user_org.user_uuid, &conn) + .await .into_iter() .filter(|uo| uo.org_uuid != user_org.org_uuid) .count() @@ -770,14 +801,14 @@ fn accept_invite(_org_id: String, _org_user_id: String, data: JsonUpcase err!("Invited user not found"), @@ -786,7 +817,7 @@ fn accept_invite(_org_id: String, _org_user_id: String, data: JsonUpcase org.name, None => err!("Organization not found."), }; @@ -804,7 +835,12 @@ fn accept_invite(_org_id: String, _org_user_id: String, data: JsonUpcase/users/confirm", data = "")] -fn bulk_confirm_invite(org_id: String, data: JsonUpcase, headers: AdminHeaders, conn: DbConn) -> Json { +async fn bulk_confirm_invite( + org_id: String, + data: JsonUpcase, + headers: AdminHeaders, + conn: DbConn, +) -> Json { let data = data.into_inner().data; let mut bulk_response = Vec::new(); @@ -813,7 +849,7 @@ fn bulk_confirm_invite(org_id: String, data: JsonUpcase, headers: AdminHe for invite in keys { let org_user_id = invite["Id"].as_str().unwrap_or_default(); let user_key = invite["Key"].as_str().unwrap_or_default(); - let err_msg = match _confirm_invite(&org_id, org_user_id, user_key, &headers, &conn) { + let err_msg = match _confirm_invite(&org_id, 
org_user_id, user_key, &headers, &conn).await { Ok(_) => String::from(""), Err(e) => format!("{:?}", e), }; @@ -838,7 +874,7 @@ fn bulk_confirm_invite(org_id: String, data: JsonUpcase, headers: AdminHe } #[post("/organizations//users//confirm", data = "")] -fn confirm_invite( +async fn confirm_invite( org_id: String, org_user_id: String, data: JsonUpcase, @@ -847,15 +883,21 @@ fn confirm_invite( ) -> EmptyResult { let data = data.into_inner().data; let user_key = data["Key"].as_str().unwrap_or_default(); - _confirm_invite(&org_id, &org_user_id, user_key, &headers, &conn) + _confirm_invite(&org_id, &org_user_id, user_key, &headers, &conn).await } -fn _confirm_invite(org_id: &str, org_user_id: &str, key: &str, headers: &AdminHeaders, conn: &DbConn) -> EmptyResult { +async fn _confirm_invite( + org_id: &str, + org_user_id: &str, + key: &str, + headers: &AdminHeaders, + conn: &DbConn, +) -> EmptyResult { if key.is_empty() || org_user_id.is_empty() { err!("Key or UserId is not set, unable to process request"); } - let mut user_to_confirm = match UserOrganization::find_by_uuid_and_org(org_user_id, org_id, conn) { + let mut user_to_confirm = match UserOrganization::find_by_uuid_and_org(org_user_id, org_id, conn).await { Some(user) => user, None => err!("The specified user isn't a member of the organization"), }; @@ -872,28 +914,28 @@ fn _confirm_invite(org_id: &str, org_user_id: &str, key: &str, headers: &AdminHe user_to_confirm.akey = key.to_string(); if CONFIG.mail_enabled() { - let org_name = match Organization::find_by_uuid(org_id, conn) { + let org_name = match Organization::find_by_uuid(org_id, conn).await { Some(org) => org.name, None => err!("Error looking up organization."), }; - let address = match User::find_by_uuid(&user_to_confirm.user_uuid, conn) { + let address = match User::find_by_uuid(&user_to_confirm.user_uuid, conn).await { Some(user) => user.email, None => err!("Error looking up user."), }; mail::send_invite_confirmed(&address, &org_name)?; } - 
user_to_confirm.save(conn) + user_to_confirm.save(conn).await } #[get("/organizations//users/")] -fn get_user(org_id: String, org_user_id: String, _headers: AdminHeaders, conn: DbConn) -> JsonResult { - let user = match UserOrganization::find_by_uuid_and_org(&org_user_id, &org_id, &conn) { +async fn get_user(org_id: String, org_user_id: String, _headers: AdminHeaders, conn: DbConn) -> JsonResult { + let user = match UserOrganization::find_by_uuid_and_org(&org_user_id, &org_id, &conn).await { Some(user) => user, None => err!("The specified user isn't a member of the organization"), }; - Ok(Json(user.to_json_details(&conn))) + Ok(Json(user.to_json_details(&conn).await)) } #[derive(Deserialize)] @@ -905,18 +947,18 @@ struct EditUserData { } #[put("/organizations//users/", data = "", rank = 1)] -fn put_organization_user( +async fn put_organization_user( org_id: String, org_user_id: String, data: JsonUpcase, headers: AdminHeaders, conn: DbConn, ) -> EmptyResult { - edit_user(org_id, org_user_id, data, headers, conn) + edit_user(org_id, org_user_id, data, headers, conn).await } #[post("/organizations//users/", data = "", rank = 1)] -fn edit_user( +async fn edit_user( org_id: String, org_user_id: String, data: JsonUpcase, @@ -930,7 +972,7 @@ fn edit_user( None => err!("Invalid type"), }; - let mut user_to_edit = match UserOrganization::find_by_uuid_and_org(&org_user_id, &org_id, &conn) { + let mut user_to_edit = match UserOrganization::find_by_uuid_and_org(&org_user_id, &org_id, &conn).await { Some(user) => user, None => err!("The specified user isn't member of the organization"), }; @@ -948,7 +990,7 @@ fn edit_user( if user_to_edit.atype == UserOrgType::Owner && new_type != UserOrgType::Owner { // Removing owner permmission, check that there are at least another owner - let num_owners = UserOrganization::find_by_org_and_type(&org_id, UserOrgType::Owner as i32, &conn).len(); + let num_owners = UserOrganization::find_by_org_and_type(&org_id, UserOrgType::Owner as i32, 
&conn).await.len(); if num_owners <= 1 { err!("Can't delete the last owner") @@ -959,14 +1001,14 @@ fn edit_user( user_to_edit.atype = new_type as i32; // Delete all the odd collections - for c in CollectionUser::find_by_organization_and_user_uuid(&org_id, &user_to_edit.user_uuid, &conn) { - c.delete(&conn)?; + for c in CollectionUser::find_by_organization_and_user_uuid(&org_id, &user_to_edit.user_uuid, &conn).await { + c.delete(&conn).await?; } // If no accessAll, add the collections received if !data.AccessAll { for col in data.Collections.iter().flatten() { - match Collection::find_by_uuid_and_org(&col.Id, &org_id, &conn) { + match Collection::find_by_uuid_and_org(&col.Id, &org_id, &conn).await { None => err!("Collection not found in Organization"), Some(collection) => { CollectionUser::save( @@ -975,22 +1017,28 @@ fn edit_user( col.ReadOnly, col.HidePasswords, &conn, - )?; + ) + .await?; } } } } - user_to_edit.save(&conn) + user_to_edit.save(&conn).await } #[delete("/organizations//users", data = "")] -fn bulk_delete_user(org_id: String, data: JsonUpcase, headers: AdminHeaders, conn: DbConn) -> Json { +async fn bulk_delete_user( + org_id: String, + data: JsonUpcase, + headers: AdminHeaders, + conn: DbConn, +) -> Json { let data: OrgBulkIds = data.into_inner().data; let mut bulk_response = Vec::new(); for org_user_id in data.Ids { - let err_msg = match _delete_user(&org_id, &org_user_id, &headers, &conn) { + let err_msg = match _delete_user(&org_id, &org_user_id, &headers, &conn).await { Ok(_) => String::from(""), Err(e) => format!("{:?}", e), }; @@ -1012,12 +1060,12 @@ fn bulk_delete_user(org_id: String, data: JsonUpcase, headers: Admin } #[delete("/organizations//users/")] -fn delete_user(org_id: String, org_user_id: String, headers: AdminHeaders, conn: DbConn) -> EmptyResult { - _delete_user(&org_id, &org_user_id, &headers, &conn) +async fn delete_user(org_id: String, org_user_id: String, headers: AdminHeaders, conn: DbConn) -> EmptyResult { + 
_delete_user(&org_id, &org_user_id, &headers, &conn).await } -fn _delete_user(org_id: &str, org_user_id: &str, headers: &AdminHeaders, conn: &DbConn) -> EmptyResult { - let user_to_delete = match UserOrganization::find_by_uuid_and_org(org_user_id, org_id, conn) { +async fn _delete_user(org_id: &str, org_user_id: &str, headers: &AdminHeaders, conn: &DbConn) -> EmptyResult { + let user_to_delete = match UserOrganization::find_by_uuid_and_org(org_user_id, org_id, conn).await { Some(user) => user, None => err!("User to delete isn't member of the organization"), }; @@ -1028,23 +1076,28 @@ fn _delete_user(org_id: &str, org_user_id: &str, headers: &AdminHeaders, conn: & if user_to_delete.atype == UserOrgType::Owner { // Removing owner, check that there are at least another owner - let num_owners = UserOrganization::find_by_org_and_type(org_id, UserOrgType::Owner as i32, conn).len(); + let num_owners = UserOrganization::find_by_org_and_type(org_id, UserOrgType::Owner as i32, conn).await.len(); if num_owners <= 1 { err!("Can't delete the last owner") } } - user_to_delete.delete(conn) + user_to_delete.delete(conn).await } #[post("/organizations//users//delete")] -fn post_delete_user(org_id: String, org_user_id: String, headers: AdminHeaders, conn: DbConn) -> EmptyResult { - delete_user(org_id, org_user_id, headers, conn) +async fn post_delete_user(org_id: String, org_user_id: String, headers: AdminHeaders, conn: DbConn) -> EmptyResult { + delete_user(org_id, org_user_id, headers, conn).await } #[post("/organizations//users/public-keys", data = "")] -fn bulk_public_keys(org_id: String, data: JsonUpcase, _headers: AdminHeaders, conn: DbConn) -> Json { +async fn bulk_public_keys( + org_id: String, + data: JsonUpcase, + _headers: AdminHeaders, + conn: DbConn, +) -> Json { let data: OrgBulkIds = data.into_inner().data; let mut bulk_response = Vec::new(); @@ -1052,8 +1105,8 @@ fn bulk_public_keys(org_id: String, data: JsonUpcase, _headers: Admi // If the user does not exists, just 
ignore it, and do not return any information regarding that UserOrg UUID. // The web-vault will then ignore that user for the folowing steps. for user_org_id in data.Ids { - match UserOrganization::find_by_uuid_and_org(&user_org_id, &org_id, &conn) { - Some(user_org) => match User::find_by_uuid(&user_org.user_uuid, &conn) { + match UserOrganization::find_by_uuid_and_org(&user_org_id, &org_id, &conn).await { + Some(user_org) => match User::find_by_uuid(&user_org.user_uuid, &conn).await { Some(user) => bulk_response.push(json!( { "Object": "organizationUserPublicKeyResponseModel", @@ -1096,29 +1149,27 @@ struct RelationsData { } #[post("/ciphers/import-organization?", data = "")] -fn post_org_import( +async fn post_org_import( query: OrgIdData, data: JsonUpcase, headers: AdminHeaders, conn: DbConn, - nt: Notify, + nt: Notify<'_>, ) -> EmptyResult { let data: ImportData = data.into_inner().data; let org_id = query.organization_id; - // Read and create the collections - let collections: Vec<_> = data - .Collections - .into_iter() - .map(|coll| { + let collections = stream::iter(data.Collections) + .then(|coll| async { let collection = Collection::new(org_id.clone(), coll.Name); - if collection.save(&conn).is_err() { + if collection.save(&conn).await.is_err() { err!("Failed to create Collection"); } Ok(collection) }) - .collect(); + .collect::>() + .await; // Read the relations between collections and ciphers let mut relations = Vec::new(); @@ -1128,17 +1179,16 @@ fn post_org_import( let headers: Headers = headers.into(); - // Read and create the ciphers - let ciphers: Vec<_> = data - .Ciphers - .into_iter() - .map(|cipher_data| { + let ciphers = stream::iter(data.Ciphers) + .then(|cipher_data| async { let mut cipher = Cipher::new(cipher_data.Type, cipher_data.Name.clone()); update_cipher_from_data(&mut cipher, cipher_data, &headers, false, &conn, &nt, UpdateType::CipherCreate) + .await .ok(); cipher }) - .collect(); + .collect::>() + .await; // Assign the collections 
for (cipher_index, coll_index) in relations { @@ -1149,16 +1199,16 @@ fn post_org_import( Err(_) => err!("Failed to assign to collection"), }; - CollectionCipher::save(cipher_id, coll_id, &conn)?; + CollectionCipher::save(cipher_id, coll_id, &conn).await?; } let mut user = headers.user; - user.update_revision(&conn) + user.update_revision(&conn).await } #[get("/organizations//policies")] -fn list_policies(org_id: String, _headers: AdminHeaders, conn: DbConn) -> Json { - let policies = OrgPolicy::find_by_org(&org_id, &conn); +async fn list_policies(org_id: String, _headers: AdminHeaders, conn: DbConn) -> Json { + let policies = OrgPolicy::find_by_org(&org_id, &conn).await; let policies_json: Vec = policies.iter().map(OrgPolicy::to_json).collect(); Json(json!({ @@ -1169,7 +1219,7 @@ fn list_policies(org_id: String, _headers: AdminHeaders, conn: DbConn) -> Json/policies/token?")] -fn list_policies_token(org_id: String, token: String, conn: DbConn) -> JsonResult { +async fn list_policies_token(org_id: String, token: String, conn: DbConn) -> JsonResult { let invite = crate::auth::decode_invite(&token)?; let invite_org_id = match invite.org_id { @@ -1182,7 +1232,7 @@ fn list_policies_token(org_id: String, token: String, conn: DbConn) -> JsonResul } // TODO: We receive the invite token as ?token=<>, validate it contains the org id - let policies = OrgPolicy::find_by_org(&org_id, &conn); + let policies = OrgPolicy::find_by_org(&org_id, &conn).await; let policies_json: Vec = policies.iter().map(OrgPolicy::to_json).collect(); Ok(Json(json!({ @@ -1193,13 +1243,13 @@ fn list_policies_token(org_id: String, token: String, conn: DbConn) -> JsonResul } #[get("/organizations//policies/")] -fn get_policy(org_id: String, pol_type: i32, _headers: AdminHeaders, conn: DbConn) -> JsonResult { +async fn get_policy(org_id: String, pol_type: i32, _headers: AdminHeaders, conn: DbConn) -> JsonResult { let pol_type_enum = match OrgPolicyType::from_i32(pol_type) { Some(pt) => pt, None => 
err!("Invalid or unsupported policy type"), }; - let policy = match OrgPolicy::find_by_org_and_type(&org_id, pol_type, &conn) { + let policy = match OrgPolicy::find_by_org_and_type(&org_id, pol_type, &conn).await { Some(p) => p, None => OrgPolicy::new(org_id, pol_type_enum, "{}".to_string()), }; @@ -1216,7 +1266,7 @@ struct PolicyData { } #[put("/organizations//policies/", data = "")] -fn put_policy( +async fn put_policy( org_id: String, pol_type: i32, data: Json, @@ -1232,10 +1282,8 @@ fn put_policy( // If enabling the TwoFactorAuthentication policy, remove this org's members that do have 2FA if pol_type_enum == OrgPolicyType::TwoFactorAuthentication && data.enabled { - let org_members = UserOrganization::find_by_org(&org_id, &conn); - - for member in org_members.into_iter() { - let user_twofactor_disabled = TwoFactor::find_by_user(&member.user_uuid, &conn).is_empty(); + for member in UserOrganization::find_by_org(&org_id, &conn).await.into_iter() { + let user_twofactor_disabled = TwoFactor::find_by_user(&member.user_uuid, &conn).await.is_empty(); // Policy only applies to non-Owner/non-Admin members who have accepted joining the org if user_twofactor_disabled @@ -1243,24 +1291,23 @@ fn put_policy( && member.status != UserOrgStatus::Invited as i32 { if CONFIG.mail_enabled() { - let org = Organization::find_by_uuid(&member.org_uuid, &conn).unwrap(); - let user = User::find_by_uuid(&member.user_uuid, &conn).unwrap(); + let org = Organization::find_by_uuid(&member.org_uuid, &conn).await.unwrap(); + let user = User::find_by_uuid(&member.user_uuid, &conn).await.unwrap(); mail::send_2fa_removed_from_org(&user.email, &org.name)?; } - member.delete(&conn)?; + member.delete(&conn).await?; } } } // If enabling the SingleOrg policy, remove this org's members that are members of other orgs if pol_type_enum == OrgPolicyType::SingleOrg && data.enabled { - let org_members = UserOrganization::find_by_org(&org_id, &conn); - - for member in org_members.into_iter() { + for member in 
UserOrganization::find_by_org(&org_id, &conn).await.into_iter() { // Policy only applies to non-Owner/non-Admin members who have accepted joining the org if member.atype < UserOrgType::Admin && member.status != UserOrgStatus::Invited as i32 { let is_member_of_another_org = UserOrganization::find_any_state_by_user(&member.user_uuid, &conn) + .await .into_iter() // Other UserOrganization's where they have accepted being a member of .filter(|uo| uo.uuid != member.uuid && uo.status != UserOrgStatus::Invited as i32) @@ -1269,25 +1316,25 @@ fn put_policy( if is_member_of_another_org { if CONFIG.mail_enabled() { - let org = Organization::find_by_uuid(&member.org_uuid, &conn).unwrap(); - let user = User::find_by_uuid(&member.user_uuid, &conn).unwrap(); + let org = Organization::find_by_uuid(&member.org_uuid, &conn).await.unwrap(); + let user = User::find_by_uuid(&member.user_uuid, &conn).await.unwrap(); mail::send_single_org_removed_from_org(&user.email, &org.name)?; } - member.delete(&conn)?; + member.delete(&conn).await?; } } } } - let mut policy = match OrgPolicy::find_by_org_and_type(&org_id, pol_type, &conn) { + let mut policy = match OrgPolicy::find_by_org_and_type(&org_id, pol_type, &conn).await { Some(p) => p, None => OrgPolicy::new(org_id, pol_type_enum, "{}".to_string()), }; policy.enabled = data.enabled; policy.data = serde_json::to_string(&data.data)?; - policy.save(&conn)?; + policy.save(&conn).await?; Ok(Json(policy.to_json())) } @@ -1360,7 +1407,7 @@ struct OrgImportData { } #[post("/organizations//import", data = "")] -fn import(org_id: String, data: JsonUpcase, headers: Headers, conn: DbConn) -> EmptyResult { +async fn import(org_id: String, data: JsonUpcase, headers: Headers, conn: DbConn) -> EmptyResult { let data = data.into_inner().data; // TODO: Currently we aren't storing the externalId's anywhere, so we also don't have a way @@ -1369,7 +1416,7 @@ fn import(org_id: String, data: JsonUpcase, headers: Headers, con // as opposed to upstream which only 
removes auto-imported users. // User needs to be admin or owner to use the Directry Connector - match UserOrganization::find_by_user_and_org(&headers.user.uuid, &org_id, &conn) { + match UserOrganization::find_by_user_and_org(&headers.user.uuid, &org_id, &conn).await { Some(user_org) if user_org.atype >= UserOrgType::Admin => { /* Okay, nothing to do */ } Some(_) => err!("User has insufficient permissions to use Directory Connector"), None => err!("User not part of organization"), @@ -1378,13 +1425,13 @@ fn import(org_id: String, data: JsonUpcase, headers: Headers, con for user_data in &data.Users { if user_data.Deleted { // If user is marked for deletion and it exists, delete it - if let Some(user_org) = UserOrganization::find_by_email_and_org(&user_data.Email, &org_id, &conn) { - user_org.delete(&conn)?; + if let Some(user_org) = UserOrganization::find_by_email_and_org(&user_data.Email, &org_id, &conn).await { + user_org.delete(&conn).await?; } // If user is not part of the organization, but it exists - } else if UserOrganization::find_by_email_and_org(&user_data.Email, &org_id, &conn).is_none() { - if let Some(user) = User::find_by_mail(&user_data.Email, &conn) { + } else if UserOrganization::find_by_email_and_org(&user_data.Email, &org_id, &conn).await.is_none() { + if let Some(user) = User::find_by_mail(&user_data.Email, &conn).await { let user_org_status = if CONFIG.mail_enabled() { UserOrgStatus::Invited as i32 } else { @@ -1396,10 +1443,10 @@ fn import(org_id: String, data: JsonUpcase, headers: Headers, con new_org_user.atype = UserOrgType::User as i32; new_org_user.status = user_org_status; - new_org_user.save(&conn)?; + new_org_user.save(&conn).await?; if CONFIG.mail_enabled() { - let org_name = match Organization::find_by_uuid(&org_id, &conn) { + let org_name = match Organization::find_by_uuid(&org_id, &conn).await { Some(org) => org.name, None => err!("Error looking up organization"), }; @@ -1419,10 +1466,10 @@ fn import(org_id: String, data: 
JsonUpcase, headers: Headers, con // If this flag is enabled, any user that isn't provided in the Users list will be removed (by default they will be kept unless they have Deleted == true) if data.OverwriteExisting { - for user_org in UserOrganization::find_by_org_and_type(&org_id, UserOrgType::User as i32, &conn) { - if let Some(user_email) = User::find_by_uuid(&user_org.user_uuid, &conn).map(|u| u.email) { + for user_org in UserOrganization::find_by_org_and_type(&org_id, UserOrgType::User as i32, &conn).await { + if let Some(user_email) = User::find_by_uuid(&user_org.user_uuid, &conn).await.map(|u| u.email) { if !data.Users.iter().any(|u| u.Email == user_email) { - user_org.delete(&conn)?; + user_org.delete(&conn).await?; } } } diff --git a/src/api/core/sends.rs b/src/api/core/sends.rs index a185fe89..f3042278 100644 --- a/src/api/core/sends.rs +++ b/src/api/core/sends.rs @@ -35,7 +35,7 @@ pub fn routes() -> Vec { pub async fn purge_sends(pool: DbPool) { debug!("Purging sends"); if let Ok(conn) = pool.get().await { - Send::purge(&conn); + Send::purge(&conn).await; } else { error!("Failed to get DB connection while purging sends") } @@ -68,10 +68,10 @@ struct SendData { /// /// There is also a Vaultwarden-specific `sends_allowed` config setting that /// controls this policy globally. 
-fn enforce_disable_send_policy(headers: &Headers, conn: &DbConn) -> EmptyResult { +async fn enforce_disable_send_policy(headers: &Headers, conn: &DbConn) -> EmptyResult { let user_uuid = &headers.user.uuid; let policy_type = OrgPolicyType::DisableSend; - if !CONFIG.sends_allowed() || OrgPolicy::is_applicable_to_user(user_uuid, policy_type, conn) { + if !CONFIG.sends_allowed() || OrgPolicy::is_applicable_to_user(user_uuid, policy_type, conn).await { err!("Due to an Enterprise Policy, you are only able to delete an existing Send.") } Ok(()) @@ -83,10 +83,10 @@ fn enforce_disable_send_policy(headers: &Headers, conn: &DbConn) -> EmptyResult /// but is allowed to remove this option from an existing Send. /// /// Ref: https://bitwarden.com/help/article/policies/#send-options -fn enforce_disable_hide_email_policy(data: &SendData, headers: &Headers, conn: &DbConn) -> EmptyResult { +async fn enforce_disable_hide_email_policy(data: &SendData, headers: &Headers, conn: &DbConn) -> EmptyResult { let user_uuid = &headers.user.uuid; let hide_email = data.HideEmail.unwrap_or(false); - if hide_email && OrgPolicy::is_hide_email_disabled(user_uuid, conn) { + if hide_email && OrgPolicy::is_hide_email_disabled(user_uuid, conn).await { err!( "Due to an Enterprise Policy, you are not allowed to hide your email address \ from recipients when creating or editing a Send." 
@@ -95,7 +95,7 @@ fn enforce_disable_hide_email_policy(data: &SendData, headers: &Headers, conn: & Ok(()) } -fn create_send(data: SendData, user_uuid: String) -> ApiResult { +async fn create_send(data: SendData, user_uuid: String) -> ApiResult { let data_val = if data.Type == SendType::Text as i32 { data.Text } else if data.Type == SendType::File as i32 { @@ -117,7 +117,7 @@ fn create_send(data: SendData, user_uuid: String) -> ApiResult { ); } - let mut send = Send::new(data.Type, data.Name, data_str, data.Key, data.DeletionDate.naive_utc()); + let mut send = Send::new(data.Type, data.Name, data_str, data.Key, data.DeletionDate.naive_utc()).await; send.user_uuid = Some(user_uuid); send.notes = data.Notes; send.max_access_count = match data.MaxAccessCount { @@ -135,9 +135,9 @@ fn create_send(data: SendData, user_uuid: String) -> ApiResult { } #[get("/sends")] -fn get_sends(headers: Headers, conn: DbConn) -> Json { +async fn get_sends(headers: Headers, conn: DbConn) -> Json { let sends = Send::find_by_user(&headers.user.uuid, &conn); - let sends_json: Vec = sends.iter().map(|s| s.to_json()).collect(); + let sends_json: Vec = sends.await.iter().map(|s| s.to_json()).collect(); Json(json!({ "Data": sends_json, @@ -147,8 +147,8 @@ fn get_sends(headers: Headers, conn: DbConn) -> Json { } #[get("/sends/")] -fn get_send(uuid: String, headers: Headers, conn: DbConn) -> JsonResult { - let send = match Send::find_by_uuid(&uuid, &conn) { +async fn get_send(uuid: String, headers: Headers, conn: DbConn) -> JsonResult { + let send = match Send::find_by_uuid(&uuid, &conn).await { Some(send) => send, None => err!("Send not found"), }; @@ -161,19 +161,19 @@ fn get_send(uuid: String, headers: Headers, conn: DbConn) -> JsonResult { } #[post("/sends", data = "")] -fn post_send(data: JsonUpcase, headers: Headers, conn: DbConn, nt: Notify) -> JsonResult { - enforce_disable_send_policy(&headers, &conn)?; +async fn post_send(data: JsonUpcase, headers: Headers, conn: DbConn, nt: Notify<'_>) 
-> JsonResult { + enforce_disable_send_policy(&headers, &conn).await?; let data: SendData = data.into_inner().data; - enforce_disable_hide_email_policy(&data, &headers, &conn)?; + enforce_disable_hide_email_policy(&data, &headers, &conn).await?; if data.Type == SendType::File as i32 { err!("File sends should use /api/sends/file") } - let mut send = create_send(data, headers.user.uuid)?; - send.save(&conn)?; - nt.send_send_update(UpdateType::SyncSendCreate, &send, &send.update_users_revision(&conn)); + let mut send = create_send(data, headers.user.uuid).await?; + send.save(&conn).await?; + nt.send_send_update(UpdateType::SyncSendCreate, &send, &send.update_users_revision(&conn).await); Ok(Json(send.to_json())) } @@ -186,7 +186,7 @@ struct UploadData<'f> { #[post("/sends/file", format = "multipart/form-data", data = "")] async fn post_send_file(data: Form>, headers: Headers, conn: DbConn, nt: Notify<'_>) -> JsonResult { - enforce_disable_send_policy(&headers, &conn)?; + enforce_disable_send_policy(&headers, &conn).await?; let UploadData { model, @@ -194,7 +194,7 @@ async fn post_send_file(data: Form>, headers: Headers, conn: DbCo } = data.into_inner(); let model = model.into_inner().data; - enforce_disable_hide_email_policy(&model, &headers, &conn)?; + enforce_disable_hide_email_policy(&model, &headers, &conn).await?; // Get the file length and add an extra 5% to avoid issues const SIZE_525_MB: u64 = 550_502_400; @@ -202,7 +202,7 @@ async fn post_send_file(data: Form>, headers: Headers, conn: DbCo let size_limit = match CONFIG.user_attachment_limit() { Some(0) => err!("File uploads are disabled"), Some(limit_kb) => { - let left = (limit_kb * 1024) - Attachment::size_by_user(&headers.user.uuid, &conn); + let left = (limit_kb * 1024) - Attachment::size_by_user(&headers.user.uuid, &conn).await; if left <= 0 { err!("Attachment storage limit reached! 
Delete some attachments to free up space") } @@ -211,7 +211,7 @@ async fn post_send_file(data: Form>, headers: Headers, conn: DbCo None => SIZE_525_MB, }; - let mut send = create_send(model, headers.user.uuid)?; + let mut send = create_send(model, headers.user.uuid).await?; if send.atype != SendType::File as i32 { err!("Send content is not a file"); } @@ -236,8 +236,8 @@ async fn post_send_file(data: Form>, headers: Headers, conn: DbCo send.data = serde_json::to_string(&data_value)?; // Save the changes in the database - send.save(&conn)?; - nt.send_send_update(UpdateType::SyncSendUpdate, &send, &send.update_users_revision(&conn)); + send.save(&conn).await?; + nt.send_send_update(UpdateType::SyncSendUpdate, &send, &send.update_users_revision(&conn).await); Ok(Json(send.to_json())) } @@ -249,8 +249,8 @@ pub struct SendAccessData { } #[post("/sends/access/", data = "")] -fn post_access(access_id: String, data: JsonUpcase, conn: DbConn) -> JsonResult { - let mut send = match Send::find_by_access_id(&access_id, &conn) { +async fn post_access(access_id: String, data: JsonUpcase, conn: DbConn) -> JsonResult { + let mut send = match Send::find_by_access_id(&access_id, &conn).await { Some(s) => s, None => err_code!(SEND_INACCESSIBLE_MSG, 404), }; @@ -288,20 +288,20 @@ fn post_access(access_id: String, data: JsonUpcase, conn: DbConn send.access_count += 1; } - send.save(&conn)?; + send.save(&conn).await?; - Ok(Json(send.to_json_access(&conn))) + Ok(Json(send.to_json_access(&conn).await)) } #[post("/sends//access/file/", data = "")] -fn post_access_file( +async fn post_access_file( send_id: String, file_id: String, data: JsonUpcase, host: Host, conn: DbConn, ) -> JsonResult { - let mut send = match Send::find_by_uuid(&send_id, &conn) { + let mut send = match Send::find_by_uuid(&send_id, &conn).await { Some(s) => s, None => err_code!(SEND_INACCESSIBLE_MSG, 404), }; @@ -336,7 +336,7 @@ fn post_access_file( send.access_count += 1; - send.save(&conn)?; + send.save(&conn).await?; 
let token_claims = crate::auth::generate_send_claims(&send_id, &file_id); let token = crate::auth::encode_jwt(&token_claims); @@ -358,13 +358,19 @@ async fn download_send(send_id: SafeString, file_id: SafeString, t: String) -> O } #[put("/sends/", data = "")] -fn put_send(id: String, data: JsonUpcase, headers: Headers, conn: DbConn, nt: Notify) -> JsonResult { - enforce_disable_send_policy(&headers, &conn)?; +async fn put_send( + id: String, + data: JsonUpcase, + headers: Headers, + conn: DbConn, + nt: Notify<'_>, +) -> JsonResult { + enforce_disable_send_policy(&headers, &conn).await?; let data: SendData = data.into_inner().data; - enforce_disable_hide_email_policy(&data, &headers, &conn)?; + enforce_disable_hide_email_policy(&data, &headers, &conn).await?; - let mut send = match Send::find_by_uuid(&id, &conn) { + let mut send = match Send::find_by_uuid(&id, &conn).await { Some(s) => s, None => err!("Send not found"), }; @@ -411,15 +417,15 @@ fn put_send(id: String, data: JsonUpcase, headers: Headers, conn: DbCo send.set_password(Some(&password)); } - send.save(&conn)?; - nt.send_send_update(UpdateType::SyncSendUpdate, &send, &send.update_users_revision(&conn)); + send.save(&conn).await?; + nt.send_send_update(UpdateType::SyncSendUpdate, &send, &send.update_users_revision(&conn).await); Ok(Json(send.to_json())) } #[delete("/sends/")] -fn delete_send(id: String, headers: Headers, conn: DbConn, nt: Notify) -> EmptyResult { - let send = match Send::find_by_uuid(&id, &conn) { +async fn delete_send(id: String, headers: Headers, conn: DbConn, nt: Notify<'_>) -> EmptyResult { + let send = match Send::find_by_uuid(&id, &conn).await { Some(s) => s, None => err!("Send not found"), }; @@ -428,17 +434,17 @@ fn delete_send(id: String, headers: Headers, conn: DbConn, nt: Notify) -> EmptyR err!("Send is not owned by user") } - send.delete(&conn)?; - nt.send_send_update(UpdateType::SyncSendDelete, &send, &send.update_users_revision(&conn)); + send.delete(&conn).await?; + 
nt.send_send_update(UpdateType::SyncSendDelete, &send, &send.update_users_revision(&conn).await); Ok(()) } #[put("/sends//remove-password")] -fn put_remove_password(id: String, headers: Headers, conn: DbConn, nt: Notify) -> JsonResult { - enforce_disable_send_policy(&headers, &conn)?; +async fn put_remove_password(id: String, headers: Headers, conn: DbConn, nt: Notify<'_>) -> JsonResult { + enforce_disable_send_policy(&headers, &conn).await?; - let mut send = match Send::find_by_uuid(&id, &conn) { + let mut send = match Send::find_by_uuid(&id, &conn).await { Some(s) => s, None => err!("Send not found"), }; @@ -448,8 +454,8 @@ fn put_remove_password(id: String, headers: Headers, conn: DbConn, nt: Notify) - } send.set_password(None); - send.save(&conn)?; - nt.send_send_update(UpdateType::SyncSendUpdate, &send, &send.update_users_revision(&conn)); + send.save(&conn).await?; + nt.send_send_update(UpdateType::SyncSendUpdate, &send, &send.update_users_revision(&conn).await); Ok(Json(send.to_json())) } diff --git a/src/api/core/two_factor/authenticator.rs b/src/api/core/two_factor/authenticator.rs index 7d80cb54..40ff233c 100644 --- a/src/api/core/two_factor/authenticator.rs +++ b/src/api/core/two_factor/authenticator.rs @@ -21,7 +21,7 @@ pub fn routes() -> Vec { } #[post("/two-factor/get-authenticator", data = "")] -fn generate_authenticator(data: JsonUpcase, headers: Headers, conn: DbConn) -> JsonResult { +async fn generate_authenticator(data: JsonUpcase, headers: Headers, conn: DbConn) -> JsonResult { let data: PasswordData = data.into_inner().data; let user = headers.user; @@ -30,7 +30,7 @@ fn generate_authenticator(data: JsonUpcase, headers: Headers, conn } let type_ = TwoFactorType::Authenticator as i32; - let twofactor = TwoFactor::find_by_user_and_type(&user.uuid, type_, &conn); + let twofactor = TwoFactor::find_by_user_and_type(&user.uuid, type_, &conn).await; let (enabled, key) = match twofactor { Some(tf) => (true, tf.data), @@ -53,7 +53,7 @@ struct 
EnableAuthenticatorData { } #[post("/two-factor/authenticator", data = "")] -fn activate_authenticator( +async fn activate_authenticator( data: JsonUpcase, headers: Headers, ip: ClientIp, @@ -81,9 +81,9 @@ fn activate_authenticator( } // Validate the token provided with the key, and save new twofactor - validate_totp_code(&user.uuid, &token, &key.to_uppercase(), &ip, &conn)?; + validate_totp_code(&user.uuid, &token, &key.to_uppercase(), &ip, &conn).await?; - _generate_recover_code(&mut user, &conn); + _generate_recover_code(&mut user, &conn).await; Ok(Json(json!({ "Enabled": true, @@ -93,16 +93,16 @@ fn activate_authenticator( } #[put("/two-factor/authenticator", data = "")] -fn activate_authenticator_put( +async fn activate_authenticator_put( data: JsonUpcase, headers: Headers, ip: ClientIp, conn: DbConn, ) -> JsonResult { - activate_authenticator(data, headers, ip, conn) + activate_authenticator(data, headers, ip, conn).await } -pub fn validate_totp_code_str( +pub async fn validate_totp_code_str( user_uuid: &str, totp_code: &str, secret: &str, @@ -113,10 +113,16 @@ pub fn validate_totp_code_str( err!("TOTP code is not a number"); } - validate_totp_code(user_uuid, totp_code, secret, ip, conn) + validate_totp_code(user_uuid, totp_code, secret, ip, conn).await } -pub fn validate_totp_code(user_uuid: &str, totp_code: &str, secret: &str, ip: &ClientIp, conn: &DbConn) -> EmptyResult { +pub async fn validate_totp_code( + user_uuid: &str, + totp_code: &str, + secret: &str, + ip: &ClientIp, + conn: &DbConn, +) -> EmptyResult { use totp_lite::{totp_custom, Sha1}; let decoded_secret = match BASE32.decode(secret.as_bytes()) { @@ -124,10 +130,11 @@ pub fn validate_totp_code(user_uuid: &str, totp_code: &str, secret: &str, ip: &C Err(_) => err!("Invalid TOTP secret"), }; - let mut twofactor = match TwoFactor::find_by_user_and_type(user_uuid, TwoFactorType::Authenticator as i32, conn) { - Some(tf) => tf, - _ => TwoFactor::new(user_uuid.to_string(), TwoFactorType::Authenticator, 
secret.to_string()), - }; + let mut twofactor = + match TwoFactor::find_by_user_and_type(user_uuid, TwoFactorType::Authenticator as i32, conn).await { + Some(tf) => tf, + _ => TwoFactor::new(user_uuid.to_string(), TwoFactorType::Authenticator, secret.to_string()), + }; // The amount of steps back and forward in time // Also check if we need to disable time drifted TOTP codes. @@ -156,7 +163,7 @@ pub fn validate_totp_code(user_uuid: &str, totp_code: &str, secret: &str, ip: &C // Save the last used time step so only totp time steps higher then this one are allowed. // This will also save a newly created twofactor if the code is correct. twofactor.last_used = time_step as i32; - twofactor.save(conn)?; + twofactor.save(conn).await?; return Ok(()); } else if generated == totp_code && time_step <= twofactor.last_used as i64 { warn!("This TOTP or a TOTP code within {} steps back or forward has already been used!", steps); diff --git a/src/api/core/two_factor/duo.rs b/src/api/core/two_factor/duo.rs index 1450cef0..ccfa05be 100644 --- a/src/api/core/two_factor/duo.rs +++ b/src/api/core/two_factor/duo.rs @@ -89,14 +89,14 @@ impl DuoStatus { const DISABLED_MESSAGE_DEFAULT: &str = ""; #[post("/two-factor/get-duo", data = "")] -fn get_duo(data: JsonUpcase, headers: Headers, conn: DbConn) -> JsonResult { +async fn get_duo(data: JsonUpcase, headers: Headers, conn: DbConn) -> JsonResult { let data: PasswordData = data.into_inner().data; if !headers.user.check_valid_password(&data.MasterPasswordHash) { err!("Invalid password"); } - let data = get_user_duo_data(&headers.user.uuid, &conn); + let data = get_user_duo_data(&headers.user.uuid, &conn).await; let (enabled, data) = match data { DuoStatus::Global(_) => (true, Some(DuoData::secret())), @@ -171,9 +171,9 @@ async fn activate_duo(data: JsonUpcase, headers: Headers, conn: D let type_ = TwoFactorType::Duo; let twofactor = TwoFactor::new(user.uuid.clone(), type_, data_str); - twofactor.save(&conn)?; + twofactor.save(&conn).await?; 
- _generate_recover_code(&mut user, &conn); + _generate_recover_code(&mut user, &conn).await; Ok(Json(json!({ "Enabled": true, @@ -223,11 +223,11 @@ const AUTH_PREFIX: &str = "AUTH"; const DUO_PREFIX: &str = "TX"; const APP_PREFIX: &str = "APP"; -fn get_user_duo_data(uuid: &str, conn: &DbConn) -> DuoStatus { +async fn get_user_duo_data(uuid: &str, conn: &DbConn) -> DuoStatus { let type_ = TwoFactorType::Duo as i32; // If the user doesn't have an entry, disabled - let twofactor = match TwoFactor::find_by_user_and_type(uuid, type_, conn) { + let twofactor = match TwoFactor::find_by_user_and_type(uuid, type_, conn).await { Some(t) => t, None => return DuoStatus::Disabled(DuoData::global().is_some()), }; @@ -247,19 +247,20 @@ fn get_user_duo_data(uuid: &str, conn: &DbConn) -> DuoStatus { } // let (ik, sk, ak, host) = get_duo_keys(); -fn get_duo_keys_email(email: &str, conn: &DbConn) -> ApiResult<(String, String, String, String)> { - let data = User::find_by_mail(email, conn) - .and_then(|u| get_user_duo_data(&u.uuid, conn).data()) - .or_else(DuoData::global) - .map_res("Can't fetch Duo keys")?; +async fn get_duo_keys_email(email: &str, conn: &DbConn) -> ApiResult<(String, String, String, String)> { + let data = match User::find_by_mail(email, conn).await { + Some(u) => get_user_duo_data(&u.uuid, conn).await.data(), + _ => DuoData::global(), + } + .map_res("Can't fetch Duo Keys")?; Ok((data.ik, data.sk, CONFIG.get_duo_akey(), data.host)) } -pub fn generate_duo_signature(email: &str, conn: &DbConn) -> ApiResult<(String, String)> { +pub async fn generate_duo_signature(email: &str, conn: &DbConn) -> ApiResult<(String, String)> { let now = Utc::now().timestamp(); - let (ik, sk, ak, host) = get_duo_keys_email(email, conn)?; + let (ik, sk, ak, host) = get_duo_keys_email(email, conn).await?; let duo_sign = sign_duo_values(&sk, email, &ik, DUO_PREFIX, now + DUO_EXPIRE); let app_sign = sign_duo_values(&ak, email, &ik, APP_PREFIX, now + APP_EXPIRE); @@ -274,7 +275,7 @@ fn 
sign_duo_values(key: &str, email: &str, ikey: &str, prefix: &str, expire: i64 format!("{}|{}", cookie, crypto::hmac_sign(key, &cookie)) } -pub fn validate_duo_login(email: &str, response: &str, conn: &DbConn) -> EmptyResult { +pub async fn validate_duo_login(email: &str, response: &str, conn: &DbConn) -> EmptyResult { // email is as entered by the user, so it needs to be normalized before // comparison with auth_user below. let email = &email.to_lowercase(); @@ -289,7 +290,7 @@ pub fn validate_duo_login(email: &str, response: &str, conn: &DbConn) -> EmptyRe let now = Utc::now().timestamp(); - let (ik, sk, ak, _host) = get_duo_keys_email(email, conn)?; + let (ik, sk, ak, _host) = get_duo_keys_email(email, conn).await?; let auth_user = parse_duo_values(&sk, auth_sig, &ik, AUTH_PREFIX, now)?; let app_user = parse_duo_values(&ak, app_sig, &ik, APP_PREFIX, now)?; diff --git a/src/api/core/two_factor/email.rs b/src/api/core/two_factor/email.rs index 51487ee3..f6dfd77a 100644 --- a/src/api/core/two_factor/email.rs +++ b/src/api/core/two_factor/email.rs @@ -28,13 +28,13 @@ struct SendEmailLoginData { /// User is trying to login and wants to use email 2FA. /// Does not require Bearer token #[post("/two-factor/send-email-login", data = "")] // JsonResult -fn send_email_login(data: JsonUpcase, conn: DbConn) -> EmptyResult { +async fn send_email_login(data: JsonUpcase, conn: DbConn) -> EmptyResult { let data: SendEmailLoginData = data.into_inner().data; use crate::db::models::User; // Get the user - let user = match User::find_by_mail(&data.Email, &conn) { + let user = match User::find_by_mail(&data.Email, &conn).await { Some(user) => user, None => err!("Username or password is incorrect. 
Try again."), }; @@ -48,22 +48,23 @@ fn send_email_login(data: JsonUpcase, conn: DbConn) -> Empty err!("Email 2FA is disabled") } - send_token(&user.uuid, &conn)?; + send_token(&user.uuid, &conn).await?; Ok(()) } /// Generate the token, save the data for later verification and send email to user -pub fn send_token(user_uuid: &str, conn: &DbConn) -> EmptyResult { +pub async fn send_token(user_uuid: &str, conn: &DbConn) -> EmptyResult { let type_ = TwoFactorType::Email as i32; - let mut twofactor = TwoFactor::find_by_user_and_type(user_uuid, type_, conn).map_res("Two factor not found")?; + let mut twofactor = + TwoFactor::find_by_user_and_type(user_uuid, type_, conn).await.map_res("Two factor not found")?; let generated_token = crypto::generate_email_token(CONFIG.email_token_size()); let mut twofactor_data = EmailTokenData::from_json(&twofactor.data)?; twofactor_data.set_token(generated_token); twofactor.data = twofactor_data.to_json(); - twofactor.save(conn)?; + twofactor.save(conn).await?; mail::send_token(&twofactor_data.email, &twofactor_data.last_token.map_res("Token is empty")?)?; @@ -72,7 +73,7 @@ pub fn send_token(user_uuid: &str, conn: &DbConn) -> EmptyResult { /// When user clicks on Manage email 2FA show the user the related information #[post("/two-factor/get-email", data = "")] -fn get_email(data: JsonUpcase, headers: Headers, conn: DbConn) -> JsonResult { +async fn get_email(data: JsonUpcase, headers: Headers, conn: DbConn) -> JsonResult { let data: PasswordData = data.into_inner().data; let user = headers.user; @@ -80,13 +81,14 @@ fn get_email(data: JsonUpcase, headers: Headers, conn: DbConn) -> err!("Invalid password"); } - let (enabled, mfa_email) = match TwoFactor::find_by_user_and_type(&user.uuid, TwoFactorType::Email as i32, &conn) { - Some(x) => { - let twofactor_data = EmailTokenData::from_json(&x.data)?; - (true, json!(twofactor_data.email)) - } - _ => (false, json!(null)), - }; + let (enabled, mfa_email) = + match 
TwoFactor::find_by_user_and_type(&user.uuid, TwoFactorType::Email as i32, &conn).await { + Some(x) => { + let twofactor_data = EmailTokenData::from_json(&x.data)?; + (true, json!(twofactor_data.email)) + } + _ => (false, json!(null)), + }; Ok(Json(json!({ "Email": mfa_email, @@ -105,7 +107,7 @@ struct SendEmailData { /// Send a verification email to the specified email address to check whether it exists/belongs to user. #[post("/two-factor/send-email", data = "")] -fn send_email(data: JsonUpcase, headers: Headers, conn: DbConn) -> EmptyResult { +async fn send_email(data: JsonUpcase, headers: Headers, conn: DbConn) -> EmptyResult { let data: SendEmailData = data.into_inner().data; let user = headers.user; @@ -119,8 +121,8 @@ fn send_email(data: JsonUpcase, headers: Headers, conn: DbConn) - let type_ = TwoFactorType::Email as i32; - if let Some(tf) = TwoFactor::find_by_user_and_type(&user.uuid, type_, &conn) { - tf.delete(&conn)?; + if let Some(tf) = TwoFactor::find_by_user_and_type(&user.uuid, type_, &conn).await { + tf.delete(&conn).await?; } let generated_token = crypto::generate_email_token(CONFIG.email_token_size()); @@ -128,7 +130,7 @@ fn send_email(data: JsonUpcase, headers: Headers, conn: DbConn) - // Uses EmailVerificationChallenge as type to show that it's not verified yet. let twofactor = TwoFactor::new(user.uuid, TwoFactorType::EmailVerificationChallenge, twofactor_data.to_json()); - twofactor.save(&conn)?; + twofactor.save(&conn).await?; mail::send_token(&twofactor_data.email, &twofactor_data.last_token.map_res("Token is empty")?)?; @@ -145,7 +147,7 @@ struct EmailData { /// Verify email belongs to user and can be used for 2FA email codes. 
#[put("/two-factor/email", data = "")] -fn email(data: JsonUpcase, headers: Headers, conn: DbConn) -> JsonResult { +async fn email(data: JsonUpcase, headers: Headers, conn: DbConn) -> JsonResult { let data: EmailData = data.into_inner().data; let mut user = headers.user; @@ -154,7 +156,8 @@ fn email(data: JsonUpcase, headers: Headers, conn: DbConn) -> JsonRes } let type_ = TwoFactorType::EmailVerificationChallenge as i32; - let mut twofactor = TwoFactor::find_by_user_and_type(&user.uuid, type_, &conn).map_res("Two factor not found")?; + let mut twofactor = + TwoFactor::find_by_user_and_type(&user.uuid, type_, &conn).await.map_res("Two factor not found")?; let mut email_data = EmailTokenData::from_json(&twofactor.data)?; @@ -170,9 +173,9 @@ fn email(data: JsonUpcase, headers: Headers, conn: DbConn) -> JsonRes email_data.reset_token(); twofactor.atype = TwoFactorType::Email as i32; twofactor.data = email_data.to_json(); - twofactor.save(&conn)?; + twofactor.save(&conn).await?; - _generate_recover_code(&mut user, &conn); + _generate_recover_code(&mut user, &conn).await; Ok(Json(json!({ "Email": email_data.email, @@ -182,9 +185,10 @@ fn email(data: JsonUpcase, headers: Headers, conn: DbConn) -> JsonRes } /// Validate the email code when used as TwoFactor token mechanism -pub fn validate_email_code_str(user_uuid: &str, token: &str, data: &str, conn: &DbConn) -> EmptyResult { +pub async fn validate_email_code_str(user_uuid: &str, token: &str, data: &str, conn: &DbConn) -> EmptyResult { let mut email_data = EmailTokenData::from_json(data)?; let mut twofactor = TwoFactor::find_by_user_and_type(user_uuid, TwoFactorType::Email as i32, conn) + .await .map_res("Two factor not found")?; let issued_token = match &email_data.last_token { Some(t) => t, @@ -197,14 +201,14 @@ pub fn validate_email_code_str(user_uuid: &str, token: &str, data: &str, conn: & email_data.reset_token(); } twofactor.data = email_data.to_json(); - twofactor.save(conn)?; + twofactor.save(conn).await?; 
err!("Token is invalid") } email_data.reset_token(); twofactor.data = email_data.to_json(); - twofactor.save(conn)?; + twofactor.save(conn).await?; let date = NaiveDateTime::from_timestamp(email_data.token_sent, 0); let max_time = CONFIG.email_expiration_time() as i64; diff --git a/src/api/core/two_factor/mod.rs b/src/api/core/two_factor/mod.rs index 105fe9eb..0f3bd14e 100644 --- a/src/api/core/two_factor/mod.rs +++ b/src/api/core/two_factor/mod.rs @@ -33,8 +33,8 @@ pub fn routes() -> Vec { } #[get("/two-factor")] -fn get_twofactor(headers: Headers, conn: DbConn) -> Json { - let twofactors = TwoFactor::find_by_user(&headers.user.uuid, &conn); +async fn get_twofactor(headers: Headers, conn: DbConn) -> Json { + let twofactors = TwoFactor::find_by_user(&headers.user.uuid, &conn).await; let twofactors_json: Vec = twofactors.iter().map(TwoFactor::to_json_provider).collect(); Json(json!({ @@ -68,13 +68,13 @@ struct RecoverTwoFactor { } #[post("/two-factor/recover", data = "")] -fn recover(data: JsonUpcase, conn: DbConn) -> JsonResult { +async fn recover(data: JsonUpcase, conn: DbConn) -> JsonResult { let data: RecoverTwoFactor = data.into_inner().data; use crate::db::models::User; // Get the user - let mut user = match User::find_by_mail(&data.Email, &conn) { + let mut user = match User::find_by_mail(&data.Email, &conn).await { Some(user) => user, None => err!("Username or password is incorrect. 
Try again."), }; @@ -90,19 +90,19 @@ fn recover(data: JsonUpcase, conn: DbConn) -> JsonResult { } // Remove all twofactors from the user - TwoFactor::delete_all_by_user(&user.uuid, &conn)?; + TwoFactor::delete_all_by_user(&user.uuid, &conn).await?; // Remove the recovery code, not needed without twofactors user.totp_recover = None; - user.save(&conn)?; + user.save(&conn).await?; Ok(Json(json!({}))) } -fn _generate_recover_code(user: &mut User, conn: &DbConn) { +async fn _generate_recover_code(user: &mut User, conn: &DbConn) { if user.totp_recover.is_none() { let totp_recover = BASE32.encode(&crypto::get_random(vec![0u8; 20])); user.totp_recover = Some(totp_recover); - user.save(conn).ok(); + user.save(conn).await.ok(); } } @@ -114,7 +114,7 @@ struct DisableTwoFactorData { } #[post("/two-factor/disable", data = "")] -fn disable_twofactor(data: JsonUpcase, headers: Headers, conn: DbConn) -> JsonResult { +async fn disable_twofactor(data: JsonUpcase, headers: Headers, conn: DbConn) -> JsonResult { let data: DisableTwoFactorData = data.into_inner().data; let password_hash = data.MasterPasswordHash; let user = headers.user; @@ -125,23 +125,24 @@ fn disable_twofactor(data: JsonUpcase, headers: Headers, c let type_ = data.Type.into_i32()?; - if let Some(twofactor) = TwoFactor::find_by_user_and_type(&user.uuid, type_, &conn) { - twofactor.delete(&conn)?; + if let Some(twofactor) = TwoFactor::find_by_user_and_type(&user.uuid, type_, &conn).await { + twofactor.delete(&conn).await?; } - let twofactor_disabled = TwoFactor::find_by_user(&user.uuid, &conn).is_empty(); + let twofactor_disabled = TwoFactor::find_by_user(&user.uuid, &conn).await.is_empty(); if twofactor_disabled { - let policy_type = OrgPolicyType::TwoFactorAuthentication; - let org_list = UserOrganization::find_by_user_and_policy(&user.uuid, policy_type, &conn); - - for user_org in org_list.into_iter() { + for user_org in + UserOrganization::find_by_user_and_policy(&user.uuid, 
OrgPolicyType::TwoFactorAuthentication, &conn) + .await + .into_iter() + { if user_org.atype < UserOrgType::Admin { if CONFIG.mail_enabled() { - let org = Organization::find_by_uuid(&user_org.org_uuid, &conn).unwrap(); + let org = Organization::find_by_uuid(&user_org.org_uuid, &conn).await.unwrap(); mail::send_2fa_removed_from_org(&user.email, &org.name)?; } - user_org.delete(&conn)?; + user_org.delete(&conn).await?; } } } @@ -154,8 +155,8 @@ fn disable_twofactor(data: JsonUpcase, headers: Headers, c } #[put("/two-factor/disable", data = "")] -fn disable_twofactor_put(data: JsonUpcase, headers: Headers, conn: DbConn) -> JsonResult { - disable_twofactor(data, headers, conn) +async fn disable_twofactor_put(data: JsonUpcase, headers: Headers, conn: DbConn) -> JsonResult { + disable_twofactor(data, headers, conn).await } pub async fn send_incomplete_2fa_notifications(pool: DbPool) { @@ -175,15 +176,16 @@ pub async fn send_incomplete_2fa_notifications(pool: DbPool) { let now = Utc::now().naive_utc(); let time_limit = Duration::minutes(CONFIG.incomplete_2fa_time_limit()); - let incomplete_logins = TwoFactorIncomplete::find_logins_before(&(now - time_limit), &conn); + let time_before = now - time_limit; + let incomplete_logins = TwoFactorIncomplete::find_logins_before(&time_before, &conn).await; for login in incomplete_logins { - let user = User::find_by_uuid(&login.user_uuid, &conn).expect("User not found"); + let user = User::find_by_uuid(&login.user_uuid, &conn).await.expect("User not found"); info!( "User {} did not complete a 2FA login within the configured time limit. 
IP: {}", user.email, login.ip_address ); mail::send_incomplete_2fa_login(&user.email, &login.ip_address, &login.login_time, &login.device_name) .expect("Error sending incomplete 2FA email"); - login.delete(&conn).expect("Error deleting incomplete 2FA record"); + login.delete(&conn).await.expect("Error deleting incomplete 2FA record"); } } diff --git a/src/api/core/two_factor/u2f.rs b/src/api/core/two_factor/u2f.rs index cb24bcb3..5ae976c8 100644 --- a/src/api/core/two_factor/u2f.rs +++ b/src/api/core/two_factor/u2f.rs @@ -32,7 +32,7 @@ pub fn routes() -> Vec { } #[post("/two-factor/get-u2f", data = "")] -fn generate_u2f(data: JsonUpcase, headers: Headers, conn: DbConn) -> JsonResult { +async fn generate_u2f(data: JsonUpcase, headers: Headers, conn: DbConn) -> JsonResult { if !CONFIG.domain_set() { err!("`DOMAIN` environment variable is not set. U2F disabled") } @@ -42,7 +42,7 @@ fn generate_u2f(data: JsonUpcase, headers: Headers, conn: DbConn) err!("Invalid password"); } - let (enabled, keys) = get_u2f_registrations(&headers.user.uuid, &conn)?; + let (enabled, keys) = get_u2f_registrations(&headers.user.uuid, &conn).await?; let keys_json: Vec = keys.iter().map(U2FRegistration::to_json).collect(); Ok(Json(json!({ @@ -53,7 +53,7 @@ fn generate_u2f(data: JsonUpcase, headers: Headers, conn: DbConn) } #[post("/two-factor/get-u2f-challenge", data = "")] -fn generate_u2f_challenge(data: JsonUpcase, headers: Headers, conn: DbConn) -> JsonResult { +async fn generate_u2f_challenge(data: JsonUpcase, headers: Headers, conn: DbConn) -> JsonResult { let data: PasswordData = data.into_inner().data; if !headers.user.check_valid_password(&data.MasterPasswordHash) { @@ -61,7 +61,7 @@ fn generate_u2f_challenge(data: JsonUpcase, headers: Headers, conn } let _type = TwoFactorType::U2fRegisterChallenge; - let challenge = _create_u2f_challenge(&headers.user.uuid, _type, &conn).challenge; + let challenge = _create_u2f_challenge(&headers.user.uuid, _type, &conn).await.challenge; 
Ok(Json(json!({ "UserId": headers.user.uuid, @@ -137,7 +137,7 @@ impl From for RegisterResponse { } #[post("/two-factor/u2f", data = "")] -fn activate_u2f(data: JsonUpcase, headers: Headers, conn: DbConn) -> JsonResult { +async fn activate_u2f(data: JsonUpcase, headers: Headers, conn: DbConn) -> JsonResult { let data: EnableU2FData = data.into_inner().data; let mut user = headers.user; @@ -146,13 +146,13 @@ fn activate_u2f(data: JsonUpcase, headers: Headers, conn: DbConn) } let tf_type = TwoFactorType::U2fRegisterChallenge as i32; - let tf_challenge = match TwoFactor::find_by_user_and_type(&user.uuid, tf_type, &conn) { + let tf_challenge = match TwoFactor::find_by_user_and_type(&user.uuid, tf_type, &conn).await { Some(c) => c, None => err!("Can't recover challenge"), }; let challenge: Challenge = serde_json::from_str(&tf_challenge.data)?; - tf_challenge.delete(&conn)?; + tf_challenge.delete(&conn).await?; let response: RegisterResponseCopy = serde_json::from_str(&data.DeviceResponse)?; @@ -172,13 +172,13 @@ fn activate_u2f(data: JsonUpcase, headers: Headers, conn: DbConn) migrated: None, }; - let mut regs = get_u2f_registrations(&user.uuid, &conn)?.1; + let mut regs = get_u2f_registrations(&user.uuid, &conn).await?.1; // TODO: Check that there is no repeat Id regs.push(full_registration); - save_u2f_registrations(&user.uuid, ®s, &conn)?; + save_u2f_registrations(&user.uuid, ®s, &conn).await?; - _generate_recover_code(&mut user, &conn); + _generate_recover_code(&mut user, &conn).await; let keys_json: Vec = regs.iter().map(U2FRegistration::to_json).collect(); Ok(Json(json!({ @@ -189,8 +189,8 @@ fn activate_u2f(data: JsonUpcase, headers: Headers, conn: DbConn) } #[put("/two-factor/u2f", data = "")] -fn activate_u2f_put(data: JsonUpcase, headers: Headers, conn: DbConn) -> JsonResult { - activate_u2f(data, headers, conn) +async fn activate_u2f_put(data: JsonUpcase, headers: Headers, conn: DbConn) -> JsonResult { + activate_u2f(data, headers, conn).await } 
#[derive(Deserialize, Debug)] @@ -201,7 +201,7 @@ struct DeleteU2FData { } #[delete("/two-factor/u2f", data = "")] -fn delete_u2f(data: JsonUpcase, headers: Headers, conn: DbConn) -> JsonResult { +async fn delete_u2f(data: JsonUpcase, headers: Headers, conn: DbConn) -> JsonResult { let data: DeleteU2FData = data.into_inner().data; let id = data.Id.into_i32()?; @@ -211,7 +211,7 @@ fn delete_u2f(data: JsonUpcase, headers: Headers, conn: DbConn) - } let type_ = TwoFactorType::U2f as i32; - let mut tf = match TwoFactor::find_by_user_and_type(&headers.user.uuid, type_, &conn) { + let mut tf = match TwoFactor::find_by_user_and_type(&headers.user.uuid, type_, &conn).await { Some(tf) => tf, None => err!("U2F data not found!"), }; @@ -226,7 +226,7 @@ fn delete_u2f(data: JsonUpcase, headers: Headers, conn: DbConn) - let new_data_str = serde_json::to_string(&data)?; tf.data = new_data_str; - tf.save(&conn)?; + tf.save(&conn).await?; let keys_json: Vec = data.iter().map(U2FRegistration::to_json).collect(); @@ -237,23 +237,24 @@ fn delete_u2f(data: JsonUpcase, headers: Headers, conn: DbConn) - }))) } -fn _create_u2f_challenge(user_uuid: &str, type_: TwoFactorType, conn: &DbConn) -> Challenge { +async fn _create_u2f_challenge(user_uuid: &str, type_: TwoFactorType, conn: &DbConn) -> Challenge { let challenge = U2F.generate_challenge().unwrap(); TwoFactor::new(user_uuid.into(), type_, serde_json::to_string(&challenge).unwrap()) .save(conn) + .await .expect("Error saving challenge"); challenge } -fn save_u2f_registrations(user_uuid: &str, regs: &[U2FRegistration], conn: &DbConn) -> EmptyResult { - TwoFactor::new(user_uuid.into(), TwoFactorType::U2f, serde_json::to_string(regs)?).save(conn) +async fn save_u2f_registrations(user_uuid: &str, regs: &[U2FRegistration], conn: &DbConn) -> EmptyResult { + TwoFactor::new(user_uuid.into(), TwoFactorType::U2f, serde_json::to_string(regs)?).save(conn).await } -fn get_u2f_registrations(user_uuid: &str, conn: &DbConn) -> Result<(bool, Vec), 
Error> { +async fn get_u2f_registrations(user_uuid: &str, conn: &DbConn) -> Result<(bool, Vec), Error> { let type_ = TwoFactorType::U2f as i32; - let (enabled, regs) = match TwoFactor::find_by_user_and_type(user_uuid, type_, conn) { + let (enabled, regs) = match TwoFactor::find_by_user_and_type(user_uuid, type_, conn).await { Some(tf) => (tf.enabled, tf.data), None => return Ok((false, Vec::new())), // If no data, return empty list }; @@ -279,7 +280,7 @@ fn get_u2f_registrations(user_uuid: &str, conn: &DbConn) -> Result<(bool, Vec Vec { regs.into_iter().map(|r| serde_json::from_value(r).unwrap()).map(|Helper(r)| r).collect() } -pub fn generate_u2f_login(user_uuid: &str, conn: &DbConn) -> ApiResult { - let challenge = _create_u2f_challenge(user_uuid, TwoFactorType::U2fLoginChallenge, conn); +pub async fn generate_u2f_login(user_uuid: &str, conn: &DbConn) -> ApiResult { + let challenge = _create_u2f_challenge(user_uuid, TwoFactorType::U2fLoginChallenge, conn).await; - let registrations: Vec<_> = get_u2f_registrations(user_uuid, conn)?.1.into_iter().map(|r| r.reg).collect(); + let registrations: Vec<_> = get_u2f_registrations(user_uuid, conn).await?.1.into_iter().map(|r| r.reg).collect(); if registrations.is_empty() { err!("No U2F devices registered") @@ -309,20 +310,20 @@ pub fn generate_u2f_login(user_uuid: &str, conn: &DbConn) -> ApiResult EmptyResult { +pub async fn validate_u2f_login(user_uuid: &str, response: &str, conn: &DbConn) -> EmptyResult { let challenge_type = TwoFactorType::U2fLoginChallenge as i32; - let tf_challenge = TwoFactor::find_by_user_and_type(user_uuid, challenge_type, conn); + let tf_challenge = TwoFactor::find_by_user_and_type(user_uuid, challenge_type, conn).await; let challenge = match tf_challenge { Some(tf_challenge) => { let challenge: Challenge = serde_json::from_str(&tf_challenge.data)?; - tf_challenge.delete(conn)?; + tf_challenge.delete(conn).await?; challenge } None => err!("Can't recover login challenge"), }; let response: 
SignResponse = serde_json::from_str(response)?; - let mut registrations = get_u2f_registrations(user_uuid, conn)?.1; + let mut registrations = get_u2f_registrations(user_uuid, conn).await?.1; if registrations.is_empty() { err!("No U2F devices registered") } @@ -332,13 +333,13 @@ pub fn validate_u2f_login(user_uuid: &str, response: &str, conn: &DbConn) -> Emp match response { Ok(new_counter) => { reg.counter = new_counter; - save_u2f_registrations(user_uuid, ®istrations, conn)?; + save_u2f_registrations(user_uuid, ®istrations, conn).await?; return Ok(()); } Err(u2f::u2ferror::U2fError::CounterTooLow) => { reg.compromised = true; - save_u2f_registrations(user_uuid, ®istrations, conn)?; + save_u2f_registrations(user_uuid, ®istrations, conn).await?; err!("This device might be compromised!"); } diff --git a/src/api/core/two_factor/webauthn.rs b/src/api/core/two_factor/webauthn.rs index 4dd0c294..bb18028d 100644 --- a/src/api/core/two_factor/webauthn.rs +++ b/src/api/core/two_factor/webauthn.rs @@ -80,7 +80,7 @@ impl WebauthnRegistration { } #[post("/two-factor/get-webauthn", data = "")] -fn get_webauthn(data: JsonUpcase, headers: Headers, conn: DbConn) -> JsonResult { +async fn get_webauthn(data: JsonUpcase, headers: Headers, conn: DbConn) -> JsonResult { if !CONFIG.domain_set() { err!("`DOMAIN` environment variable is not set. 
Webauthn disabled") } @@ -89,7 +89,7 @@ fn get_webauthn(data: JsonUpcase, headers: Headers, conn: DbConn) err!("Invalid password"); } - let (enabled, registrations) = get_webauthn_registrations(&headers.user.uuid, &conn)?; + let (enabled, registrations) = get_webauthn_registrations(&headers.user.uuid, &conn).await?; let registrations_json: Vec = registrations.iter().map(WebauthnRegistration::to_json).collect(); Ok(Json(json!({ @@ -100,12 +100,13 @@ fn get_webauthn(data: JsonUpcase, headers: Headers, conn: DbConn) } #[post("/two-factor/get-webauthn-challenge", data = "")] -fn generate_webauthn_challenge(data: JsonUpcase, headers: Headers, conn: DbConn) -> JsonResult { +async fn generate_webauthn_challenge(data: JsonUpcase, headers: Headers, conn: DbConn) -> JsonResult { if !headers.user.check_valid_password(&data.data.MasterPasswordHash) { err!("Invalid password"); } - let registrations = get_webauthn_registrations(&headers.user.uuid, &conn)? + let registrations = get_webauthn_registrations(&headers.user.uuid, &conn) + .await? 
.1 .into_iter() .map(|r| r.credential.cred_id) // We return the credentialIds to the clients to avoid double registering @@ -121,7 +122,7 @@ fn generate_webauthn_challenge(data: JsonUpcase, headers: Headers, )?; let type_ = TwoFactorType::WebauthnRegisterChallenge; - TwoFactor::new(headers.user.uuid, type_, serde_json::to_string(&state)?).save(&conn)?; + TwoFactor::new(headers.user.uuid, type_, serde_json::to_string(&state)?).save(&conn).await?; let mut challenge_value = serde_json::to_value(challenge.public_key)?; challenge_value["status"] = "ok".into(); @@ -218,7 +219,7 @@ impl From for PublicKeyCredential { } #[post("/two-factor/webauthn", data = "")] -fn activate_webauthn(data: JsonUpcase, headers: Headers, conn: DbConn) -> JsonResult { +async fn activate_webauthn(data: JsonUpcase, headers: Headers, conn: DbConn) -> JsonResult { let data: EnableWebauthnData = data.into_inner().data; let mut user = headers.user; @@ -228,10 +229,10 @@ fn activate_webauthn(data: JsonUpcase, headers: Headers, con // Retrieve and delete the saved challenge state let type_ = TwoFactorType::WebauthnRegisterChallenge as i32; - let state = match TwoFactor::find_by_user_and_type(&user.uuid, type_, &conn) { + let state = match TwoFactor::find_by_user_and_type(&user.uuid, type_, &conn).await { Some(tf) => { let state: RegistrationState = serde_json::from_str(&tf.data)?; - tf.delete(&conn)?; + tf.delete(&conn).await?; state } None => err!("Can't recover challenge"), @@ -241,7 +242,7 @@ fn activate_webauthn(data: JsonUpcase, headers: Headers, con let (credential, _data) = WebauthnConfig::load().register_credential(&data.DeviceResponse.into(), &state, |_| Ok(false))?; - let mut registrations: Vec<_> = get_webauthn_registrations(&user.uuid, &conn)?.1; + let mut registrations: Vec<_> = get_webauthn_registrations(&user.uuid, &conn).await?.1; // TODO: Check for repeated ID's registrations.push(WebauthnRegistration { id: data.Id.into_i32()?, @@ -252,8 +253,10 @@ fn activate_webauthn(data: 
JsonUpcase, headers: Headers, con }); // Save the registrations and return them - TwoFactor::new(user.uuid.clone(), TwoFactorType::Webauthn, serde_json::to_string(®istrations)?).save(&conn)?; - _generate_recover_code(&mut user, &conn); + TwoFactor::new(user.uuid.clone(), TwoFactorType::Webauthn, serde_json::to_string(®istrations)?) + .save(&conn) + .await?; + _generate_recover_code(&mut user, &conn).await; let keys_json: Vec = registrations.iter().map(WebauthnRegistration::to_json).collect(); Ok(Json(json!({ @@ -264,8 +267,8 @@ fn activate_webauthn(data: JsonUpcase, headers: Headers, con } #[put("/two-factor/webauthn", data = "")] -fn activate_webauthn_put(data: JsonUpcase, headers: Headers, conn: DbConn) -> JsonResult { - activate_webauthn(data, headers, conn) +async fn activate_webauthn_put(data: JsonUpcase, headers: Headers, conn: DbConn) -> JsonResult { + activate_webauthn(data, headers, conn).await } #[derive(Deserialize, Debug)] @@ -276,13 +279,14 @@ struct DeleteU2FData { } #[delete("/two-factor/webauthn", data = "")] -fn delete_webauthn(data: JsonUpcase, headers: Headers, conn: DbConn) -> JsonResult { +async fn delete_webauthn(data: JsonUpcase, headers: Headers, conn: DbConn) -> JsonResult { let id = data.data.Id.into_i32()?; if !headers.user.check_valid_password(&data.data.MasterPasswordHash) { err!("Invalid password"); } - let mut tf = match TwoFactor::find_by_user_and_type(&headers.user.uuid, TwoFactorType::Webauthn as i32, &conn) { + let mut tf = match TwoFactor::find_by_user_and_type(&headers.user.uuid, TwoFactorType::Webauthn as i32, &conn).await + { Some(tf) => tf, None => err!("Webauthn data not found!"), }; @@ -296,11 +300,12 @@ fn delete_webauthn(data: JsonUpcase, headers: Headers, conn: DbCo let removed_item = data.remove(item_pos); tf.data = serde_json::to_string(&data)?; - tf.save(&conn)?; + tf.save(&conn).await?; drop(tf); // If entry is migrated from u2f, delete the u2f entry as well - if let Some(mut u2f) = 
TwoFactor::find_by_user_and_type(&headers.user.uuid, TwoFactorType::U2f as i32, &conn) { + if let Some(mut u2f) = TwoFactor::find_by_user_and_type(&headers.user.uuid, TwoFactorType::U2f as i32, &conn).await + { use crate::api::core::two_factor::u2f::U2FRegistration; let mut data: Vec = match serde_json::from_str(&u2f.data) { Ok(d) => d, @@ -311,7 +316,7 @@ fn delete_webauthn(data: JsonUpcase, headers: Headers, conn: DbCo let new_data_str = serde_json::to_string(&data)?; u2f.data = new_data_str; - u2f.save(&conn)?; + u2f.save(&conn).await?; } let keys_json: Vec = data.iter().map(WebauthnRegistration::to_json).collect(); @@ -323,18 +328,21 @@ fn delete_webauthn(data: JsonUpcase, headers: Headers, conn: DbCo }))) } -pub fn get_webauthn_registrations(user_uuid: &str, conn: &DbConn) -> Result<(bool, Vec), Error> { +pub async fn get_webauthn_registrations( + user_uuid: &str, + conn: &DbConn, +) -> Result<(bool, Vec), Error> { let type_ = TwoFactorType::Webauthn as i32; - match TwoFactor::find_by_user_and_type(user_uuid, type_, conn) { + match TwoFactor::find_by_user_and_type(user_uuid, type_, conn).await { Some(tf) => Ok((tf.enabled, serde_json::from_str(&tf.data)?)), None => Ok((false, Vec::new())), // If no data, return empty list } } -pub fn generate_webauthn_login(user_uuid: &str, conn: &DbConn) -> JsonResult { +pub async fn generate_webauthn_login(user_uuid: &str, conn: &DbConn) -> JsonResult { // Load saved credentials let creds: Vec = - get_webauthn_registrations(user_uuid, conn)?.1.into_iter().map(|r| r.credential).collect(); + get_webauthn_registrations(user_uuid, conn).await?.1.into_iter().map(|r| r.credential).collect(); if creds.is_empty() { err!("No Webauthn devices registered") @@ -346,18 +354,19 @@ pub fn generate_webauthn_login(user_uuid: &str, conn: &DbConn) -> JsonResult { // Save the challenge state for later validation TwoFactor::new(user_uuid.into(), TwoFactorType::WebauthnLoginChallenge, serde_json::to_string(&state)?) 
- .save(conn)?; + .save(conn) + .await?; // Return challenge to the clients Ok(Json(serde_json::to_value(response.public_key)?)) } -pub fn validate_webauthn_login(user_uuid: &str, response: &str, conn: &DbConn) -> EmptyResult { +pub async fn validate_webauthn_login(user_uuid: &str, response: &str, conn: &DbConn) -> EmptyResult { let type_ = TwoFactorType::WebauthnLoginChallenge as i32; - let state = match TwoFactor::find_by_user_and_type(user_uuid, type_, conn) { + let state = match TwoFactor::find_by_user_and_type(user_uuid, type_, conn).await { Some(tf) => { let state: AuthenticationState = serde_json::from_str(&tf.data)?; - tf.delete(conn)?; + tf.delete(conn).await?; state } None => err!("Can't recover login challenge"), @@ -366,7 +375,7 @@ pub fn validate_webauthn_login(user_uuid: &str, response: &str, conn: &DbConn) - let rsp: crate::util::UpCase = serde_json::from_str(response)?; let rsp: PublicKeyCredential = rsp.data.into(); - let mut registrations = get_webauthn_registrations(user_uuid, conn)?.1; + let mut registrations = get_webauthn_registrations(user_uuid, conn).await?.1; // If the credential we received is migrated from U2F, enable the U2F compatibility //let use_u2f = registrations.iter().any(|r| r.migrated && r.credential.cred_id == rsp.raw_id.0); @@ -377,7 +386,8 @@ pub fn validate_webauthn_login(user_uuid: &str, response: &str, conn: &DbConn) - reg.credential.counter = auth_data.counter; TwoFactor::new(user_uuid.to_string(), TwoFactorType::Webauthn, serde_json::to_string(®istrations)?) 
- .save(conn)?; + .save(conn) + .await?; return Ok(()); } } diff --git a/src/api/core/two_factor/yubikey.rs b/src/api/core/two_factor/yubikey.rs index 618c755a..c430d1ed 100644 --- a/src/api/core/two_factor/yubikey.rs +++ b/src/api/core/two_factor/yubikey.rs @@ -78,7 +78,7 @@ fn verify_yubikey_otp(otp: String) -> EmptyResult { } #[post("/two-factor/get-yubikey", data = "")] -fn generate_yubikey(data: JsonUpcase, headers: Headers, conn: DbConn) -> JsonResult { +async fn generate_yubikey(data: JsonUpcase, headers: Headers, conn: DbConn) -> JsonResult { // Make sure the credentials are set get_yubico_credentials()?; @@ -92,7 +92,7 @@ fn generate_yubikey(data: JsonUpcase, headers: Headers, conn: DbCo let user_uuid = &user.uuid; let yubikey_type = TwoFactorType::YubiKey as i32; - let r = TwoFactor::find_by_user_and_type(user_uuid, yubikey_type, &conn); + let r = TwoFactor::find_by_user_and_type(user_uuid, yubikey_type, &conn).await; if let Some(r) = r { let yubikey_metadata: YubikeyMetadata = serde_json::from_str(&r.data)?; @@ -113,7 +113,7 @@ fn generate_yubikey(data: JsonUpcase, headers: Headers, conn: DbCo } #[post("/two-factor/yubikey", data = "")] -fn activate_yubikey(data: JsonUpcase, headers: Headers, conn: DbConn) -> JsonResult { +async fn activate_yubikey(data: JsonUpcase, headers: Headers, conn: DbConn) -> JsonResult { let data: EnableYubikeyData = data.into_inner().data; let mut user = headers.user; @@ -122,10 +122,11 @@ fn activate_yubikey(data: JsonUpcase, headers: Headers, conn: } // Check if we already have some data - let mut yubikey_data = match TwoFactor::find_by_user_and_type(&user.uuid, TwoFactorType::YubiKey as i32, &conn) { - Some(data) => data, - None => TwoFactor::new(user.uuid.clone(), TwoFactorType::YubiKey, String::new()), - }; + let mut yubikey_data = + match TwoFactor::find_by_user_and_type(&user.uuid, TwoFactorType::YubiKey as i32, &conn).await { + Some(data) => data, + None => TwoFactor::new(user.uuid.clone(), TwoFactorType::YubiKey, 
String::new()), + }; let yubikeys = parse_yubikeys(&data); @@ -154,9 +155,9 @@ fn activate_yubikey(data: JsonUpcase, headers: Headers, conn: }; yubikey_data.data = serde_json::to_string(&yubikey_metadata).unwrap(); - yubikey_data.save(&conn)?; + yubikey_data.save(&conn).await?; - _generate_recover_code(&mut user, &conn); + _generate_recover_code(&mut user, &conn).await; let mut result = jsonify_yubikeys(yubikey_metadata.Keys); @@ -168,8 +169,8 @@ fn activate_yubikey(data: JsonUpcase, headers: Headers, conn: } #[put("/two-factor/yubikey", data = "")] -fn activate_yubikey_put(data: JsonUpcase, headers: Headers, conn: DbConn) -> JsonResult { - activate_yubikey(data, headers, conn) +async fn activate_yubikey_put(data: JsonUpcase, headers: Headers, conn: DbConn) -> JsonResult { + activate_yubikey(data, headers, conn).await } pub fn validate_yubikey_login(response: &str, twofactor_data: &str) -> EmptyResult { diff --git a/src/api/icons.rs b/src/api/icons.rs index 33656d02..6af10a35 100644 --- a/src/api/icons.rs +++ b/src/api/icons.rs @@ -745,6 +745,7 @@ async fn download_icon(domain: &str) -> Result<(Bytes, Option<&str>), Error> { buffer = stream_to_bytes_limit(res, 512 * 1024).await?; // 512 KB for each icon max // Check if the icon type is allowed, else try an icon from the list. icon_type = get_icon_type(&buffer); + // Check if the icon type is allowed, else try an icon from the list. 
if icon_type.is_none() { buffer.clear(); debug!("Icon from {}, is not a valid image type", icon.href); diff --git a/src/api/identity.rs b/src/api/identity.rs index 0ad8a1b5..2c04990b 100644 --- a/src/api/identity.rs +++ b/src/api/identity.rs @@ -23,13 +23,13 @@ pub fn routes() -> Vec { } #[post("/connect/token", data = "")] -fn login(data: Form, conn: DbConn, ip: ClientIp) -> JsonResult { +async fn login(data: Form, conn: DbConn, ip: ClientIp) -> JsonResult { let data: ConnectData = data.into_inner(); match data.grant_type.as_ref() { "refresh_token" => { _check_is_some(&data.refresh_token, "refresh_token cannot be blank")?; - _refresh_login(data, conn) + _refresh_login(data, conn).await } "password" => { _check_is_some(&data.client_id, "client_id cannot be blank")?; @@ -41,34 +41,34 @@ fn login(data: Form, conn: DbConn, ip: ClientIp) -> JsonResult { _check_is_some(&data.device_name, "device_name cannot be blank")?; _check_is_some(&data.device_type, "device_type cannot be blank")?; - _password_login(data, conn, &ip) + _password_login(data, conn, &ip).await } "client_credentials" => { _check_is_some(&data.client_id, "client_id cannot be blank")?; _check_is_some(&data.client_secret, "client_secret cannot be blank")?; _check_is_some(&data.scope, "scope cannot be blank")?; - _api_key_login(data, conn, &ip) + _api_key_login(data, conn, &ip).await } t => err!("Invalid type", t), } } -fn _refresh_login(data: ConnectData, conn: DbConn) -> JsonResult { +async fn _refresh_login(data: ConnectData, conn: DbConn) -> JsonResult { // Extract token let token = data.refresh_token.unwrap(); // Get device by refresh token - let mut device = Device::find_by_refresh_token(&token, &conn).map_res("Invalid refresh token")?; + let mut device = Device::find_by_refresh_token(&token, &conn).await.map_res("Invalid refresh token")?; let scope = "api offline_access"; let scope_vec = vec!["api".into(), "offline_access".into()]; // Common - let user = User::find_by_uuid(&device.user_uuid, 
&conn).unwrap(); - let orgs = UserOrganization::find_confirmed_by_user(&user.uuid, &conn); + let user = User::find_by_uuid(&device.user_uuid, &conn).await.unwrap(); + let orgs = UserOrganization::find_confirmed_by_user(&user.uuid, &conn).await; let (access_token, expires_in) = device.refresh_tokens(&user, orgs, scope_vec); - device.save(&conn)?; + device.save(&conn).await?; Ok(Json(json!({ "access_token": access_token, @@ -86,7 +86,7 @@ fn _refresh_login(data: ConnectData, conn: DbConn) -> JsonResult { }))) } -fn _password_login(data: ConnectData, conn: DbConn, ip: &ClientIp) -> JsonResult { +async fn _password_login(data: ConnectData, conn: DbConn, ip: &ClientIp) -> JsonResult { // Validate scope let scope = data.scope.as_ref().unwrap(); if scope != "api offline_access" { @@ -99,7 +99,7 @@ fn _password_login(data: ConnectData, conn: DbConn, ip: &ClientIp) -> JsonResult // Get the user let username = data.username.as_ref().unwrap(); - let user = match User::find_by_mail(username, &conn) { + let user = match User::find_by_mail(username, &conn).await { Some(user) => user, None => err!("Username or password is incorrect. Try again", format!("IP: {}. Username: {}.", ip.ip, username)), }; @@ -130,7 +130,7 @@ fn _password_login(data: ConnectData, conn: DbConn, ip: &ClientIp) -> JsonResult user.last_verifying_at = Some(now); user.login_verify_count += 1; - if let Err(e) = user.save(&conn) { + if let Err(e) = user.save(&conn).await { error!("Error updating user: {:#?}", e); } @@ -144,9 +144,9 @@ fn _password_login(data: ConnectData, conn: DbConn, ip: &ClientIp) -> JsonResult err!("Please verify your email before trying again.", format!("IP: {}. 
Username: {}.", ip.ip, username)) } - let (mut device, new_device) = get_device(&data, &conn, &user); + let (mut device, new_device) = get_device(&data, &conn, &user).await; - let twofactor_token = twofactor_auth(&user.uuid, &data, &mut device, ip, &conn)?; + let twofactor_token = twofactor_auth(&user.uuid, &data, &mut device, ip, &conn).await?; if CONFIG.mail_enabled() && new_device { if let Err(e) = mail::send_new_device_logged_in(&user.email, &ip.ip.to_string(), &now, &device.name) { @@ -159,9 +159,9 @@ fn _password_login(data: ConnectData, conn: DbConn, ip: &ClientIp) -> JsonResult } // Common - let orgs = UserOrganization::find_confirmed_by_user(&user.uuid, &conn); + let orgs = UserOrganization::find_confirmed_by_user(&user.uuid, &conn).await; let (access_token, expires_in) = device.refresh_tokens(&user, orgs, scope_vec); - device.save(&conn)?; + device.save(&conn).await?; let mut result = json!({ "access_token": access_token, @@ -187,7 +187,7 @@ fn _password_login(data: ConnectData, conn: DbConn, ip: &ClientIp) -> JsonResult Ok(Json(result)) } -fn _api_key_login(data: ConnectData, conn: DbConn, ip: &ClientIp) -> JsonResult { +async fn _api_key_login(data: ConnectData, conn: DbConn, ip: &ClientIp) -> JsonResult { // Validate scope let scope = data.scope.as_ref().unwrap(); if scope != "api" { @@ -204,7 +204,7 @@ fn _api_key_login(data: ConnectData, conn: DbConn, ip: &ClientIp) -> JsonResult Some(uuid) => uuid, None => err!("Malformed client_id", format!("IP: {}.", ip.ip)), }; - let user = match User::find_by_uuid(user_uuid, &conn) { + let user = match User::find_by_uuid(user_uuid, &conn).await { Some(user) => user, None => err!("Invalid client_id", format!("IP: {}.", ip.ip)), }; @@ -220,7 +220,7 @@ fn _api_key_login(data: ConnectData, conn: DbConn, ip: &ClientIp) -> JsonResult err!("Incorrect client_secret", format!("IP: {}. 
Username: {}.", ip.ip, user.email)) } - let (mut device, new_device) = get_device(&data, &conn, &user); + let (mut device, new_device) = get_device(&data, &conn, &user).await; if CONFIG.mail_enabled() && new_device { let now = Utc::now().naive_utc(); @@ -234,9 +234,9 @@ fn _api_key_login(data: ConnectData, conn: DbConn, ip: &ClientIp) -> JsonResult } // Common - let orgs = UserOrganization::find_confirmed_by_user(&user.uuid, &conn); + let orgs = UserOrganization::find_confirmed_by_user(&user.uuid, &conn).await; let (access_token, expires_in) = device.refresh_tokens(&user, orgs, scope_vec); - device.save(&conn)?; + device.save(&conn).await?; info!("User {} logged in successfully via API key. IP: {}", user.email, ip.ip); @@ -258,7 +258,7 @@ fn _api_key_login(data: ConnectData, conn: DbConn, ip: &ClientIp) -> JsonResult } /// Retrieves an existing device or creates a new device from ConnectData and the User -fn get_device(data: &ConnectData, conn: &DbConn, user: &User) -> (Device, bool) { +async fn get_device(data: &ConnectData, conn: &DbConn, user: &User) -> (Device, bool) { // On iOS, device_type sends "iOS", on others it sends a number let device_type = util::try_parse_string(data.device_type.as_ref()).unwrap_or(0); let device_id = data.device_identifier.clone().expect("No device id provided"); @@ -266,7 +266,7 @@ fn get_device(data: &ConnectData, conn: &DbConn, user: &User) -> (Device, bool) let mut new_device = false; // Find device or create new - let device = match Device::find_by_uuid(&device_id, conn) { + let device = match Device::find_by_uuid(&device_id, conn).await { Some(device) => { // Check if owned device, and recreate if not if device.user_uuid != user.uuid { @@ -286,28 +286,28 @@ fn get_device(data: &ConnectData, conn: &DbConn, user: &User) -> (Device, bool) (device, new_device) } -fn twofactor_auth( +async fn twofactor_auth( user_uuid: &str, data: &ConnectData, device: &mut Device, ip: &ClientIp, conn: &DbConn, ) -> ApiResult> { - let twofactors = 
TwoFactor::find_by_user(user_uuid, conn); + let twofactors = TwoFactor::find_by_user(user_uuid, conn).await; // No twofactor token if twofactor is disabled if twofactors.is_empty() { return Ok(None); } - TwoFactorIncomplete::mark_incomplete(user_uuid, &device.uuid, &device.name, ip, conn)?; + TwoFactorIncomplete::mark_incomplete(user_uuid, &device.uuid, &device.name, ip, conn).await?; let twofactor_ids: Vec<_> = twofactors.iter().map(|tf| tf.atype).collect(); let selected_id = data.two_factor_provider.unwrap_or(twofactor_ids[0]); // If we aren't given a two factor provider, asume the first one let twofactor_code = match data.two_factor_token { Some(ref code) => code, - None => err_json!(_json_err_twofactor(&twofactor_ids, user_uuid, conn)?, "2FA token not provided"), + None => err_json!(_json_err_twofactor(&twofactor_ids, user_uuid, conn).await?, "2FA token not provided"), }; let selected_twofactor = twofactors.into_iter().find(|tf| tf.atype == selected_id && tf.enabled); @@ -320,16 +320,18 @@ fn twofactor_auth( match TwoFactorType::from_i32(selected_id) { Some(TwoFactorType::Authenticator) => { - _tf::authenticator::validate_totp_code_str(user_uuid, twofactor_code, &selected_data?, ip, conn)? + _tf::authenticator::validate_totp_code_str(user_uuid, twofactor_code, &selected_data?, ip, conn).await? + } + Some(TwoFactorType::U2f) => _tf::u2f::validate_u2f_login(user_uuid, twofactor_code, conn).await?, + Some(TwoFactorType::Webauthn) => { + _tf::webauthn::validate_webauthn_login(user_uuid, twofactor_code, conn).await? } - Some(TwoFactorType::U2f) => _tf::u2f::validate_u2f_login(user_uuid, twofactor_code, conn)?, - Some(TwoFactorType::Webauthn) => _tf::webauthn::validate_webauthn_login(user_uuid, twofactor_code, conn)?, Some(TwoFactorType::YubiKey) => _tf::yubikey::validate_yubikey_login(twofactor_code, &selected_data?)?, Some(TwoFactorType::Duo) => { - _tf::duo::validate_duo_login(data.username.as_ref().unwrap(), twofactor_code, conn)? 
+ _tf::duo::validate_duo_login(data.username.as_ref().unwrap(), twofactor_code, conn).await? } Some(TwoFactorType::Email) => { - _tf::email::validate_email_code_str(user_uuid, twofactor_code, &selected_data?, conn)? + _tf::email::validate_email_code_str(user_uuid, twofactor_code, &selected_data?, conn).await? } Some(TwoFactorType::Remember) => { @@ -338,14 +340,17 @@ fn twofactor_auth( remember = 1; // Make sure we also return the token here, otherwise it will only remember the first time } _ => { - err_json!(_json_err_twofactor(&twofactor_ids, user_uuid, conn)?, "2FA Remember token not provided") + err_json!( + _json_err_twofactor(&twofactor_ids, user_uuid, conn).await?, + "2FA Remember token not provided" + ) } } } _ => err!("Invalid two factor provider"), } - TwoFactorIncomplete::mark_complete(user_uuid, &device.uuid, conn)?; + TwoFactorIncomplete::mark_complete(user_uuid, &device.uuid, conn).await?; if !CONFIG.disable_2fa_remember() && remember == 1 { Ok(Some(device.refresh_twofactor_remember())) @@ -359,7 +364,7 @@ fn _selected_data(tf: Option) -> ApiResult { tf.map(|t| t.data).map_res("Two factor doesn't exist") } -fn _json_err_twofactor(providers: &[i32], user_uuid: &str, conn: &DbConn) -> ApiResult { +async fn _json_err_twofactor(providers: &[i32], user_uuid: &str, conn: &DbConn) -> ApiResult { use crate::api::core::two_factor; let mut result = json!({ @@ -376,7 +381,7 @@ fn _json_err_twofactor(providers: &[i32], user_uuid: &str, conn: &DbConn) -> Api Some(TwoFactorType::Authenticator) => { /* Nothing to do for TOTP */ } Some(TwoFactorType::U2f) if CONFIG.domain_set() => { - let request = two_factor::u2f::generate_u2f_login(user_uuid, conn)?; + let request = two_factor::u2f::generate_u2f_login(user_uuid, conn).await?; let mut challenge_list = Vec::new(); for key in request.registered_keys { @@ -396,17 +401,17 @@ fn _json_err_twofactor(providers: &[i32], user_uuid: &str, conn: &DbConn) -> Api } Some(TwoFactorType::Webauthn) if CONFIG.domain_set() => { - let 
request = two_factor::webauthn::generate_webauthn_login(user_uuid, conn)?; + let request = two_factor::webauthn::generate_webauthn_login(user_uuid, conn).await?; result["TwoFactorProviders2"][provider.to_string()] = request.0; } Some(TwoFactorType::Duo) => { - let email = match User::find_by_uuid(user_uuid, conn) { + let email = match User::find_by_uuid(user_uuid, conn).await { Some(u) => u.email, None => err!("User does not exist"), }; - let (signature, host) = duo::generate_duo_signature(&email, conn)?; + let (signature, host) = duo::generate_duo_signature(&email, conn).await?; result["TwoFactorProviders2"][provider.to_string()] = json!({ "Host": host, @@ -415,7 +420,7 @@ fn _json_err_twofactor(providers: &[i32], user_uuid: &str, conn: &DbConn) -> Api } Some(tf_type @ TwoFactorType::YubiKey) => { - let twofactor = match TwoFactor::find_by_user_and_type(user_uuid, tf_type as i32, conn) { + let twofactor = match TwoFactor::find_by_user_and_type(user_uuid, tf_type as i32, conn).await { Some(tf) => tf, None => err!("No YubiKey devices registered"), }; @@ -430,14 +435,14 @@ fn _json_err_twofactor(providers: &[i32], user_uuid: &str, conn: &DbConn) -> Api Some(tf_type @ TwoFactorType::Email) => { use crate::api::core::two_factor as _tf; - let twofactor = match TwoFactor::find_by_user_and_type(user_uuid, tf_type as i32, conn) { + let twofactor = match TwoFactor::find_by_user_and_type(user_uuid, tf_type as i32, conn).await { Some(tf) => tf, None => err!("No twofactor email registered"), }; // Send email immediately if email is the only 2FA option if providers.len() == 1 { - _tf::email::send_token(user_uuid, conn)? + _tf::email::send_token(user_uuid, conn).await? } let email_data = EmailTokenData::from_json(&twofactor.data)?; @@ -492,7 +497,7 @@ struct ConnectData { device_type: Option, #[field(name = uncased("device_push_token"))] #[field(name = uncased("devicepushtoken"))] - device_push_token: Option, // Unused; mobile device push not yet supported. 
+ _device_push_token: Option, // Unused; mobile device push not yet supported. // Needed for two-factor auth #[field(name = uncased("two_factor_provider"))] diff --git a/src/auth.rs b/src/auth.rs index 6aedae81..7eaf1494 100644 --- a/src/auth.rs +++ b/src/auth.rs @@ -350,12 +350,12 @@ impl<'r> FromRequest<'r> for Headers { _ => err_handler!("Error getting DB"), }; - let device = match Device::find_by_uuid(&device_uuid, &conn) { + let device = match Device::find_by_uuid(&device_uuid, &conn).await { Some(device) => device, None => err_handler!("Invalid device id"), }; - let user = match User::find_by_uuid(&user_uuid, &conn) { + let user = match User::find_by_uuid(&user_uuid, &conn).await { Some(user) => user, None => err_handler!("Device has no user associated"), }; @@ -377,7 +377,7 @@ impl<'r> FromRequest<'r> for Headers { // This prevents checking this stamp exception for new requests. let mut user = user; user.reset_stamp_exception(); - if let Err(e) = user.save(&conn) { + if let Err(e) = user.save(&conn).await { error!("Error updating user: {:#?}", e); } err_handler!("Stamp exception is expired") @@ -441,7 +441,7 @@ impl<'r> FromRequest<'r> for OrgHeaders { }; let user = headers.user; - let org_user = match UserOrganization::find_by_user_and_org(&user.uuid, &org_id, &conn) { + let org_user = match UserOrganization::find_by_user_and_org(&user.uuid, &org_id, &conn).await { Some(user) => { if user.status == UserOrgStatus::Confirmed as i32 { user @@ -553,7 +553,9 @@ impl<'r> FromRequest<'r> for ManagerHeaders { }; if !headers.org_user.has_full_access() { - match CollectionUser::find_by_collection_and_user(&col_id, &headers.org_user.user_uuid, &conn) { + match CollectionUser::find_by_collection_and_user(&col_id, &headers.org_user.user_uuid, &conn) + .await + { Some(_) => (), None => err_handler!("The current user isn't a manager for this collection"), } diff --git a/src/db/mod.rs b/src/db/mod.rs index db52d513..aeceda5a 100644 --- a/src/db/mod.rs +++ b/src/db/mod.rs 
@@ -30,7 +30,7 @@ pub mod __mysql_schema; #[path = "schemas/postgresql/schema.rs"] pub mod __postgresql_schema; -// There changes are based on Rocket 0.5-rc wrapper of Diesel: https://github.com/SergioBenitez/Rocket/blob/v0.5-rc/contrib/sync_db_pools +// These changes are based on Rocket 0.5-rc wrapper of Diesel: https://github.com/SergioBenitez/Rocket/blob/v0.5-rc/contrib/sync_db_pools // A wrapper around spawn_blocking that propagates panics to the calling code. pub async fn run_blocking(job: F) -> R diff --git a/src/db/models/attachment.rs b/src/db/models/attachment.rs index bb0f9395..975687e3 100644 --- a/src/db/models/attachment.rs +++ b/src/db/models/attachment.rs @@ -60,7 +60,7 @@ use crate::error::MapResult; /// Database methods impl Attachment { - pub fn save(&self, conn: &DbConn) -> EmptyResult { + pub async fn save(&self, conn: &DbConn) -> EmptyResult { db_run! { conn: sqlite, mysql { match diesel::replace_into(attachments::table) @@ -92,7 +92,7 @@ impl Attachment { } } - pub fn delete(&self, conn: &DbConn) -> EmptyResult { + pub async fn delete(&self, conn: &DbConn) -> EmptyResult { db_run! { conn: { crate::util::retry( || diesel::delete(attachments::table.filter(attachments::id.eq(&self.id))).execute(conn), @@ -116,14 +116,14 @@ impl Attachment { }} } - pub fn delete_all_by_cipher(cipher_uuid: &str, conn: &DbConn) -> EmptyResult { - for attachment in Attachment::find_by_cipher(cipher_uuid, conn) { - attachment.delete(conn)?; + pub async fn delete_all_by_cipher(cipher_uuid: &str, conn: &DbConn) -> EmptyResult { + for attachment in Attachment::find_by_cipher(cipher_uuid, conn).await { + attachment.delete(conn).await?; } Ok(()) } - pub fn find_by_id(id: &str, conn: &DbConn) -> Option { + pub async fn find_by_id(id: &str, conn: &DbConn) -> Option { db_run! 
{ conn: { attachments::table .filter(attachments::id.eq(id.to_lowercase())) @@ -133,7 +133,7 @@ impl Attachment { }} } - pub fn find_by_cipher(cipher_uuid: &str, conn: &DbConn) -> Vec { + pub async fn find_by_cipher(cipher_uuid: &str, conn: &DbConn) -> Vec { db_run! { conn: { attachments::table .filter(attachments::cipher_uuid.eq(cipher_uuid)) @@ -143,7 +143,7 @@ impl Attachment { }} } - pub fn size_by_user(user_uuid: &str, conn: &DbConn) -> i64 { + pub async fn size_by_user(user_uuid: &str, conn: &DbConn) -> i64 { db_run! { conn: { let result: Option = attachments::table .left_join(ciphers::table.on(ciphers::uuid.eq(attachments::cipher_uuid))) @@ -155,7 +155,7 @@ impl Attachment { }} } - pub fn count_by_user(user_uuid: &str, conn: &DbConn) -> i64 { + pub async fn count_by_user(user_uuid: &str, conn: &DbConn) -> i64 { db_run! { conn: { attachments::table .left_join(ciphers::table.on(ciphers::uuid.eq(attachments::cipher_uuid))) @@ -166,7 +166,7 @@ impl Attachment { }} } - pub fn size_by_org(org_uuid: &str, conn: &DbConn) -> i64 { + pub async fn size_by_org(org_uuid: &str, conn: &DbConn) -> i64 { db_run! { conn: { let result: Option = attachments::table .left_join(ciphers::table.on(ciphers::uuid.eq(attachments::cipher_uuid))) @@ -178,7 +178,7 @@ impl Attachment { }} } - pub fn count_by_org(org_uuid: &str, conn: &DbConn) -> i64 { + pub async fn count_by_org(org_uuid: &str, conn: &DbConn) -> i64 { db_run! 
{ conn: { attachments::table .left_join(ciphers::table.on(ciphers::uuid.eq(attachments::cipher_uuid))) diff --git a/src/db/models/cipher.rs b/src/db/models/cipher.rs index 39aaf580..e6f2050b 100644 --- a/src/db/models/cipher.rs +++ b/src/db/models/cipher.rs @@ -82,10 +82,10 @@ use crate::error::MapResult; /// Database methods impl Cipher { - pub fn to_json(&self, host: &str, user_uuid: &str, conn: &DbConn) -> Value { + pub async fn to_json(&self, host: &str, user_uuid: &str, conn: &DbConn) -> Value { use crate::util::format_date; - let attachments = Attachment::find_by_cipher(&self.uuid, conn); + let attachments = Attachment::find_by_cipher(&self.uuid, conn).await; // When there are no attachments use null instead of an empty array let attachments_json = if attachments.is_empty() { Value::Null @@ -97,7 +97,7 @@ impl Cipher { let password_history_json = self.password_history.as_ref().and_then(|s| serde_json::from_str(s).ok()).unwrap_or(Value::Null); - let (read_only, hide_passwords) = match self.get_access_restrictions(user_uuid, conn) { + let (read_only, hide_passwords) = match self.get_access_restrictions(user_uuid, conn).await { Some((ro, hp)) => (ro, hp), None => { error!("Cipher ownership assertion failure"); @@ -144,8 +144,8 @@ impl Cipher { "Type": self.atype, "RevisionDate": format_date(&self.updated_at), "DeletedDate": self.deleted_at.map_or(Value::Null, |d| Value::String(format_date(&d))), - "FolderId": self.get_folder_uuid(user_uuid, conn), - "Favorite": self.is_favorite(user_uuid, conn), + "FolderId": self.get_folder_uuid(user_uuid, conn).await, + "Favorite": self.is_favorite(user_uuid, conn).await, "Reprompt": self.reprompt.unwrap_or(RepromptType::None as i32), "OrganizationId": self.organization_uuid, "Attachments": attachments_json, @@ -154,7 +154,7 @@ impl Cipher { "OrganizationUseTotp": true, // This field is specific to the cipherDetails type. 
- "CollectionIds": self.get_collections(user_uuid, conn), + "CollectionIds": self.get_collections(user_uuid, conn).await, "Name": self.name, "Notes": self.notes, @@ -189,28 +189,28 @@ impl Cipher { json_object } - pub fn update_users_revision(&self, conn: &DbConn) -> Vec { + pub async fn update_users_revision(&self, conn: &DbConn) -> Vec { let mut user_uuids = Vec::new(); match self.user_uuid { Some(ref user_uuid) => { - User::update_uuid_revision(user_uuid, conn); + User::update_uuid_revision(user_uuid, conn).await; user_uuids.push(user_uuid.clone()) } None => { // Belongs to Organization, need to update affected users if let Some(ref org_uuid) = self.organization_uuid { - UserOrganization::find_by_cipher_and_org(&self.uuid, org_uuid, conn).iter().for_each(|user_org| { - User::update_uuid_revision(&user_org.user_uuid, conn); + for user_org in UserOrganization::find_by_cipher_and_org(&self.uuid, org_uuid, conn).await.iter() { + User::update_uuid_revision(&user_org.user_uuid, conn).await; user_uuids.push(user_org.user_uuid.clone()) - }); + } } } }; user_uuids } - pub fn save(&mut self, conn: &DbConn) -> EmptyResult { - self.update_users_revision(conn); + pub async fn save(&mut self, conn: &DbConn) -> EmptyResult { + self.update_users_revision(conn).await; self.updated_at = Utc::now().naive_utc(); db_run! 
{ conn: @@ -244,13 +244,13 @@ impl Cipher { } } - pub fn delete(&self, conn: &DbConn) -> EmptyResult { - self.update_users_revision(conn); + pub async fn delete(&self, conn: &DbConn) -> EmptyResult { + self.update_users_revision(conn).await; - FolderCipher::delete_all_by_cipher(&self.uuid, conn)?; - CollectionCipher::delete_all_by_cipher(&self.uuid, conn)?; - Attachment::delete_all_by_cipher(&self.uuid, conn)?; - Favorite::delete_all_by_cipher(&self.uuid, conn)?; + FolderCipher::delete_all_by_cipher(&self.uuid, conn).await?; + CollectionCipher::delete_all_by_cipher(&self.uuid, conn).await?; + Attachment::delete_all_by_cipher(&self.uuid, conn).await?; + Favorite::delete_all_by_cipher(&self.uuid, conn).await?; db_run! { conn: { diesel::delete(ciphers::table.filter(ciphers::uuid.eq(&self.uuid))) @@ -259,54 +259,55 @@ impl Cipher { }} } - pub fn delete_all_by_organization(org_uuid: &str, conn: &DbConn) -> EmptyResult { - for cipher in Self::find_by_org(org_uuid, conn) { - cipher.delete(conn)?; + pub async fn delete_all_by_organization(org_uuid: &str, conn: &DbConn) -> EmptyResult { + // TODO: Optimize this by executing a DELETE directly on the database, instead of first fetching. + for cipher in Self::find_by_org(org_uuid, conn).await { + cipher.delete(conn).await?; } Ok(()) } - pub fn delete_all_by_user(user_uuid: &str, conn: &DbConn) -> EmptyResult { - for cipher in Self::find_owned_by_user(user_uuid, conn) { - cipher.delete(conn)?; + pub async fn delete_all_by_user(user_uuid: &str, conn: &DbConn) -> EmptyResult { + for cipher in Self::find_owned_by_user(user_uuid, conn).await { + cipher.delete(conn).await?; } Ok(()) } /// Purge all ciphers that are old enough to be auto-deleted. 
- pub fn purge_trash(conn: &DbConn) { + pub async fn purge_trash(conn: &DbConn) { if let Some(auto_delete_days) = CONFIG.trash_auto_delete_days() { let now = Utc::now().naive_utc(); let dt = now - Duration::days(auto_delete_days); - for cipher in Self::find_deleted_before(&dt, conn) { - cipher.delete(conn).ok(); + for cipher in Self::find_deleted_before(&dt, conn).await { + cipher.delete(conn).await.ok(); } } } - pub fn move_to_folder(&self, folder_uuid: Option, user_uuid: &str, conn: &DbConn) -> EmptyResult { - User::update_uuid_revision(user_uuid, conn); + pub async fn move_to_folder(&self, folder_uuid: Option, user_uuid: &str, conn: &DbConn) -> EmptyResult { + User::update_uuid_revision(user_uuid, conn).await; - match (self.get_folder_uuid(user_uuid, conn), folder_uuid) { + match (self.get_folder_uuid(user_uuid, conn).await, folder_uuid) { // No changes (None, None) => Ok(()), (Some(ref old), Some(ref new)) if old == new => Ok(()), // Add to folder - (None, Some(new)) => FolderCipher::new(&new, &self.uuid).save(conn), + (None, Some(new)) => FolderCipher::new(&new, &self.uuid).save(conn).await, // Remove from folder - (Some(old), None) => match FolderCipher::find_by_folder_and_cipher(&old, &self.uuid, conn) { - Some(old) => old.delete(conn), + (Some(old), None) => match FolderCipher::find_by_folder_and_cipher(&old, &self.uuid, conn).await { + Some(old) => old.delete(conn).await, None => err!("Couldn't move from previous folder"), }, // Move to another folder (Some(old), Some(new)) => { - if let Some(old) = FolderCipher::find_by_folder_and_cipher(&old, &self.uuid, conn) { - old.delete(conn)?; + if let Some(old) = FolderCipher::find_by_folder_and_cipher(&old, &self.uuid, conn).await { + old.delete(conn).await?; } - FolderCipher::new(&new, &self.uuid).save(conn) + FolderCipher::new(&new, &self.uuid).save(conn).await } } } @@ -317,9 +318,9 @@ impl Cipher { } /// Returns whether this cipher is owned by an org in which the user has full access. 
- pub fn is_in_full_access_org(&self, user_uuid: &str, conn: &DbConn) -> bool { + pub async fn is_in_full_access_org(&self, user_uuid: &str, conn: &DbConn) -> bool { if let Some(ref org_uuid) = self.organization_uuid { - if let Some(user_org) = UserOrganization::find_by_user_and_org(user_uuid, org_uuid, conn) { + if let Some(user_org) = UserOrganization::find_by_user_and_org(user_uuid, org_uuid, conn).await { return user_org.has_full_access(); } } @@ -332,11 +333,11 @@ impl Cipher { /// not in any collection the user has access to. Otherwise, the user has /// access to this cipher, and Some(read_only, hide_passwords) represents /// the access restrictions. - pub fn get_access_restrictions(&self, user_uuid: &str, conn: &DbConn) -> Option<(bool, bool)> { + pub async fn get_access_restrictions(&self, user_uuid: &str, conn: &DbConn) -> Option<(bool, bool)> { // Check whether this cipher is directly owned by the user, or is in // a collection that the user has full access to. If so, there are no // access restrictions. 
- if self.is_owned_by_user(user_uuid) || self.is_in_full_access_org(user_uuid, conn) { + if self.is_owned_by_user(user_uuid) || self.is_in_full_access_org(user_uuid, conn).await { return Some((false, false)); } @@ -379,31 +380,31 @@ impl Cipher { }} } - pub fn is_write_accessible_to_user(&self, user_uuid: &str, conn: &DbConn) -> bool { - match self.get_access_restrictions(user_uuid, conn) { + pub async fn is_write_accessible_to_user(&self, user_uuid: &str, conn: &DbConn) -> bool { + match self.get_access_restrictions(user_uuid, conn).await { Some((read_only, _hide_passwords)) => !read_only, None => false, } } - pub fn is_accessible_to_user(&self, user_uuid: &str, conn: &DbConn) -> bool { - self.get_access_restrictions(user_uuid, conn).is_some() + pub async fn is_accessible_to_user(&self, user_uuid: &str, conn: &DbConn) -> bool { + self.get_access_restrictions(user_uuid, conn).await.is_some() } // Returns whether this cipher is a favorite of the specified user. - pub fn is_favorite(&self, user_uuid: &str, conn: &DbConn) -> bool { - Favorite::is_favorite(&self.uuid, user_uuid, conn) + pub async fn is_favorite(&self, user_uuid: &str, conn: &DbConn) -> bool { + Favorite::is_favorite(&self.uuid, user_uuid, conn).await } // Sets whether this cipher is a favorite of the specified user. - pub fn set_favorite(&self, favorite: Option, user_uuid: &str, conn: &DbConn) -> EmptyResult { + pub async fn set_favorite(&self, favorite: Option, user_uuid: &str, conn: &DbConn) -> EmptyResult { match favorite { None => Ok(()), // No change requested. - Some(status) => Favorite::set_favorite(status, &self.uuid, user_uuid, conn), + Some(status) => Favorite::set_favorite(status, &self.uuid, user_uuid, conn).await, } } - pub fn get_folder_uuid(&self, user_uuid: &str, conn: &DbConn) -> Option { + pub async fn get_folder_uuid(&self, user_uuid: &str, conn: &DbConn) -> Option { db_run! 
{conn: { folders_ciphers::table .inner_join(folders::table) @@ -415,7 +416,7 @@ impl Cipher { }} } - pub fn find_by_uuid(uuid: &str, conn: &DbConn) -> Option { + pub async fn find_by_uuid(uuid: &str, conn: &DbConn) -> Option { db_run! {conn: { ciphers::table .filter(ciphers::uuid.eq(uuid)) @@ -437,7 +438,7 @@ impl Cipher { // true, then the non-interesting ciphers will not be returned. As a // result, those ciphers will not appear in "My Vault" for the org // owner/admin, but they can still be accessed via the org vault view. - pub fn find_by_user(user_uuid: &str, visible_only: bool, conn: &DbConn) -> Vec { + pub async fn find_by_user(user_uuid: &str, visible_only: bool, conn: &DbConn) -> Vec { db_run! {conn: { let mut query = ciphers::table .left_join(ciphers_collections::table.on( @@ -472,12 +473,12 @@ impl Cipher { } // Find all ciphers visible to the specified user. - pub fn find_by_user_visible(user_uuid: &str, conn: &DbConn) -> Vec { - Self::find_by_user(user_uuid, true, conn) + pub async fn find_by_user_visible(user_uuid: &str, conn: &DbConn) -> Vec { + Self::find_by_user(user_uuid, true, conn).await } // Find all ciphers directly owned by the specified user. - pub fn find_owned_by_user(user_uuid: &str, conn: &DbConn) -> Vec { + pub async fn find_owned_by_user(user_uuid: &str, conn: &DbConn) -> Vec { db_run! {conn: { ciphers::table .filter( @@ -488,7 +489,7 @@ impl Cipher { }} } - pub fn count_owned_by_user(user_uuid: &str, conn: &DbConn) -> i64 { + pub async fn count_owned_by_user(user_uuid: &str, conn: &DbConn) -> i64 { db_run! {conn: { ciphers::table .filter(ciphers::user_uuid.eq(user_uuid)) @@ -499,7 +500,7 @@ impl Cipher { }} } - pub fn find_by_org(org_uuid: &str, conn: &DbConn) -> Vec { + pub async fn find_by_org(org_uuid: &str, conn: &DbConn) -> Vec { db_run! 
{conn: { ciphers::table .filter(ciphers::organization_uuid.eq(org_uuid)) @@ -507,7 +508,7 @@ impl Cipher { }} } - pub fn count_by_org(org_uuid: &str, conn: &DbConn) -> i64 { + pub async fn count_by_org(org_uuid: &str, conn: &DbConn) -> i64 { db_run! {conn: { ciphers::table .filter(ciphers::organization_uuid.eq(org_uuid)) @@ -518,7 +519,7 @@ impl Cipher { }} } - pub fn find_by_folder(folder_uuid: &str, conn: &DbConn) -> Vec { + pub async fn find_by_folder(folder_uuid: &str, conn: &DbConn) -> Vec { db_run! {conn: { folders_ciphers::table.inner_join(ciphers::table) .filter(folders_ciphers::folder_uuid.eq(folder_uuid)) @@ -528,7 +529,7 @@ impl Cipher { } /// Find all ciphers that were deleted before the specified datetime. - pub fn find_deleted_before(dt: &NaiveDateTime, conn: &DbConn) -> Vec { + pub async fn find_deleted_before(dt: &NaiveDateTime, conn: &DbConn) -> Vec { db_run! {conn: { ciphers::table .filter(ciphers::deleted_at.lt(dt)) @@ -536,7 +537,7 @@ impl Cipher { }} } - pub fn get_collections(&self, user_id: &str, conn: &DbConn) -> Vec { + pub async fn get_collections(&self, user_id: &str, conn: &DbConn) -> Vec { db_run! 
{conn: { ciphers_collections::table .inner_join(collections::table.on( diff --git a/src/db/models/collection.rs b/src/db/models/collection.rs index 2073ca17..b5782f7b 100644 --- a/src/db/models/collection.rs +++ b/src/db/models/collection.rs @@ -57,11 +57,11 @@ impl Collection { }) } - pub fn to_json_details(&self, user_uuid: &str, conn: &DbConn) -> Value { + pub async fn to_json_details(&self, user_uuid: &str, conn: &DbConn) -> Value { let mut json_object = self.to_json(); json_object["Object"] = json!("collectionDetails"); - json_object["ReadOnly"] = json!(!self.is_writable_by_user(user_uuid, conn)); - json_object["HidePasswords"] = json!(self.hide_passwords_for_user(user_uuid, conn)); + json_object["ReadOnly"] = json!(!self.is_writable_by_user(user_uuid, conn).await); + json_object["HidePasswords"] = json!(self.hide_passwords_for_user(user_uuid, conn).await); json_object } } @@ -73,8 +73,8 @@ use crate::error::MapResult; /// Database methods impl Collection { - pub fn save(&self, conn: &DbConn) -> EmptyResult { - self.update_users_revision(conn); + pub async fn save(&self, conn: &DbConn) -> EmptyResult { + self.update_users_revision(conn).await; db_run! { conn: sqlite, mysql { @@ -107,10 +107,10 @@ impl Collection { } } - pub fn delete(self, conn: &DbConn) -> EmptyResult { - self.update_users_revision(conn); - CollectionCipher::delete_all_by_collection(&self.uuid, conn)?; - CollectionUser::delete_all_by_collection(&self.uuid, conn)?; + pub async fn delete(self, conn: &DbConn) -> EmptyResult { + self.update_users_revision(conn).await; + CollectionCipher::delete_all_by_collection(&self.uuid, conn).await?; + CollectionUser::delete_all_by_collection(&self.uuid, conn).await?; db_run! 
{ conn: { diesel::delete(collections::table.filter(collections::uuid.eq(self.uuid))) @@ -119,20 +119,20 @@ impl Collection { }} } - pub fn delete_all_by_organization(org_uuid: &str, conn: &DbConn) -> EmptyResult { - for collection in Self::find_by_organization(org_uuid, conn) { - collection.delete(conn)?; + pub async fn delete_all_by_organization(org_uuid: &str, conn: &DbConn) -> EmptyResult { + for collection in Self::find_by_organization(org_uuid, conn).await { + collection.delete(conn).await?; } Ok(()) } - pub fn update_users_revision(&self, conn: &DbConn) { - UserOrganization::find_by_collection_and_org(&self.uuid, &self.org_uuid, conn).iter().for_each(|user_org| { - User::update_uuid_revision(&user_org.user_uuid, conn); - }); + pub async fn update_users_revision(&self, conn: &DbConn) { + for user_org in UserOrganization::find_by_collection_and_org(&self.uuid, &self.org_uuid, conn).await.iter() { + User::update_uuid_revision(&user_org.user_uuid, conn).await; + } } - pub fn find_by_uuid(uuid: &str, conn: &DbConn) -> Option { + pub async fn find_by_uuid(uuid: &str, conn: &DbConn) -> Option { db_run! { conn: { collections::table .filter(collections::uuid.eq(uuid)) @@ -142,7 +142,7 @@ impl Collection { }} } - pub fn find_by_user_uuid(user_uuid: &str, conn: &DbConn) -> Vec { + pub async fn find_by_user_uuid(user_uuid: &str, conn: &DbConn) -> Vec { db_run! 
{ conn: { collections::table .left_join(users_collections::table.on( @@ -167,11 +167,11 @@ impl Collection { }} } - pub fn find_by_organization_and_user_uuid(org_uuid: &str, user_uuid: &str, conn: &DbConn) -> Vec { - Self::find_by_user_uuid(user_uuid, conn).into_iter().filter(|c| c.org_uuid == org_uuid).collect() + pub async fn find_by_organization_and_user_uuid(org_uuid: &str, user_uuid: &str, conn: &DbConn) -> Vec { + Self::find_by_user_uuid(user_uuid, conn).await.into_iter().filter(|c| c.org_uuid == org_uuid).collect() } - pub fn find_by_organization(org_uuid: &str, conn: &DbConn) -> Vec { + pub async fn find_by_organization(org_uuid: &str, conn: &DbConn) -> Vec { db_run! { conn: { collections::table .filter(collections::org_uuid.eq(org_uuid)) @@ -181,7 +181,7 @@ impl Collection { }} } - pub fn find_by_uuid_and_org(uuid: &str, org_uuid: &str, conn: &DbConn) -> Option { + pub async fn find_by_uuid_and_org(uuid: &str, org_uuid: &str, conn: &DbConn) -> Option { db_run! { conn: { collections::table .filter(collections::uuid.eq(uuid)) @@ -193,7 +193,7 @@ impl Collection { }} } - pub fn find_by_uuid_and_user(uuid: &str, user_uuid: &str, conn: &DbConn) -> Option { + pub async fn find_by_uuid_and_user(uuid: &str, user_uuid: &str, conn: &DbConn) -> Option { db_run! 
{ conn: { collections::table .left_join(users_collections::table.on( @@ -219,8 +219,8 @@ impl Collection { }} } - pub fn is_writable_by_user(&self, user_uuid: &str, conn: &DbConn) -> bool { - match UserOrganization::find_by_user_and_org(user_uuid, &self.org_uuid, conn) { + pub async fn is_writable_by_user(&self, user_uuid: &str, conn: &DbConn) -> bool { + match UserOrganization::find_by_user_and_org(user_uuid, &self.org_uuid, conn).await { None => false, // Not in Org Some(user_org) => { if user_org.has_full_access() { @@ -241,8 +241,8 @@ impl Collection { } } - pub fn hide_passwords_for_user(&self, user_uuid: &str, conn: &DbConn) -> bool { - match UserOrganization::find_by_user_and_org(user_uuid, &self.org_uuid, conn) { + pub async fn hide_passwords_for_user(&self, user_uuid: &str, conn: &DbConn) -> bool { + match UserOrganization::find_by_user_and_org(user_uuid, &self.org_uuid, conn).await { None => true, // Not in Org Some(user_org) => { if user_org.has_full_access() { @@ -266,7 +266,7 @@ impl Collection { /// Database methods impl CollectionUser { - pub fn find_by_organization_and_user_uuid(org_uuid: &str, user_uuid: &str, conn: &DbConn) -> Vec { + pub async fn find_by_organization_and_user_uuid(org_uuid: &str, user_uuid: &str, conn: &DbConn) -> Vec { db_run! { conn: { users_collections::table .filter(users_collections::user_uuid.eq(user_uuid)) @@ -279,14 +279,14 @@ impl CollectionUser { }} } - pub fn save( + pub async fn save( user_uuid: &str, collection_uuid: &str, read_only: bool, hide_passwords: bool, conn: &DbConn, ) -> EmptyResult { - User::update_uuid_revision(user_uuid, conn); + User::update_uuid_revision(user_uuid, conn).await; db_run! { conn: sqlite, mysql { @@ -337,8 +337,8 @@ impl CollectionUser { } } - pub fn delete(self, conn: &DbConn) -> EmptyResult { - User::update_uuid_revision(&self.user_uuid, conn); + pub async fn delete(self, conn: &DbConn) -> EmptyResult { + User::update_uuid_revision(&self.user_uuid, conn).await; db_run! 
{ conn: { diesel::delete( @@ -351,7 +351,7 @@ impl CollectionUser { }} } - pub fn find_by_collection(collection_uuid: &str, conn: &DbConn) -> Vec { + pub async fn find_by_collection(collection_uuid: &str, conn: &DbConn) -> Vec { db_run! { conn: { users_collections::table .filter(users_collections::collection_uuid.eq(collection_uuid)) @@ -362,7 +362,7 @@ impl CollectionUser { }} } - pub fn find_by_collection_and_user(collection_uuid: &str, user_uuid: &str, conn: &DbConn) -> Option { + pub async fn find_by_collection_and_user(collection_uuid: &str, user_uuid: &str, conn: &DbConn) -> Option { db_run! { conn: { users_collections::table .filter(users_collections::collection_uuid.eq(collection_uuid)) @@ -374,10 +374,10 @@ impl CollectionUser { }} } - pub fn delete_all_by_collection(collection_uuid: &str, conn: &DbConn) -> EmptyResult { - CollectionUser::find_by_collection(collection_uuid, conn).iter().for_each(|collection| { - User::update_uuid_revision(&collection.user_uuid, conn); - }); + pub async fn delete_all_by_collection(collection_uuid: &str, conn: &DbConn) -> EmptyResult { + for collection in CollectionUser::find_by_collection(collection_uuid, conn).await.iter() { + User::update_uuid_revision(&collection.user_uuid, conn).await; + } db_run! { conn: { diesel::delete(users_collections::table.filter(users_collections::collection_uuid.eq(collection_uuid))) @@ -386,8 +386,8 @@ impl CollectionUser { }} } - pub fn delete_all_by_user_and_org(user_uuid: &str, org_uuid: &str, conn: &DbConn) -> EmptyResult { - let collectionusers = Self::find_by_organization_and_user_uuid(org_uuid, user_uuid, conn); + pub async fn delete_all_by_user_and_org(user_uuid: &str, org_uuid: &str, conn: &DbConn) -> EmptyResult { + let collectionusers = Self::find_by_organization_and_user_uuid(org_uuid, user_uuid, conn).await; db_run! 
{ conn: { for user in collectionusers { @@ -405,8 +405,8 @@ impl CollectionUser { /// Database methods impl CollectionCipher { - pub fn save(cipher_uuid: &str, collection_uuid: &str, conn: &DbConn) -> EmptyResult { - Self::update_users_revision(collection_uuid, conn); + pub async fn save(cipher_uuid: &str, collection_uuid: &str, conn: &DbConn) -> EmptyResult { + Self::update_users_revision(collection_uuid, conn).await; db_run! { conn: sqlite, mysql { @@ -435,8 +435,8 @@ impl CollectionCipher { } } - pub fn delete(cipher_uuid: &str, collection_uuid: &str, conn: &DbConn) -> EmptyResult { - Self::update_users_revision(collection_uuid, conn); + pub async fn delete(cipher_uuid: &str, collection_uuid: &str, conn: &DbConn) -> EmptyResult { + Self::update_users_revision(collection_uuid, conn).await; db_run! { conn: { diesel::delete( @@ -449,7 +449,7 @@ impl CollectionCipher { }} } - pub fn delete_all_by_cipher(cipher_uuid: &str, conn: &DbConn) -> EmptyResult { + pub async fn delete_all_by_cipher(cipher_uuid: &str, conn: &DbConn) -> EmptyResult { db_run! { conn: { diesel::delete(ciphers_collections::table.filter(ciphers_collections::cipher_uuid.eq(cipher_uuid))) .execute(conn) @@ -457,7 +457,7 @@ impl CollectionCipher { }} } - pub fn delete_all_by_collection(collection_uuid: &str, conn: &DbConn) -> EmptyResult { + pub async fn delete_all_by_collection(collection_uuid: &str, conn: &DbConn) -> EmptyResult { db_run! 
{ conn: { diesel::delete(ciphers_collections::table.filter(ciphers_collections::collection_uuid.eq(collection_uuid))) .execute(conn) @@ -465,9 +465,9 @@ impl CollectionCipher { }} } - pub fn update_users_revision(collection_uuid: &str, conn: &DbConn) { - if let Some(collection) = Collection::find_by_uuid(collection_uuid, conn) { - collection.update_users_revision(conn); + pub async fn update_users_revision(collection_uuid: &str, conn: &DbConn) { + if let Some(collection) = Collection::find_by_uuid(collection_uuid, conn).await { + collection.update_users_revision(conn).await; } } } diff --git a/src/db/models/device.rs b/src/db/models/device.rs index 05955c04..2f5cd018 100644 --- a/src/db/models/device.rs +++ b/src/db/models/device.rs @@ -118,7 +118,7 @@ use crate::error::MapResult; /// Database methods impl Device { - pub fn save(&mut self, conn: &DbConn) -> EmptyResult { + pub async fn save(&mut self, conn: &DbConn) -> EmptyResult { self.updated_at = Utc::now().naive_utc(); db_run! { conn: @@ -138,7 +138,7 @@ impl Device { } } - pub fn delete(self, conn: &DbConn) -> EmptyResult { + pub async fn delete(self, conn: &DbConn) -> EmptyResult { db_run! { conn: { diesel::delete(devices::table.filter(devices::uuid.eq(self.uuid))) .execute(conn) @@ -146,14 +146,14 @@ impl Device { }} } - pub fn delete_all_by_user(user_uuid: &str, conn: &DbConn) -> EmptyResult { - for device in Self::find_by_user(user_uuid, conn) { - device.delete(conn)?; + pub async fn delete_all_by_user(user_uuid: &str, conn: &DbConn) -> EmptyResult { + for device in Self::find_by_user(user_uuid, conn).await { + device.delete(conn).await?; } Ok(()) } - pub fn find_by_uuid(uuid: &str, conn: &DbConn) -> Option { + pub async fn find_by_uuid(uuid: &str, conn: &DbConn) -> Option { db_run! 
{ conn: { devices::table .filter(devices::uuid.eq(uuid)) @@ -163,7 +163,7 @@ impl Device { }} } - pub fn find_by_refresh_token(refresh_token: &str, conn: &DbConn) -> Option { + pub async fn find_by_refresh_token(refresh_token: &str, conn: &DbConn) -> Option { db_run! { conn: { devices::table .filter(devices::refresh_token.eq(refresh_token)) @@ -173,7 +173,7 @@ impl Device { }} } - pub fn find_by_user(user_uuid: &str, conn: &DbConn) -> Vec { + pub async fn find_by_user(user_uuid: &str, conn: &DbConn) -> Vec { db_run! { conn: { devices::table .filter(devices::user_uuid.eq(user_uuid)) @@ -183,7 +183,7 @@ impl Device { }} } - pub fn find_latest_active_by_user(user_uuid: &str, conn: &DbConn) -> Option { + pub async fn find_latest_active_by_user(user_uuid: &str, conn: &DbConn) -> Option { db_run! { conn: { devices::table .filter(devices::user_uuid.eq(user_uuid)) diff --git a/src/db/models/emergency_access.rs b/src/db/models/emergency_access.rs index 7327eb34..e878507b 100644 --- a/src/db/models/emergency_access.rs +++ b/src/db/models/emergency_access.rs @@ -73,8 +73,8 @@ impl EmergencyAccess { }) } - pub fn to_json_grantor_details(&self, conn: &DbConn) -> Value { - let grantor_user = User::find_by_uuid(&self.grantor_uuid, conn).expect("Grantor user not found."); + pub async fn to_json_grantor_details(&self, conn: &DbConn) -> Value { + let grantor_user = User::find_by_uuid(&self.grantor_uuid, conn).await.expect("Grantor user not found."); json!({ "Id": self.uuid, @@ -89,11 +89,11 @@ impl EmergencyAccess { } #[allow(clippy::manual_map)] - pub fn to_json_grantee_details(&self, conn: &DbConn) -> Value { + pub async fn to_json_grantee_details(&self, conn: &DbConn) -> Value { let grantee_user = if let Some(grantee_uuid) = self.grantee_uuid.as_deref() { - Some(User::find_by_uuid(grantee_uuid, conn).expect("Grantee user not found.")) + Some(User::find_by_uuid(grantee_uuid, conn).await.expect("Grantee user not found.")) } else if let Some(email) = self.email.as_deref() { - 
Some(User::find_by_mail(email, conn).expect("Grantee user not found.")) + Some(User::find_by_mail(email, conn).await.expect("Grantee user not found.")) } else { None }; @@ -155,8 +155,8 @@ use crate::api::EmptyResult; use crate::error::MapResult; impl EmergencyAccess { - pub fn save(&mut self, conn: &DbConn) -> EmptyResult { - User::update_uuid_revision(&self.grantor_uuid, conn); + pub async fn save(&mut self, conn: &DbConn) -> EmptyResult { + User::update_uuid_revision(&self.grantor_uuid, conn).await; self.updated_at = Utc::now().naive_utc(); db_run! { conn: @@ -190,18 +190,18 @@ impl EmergencyAccess { } } - pub fn delete_all_by_user(user_uuid: &str, conn: &DbConn) -> EmptyResult { - for ea in Self::find_all_by_grantor_uuid(user_uuid, conn) { - ea.delete(conn)?; + pub async fn delete_all_by_user(user_uuid: &str, conn: &DbConn) -> EmptyResult { + for ea in Self::find_all_by_grantor_uuid(user_uuid, conn).await { + ea.delete(conn).await?; } - for ea in Self::find_all_by_grantee_uuid(user_uuid, conn) { - ea.delete(conn)?; + for ea in Self::find_all_by_grantee_uuid(user_uuid, conn).await { + ea.delete(conn).await?; } Ok(()) } - pub fn delete(self, conn: &DbConn) -> EmptyResult { - User::update_uuid_revision(&self.grantor_uuid, conn); + pub async fn delete(self, conn: &DbConn) -> EmptyResult { + User::update_uuid_revision(&self.grantor_uuid, conn).await; db_run! { conn: { diesel::delete(emergency_access::table.filter(emergency_access::uuid.eq(self.uuid))) @@ -210,7 +210,7 @@ impl EmergencyAccess { }} } - pub fn find_by_uuid(uuid: &str, conn: &DbConn) -> Option { + pub async fn find_by_uuid(uuid: &str, conn: &DbConn) -> Option { db_run! 
{ conn: { emergency_access::table .filter(emergency_access::uuid.eq(uuid)) @@ -219,7 +219,7 @@ impl EmergencyAccess { }} } - pub fn find_by_grantor_uuid_and_grantee_uuid_or_email( + pub async fn find_by_grantor_uuid_and_grantee_uuid_or_email( grantor_uuid: &str, grantee_uuid: &str, email: &str, @@ -234,7 +234,7 @@ impl EmergencyAccess { }} } - pub fn find_all_recoveries(conn: &DbConn) -> Vec { + pub async fn find_all_recoveries(conn: &DbConn) -> Vec { db_run! { conn: { emergency_access::table .filter(emergency_access::status.eq(EmergencyAccessStatus::RecoveryInitiated as i32)) @@ -242,7 +242,7 @@ impl EmergencyAccess { }} } - pub fn find_by_uuid_and_grantor_uuid(uuid: &str, grantor_uuid: &str, conn: &DbConn) -> Option { + pub async fn find_by_uuid_and_grantor_uuid(uuid: &str, grantor_uuid: &str, conn: &DbConn) -> Option { db_run! { conn: { emergency_access::table .filter(emergency_access::uuid.eq(uuid)) @@ -252,7 +252,7 @@ impl EmergencyAccess { }} } - pub fn find_all_by_grantee_uuid(grantee_uuid: &str, conn: &DbConn) -> Vec { + pub async fn find_all_by_grantee_uuid(grantee_uuid: &str, conn: &DbConn) -> Vec { db_run! { conn: { emergency_access::table .filter(emergency_access::grantee_uuid.eq(grantee_uuid)) @@ -260,7 +260,7 @@ impl EmergencyAccess { }} } - pub fn find_invited_by_grantee_email(grantee_email: &str, conn: &DbConn) -> Option { + pub async fn find_invited_by_grantee_email(grantee_email: &str, conn: &DbConn) -> Option { db_run! { conn: { emergency_access::table .filter(emergency_access::email.eq(grantee_email)) @@ -270,7 +270,7 @@ impl EmergencyAccess { }} } - pub fn find_all_by_grantor_uuid(grantor_uuid: &str, conn: &DbConn) -> Vec { + pub async fn find_all_by_grantor_uuid(grantor_uuid: &str, conn: &DbConn) -> Vec { db_run! 
{ conn: { emergency_access::table .filter(emergency_access::grantor_uuid.eq(grantor_uuid)) diff --git a/src/db/models/favorite.rs b/src/db/models/favorite.rs index cb3e3420..4ff31939 100644 --- a/src/db/models/favorite.rs +++ b/src/db/models/favorite.rs @@ -19,7 +19,7 @@ use crate::error::MapResult; impl Favorite { // Returns whether the specified cipher is a favorite of the specified user. - pub fn is_favorite(cipher_uuid: &str, user_uuid: &str, conn: &DbConn) -> bool { + pub async fn is_favorite(cipher_uuid: &str, user_uuid: &str, conn: &DbConn) -> bool { db_run! { conn: { let query = favorites::table .filter(favorites::cipher_uuid.eq(cipher_uuid)) @@ -31,11 +31,11 @@ impl Favorite { } // Sets whether the specified cipher is a favorite of the specified user. - pub fn set_favorite(favorite: bool, cipher_uuid: &str, user_uuid: &str, conn: &DbConn) -> EmptyResult { - let (old, new) = (Self::is_favorite(cipher_uuid, user_uuid, conn), favorite); + pub async fn set_favorite(favorite: bool, cipher_uuid: &str, user_uuid: &str, conn: &DbConn) -> EmptyResult { + let (old, new) = (Self::is_favorite(cipher_uuid, user_uuid, conn).await, favorite); match (old, new) { (false, true) => { - User::update_uuid_revision(user_uuid, conn); + User::update_uuid_revision(user_uuid, conn).await; db_run! { conn: { diesel::insert_into(favorites::table) .values(( @@ -47,7 +47,7 @@ impl Favorite { }} } (true, false) => { - User::update_uuid_revision(user_uuid, conn); + User::update_uuid_revision(user_uuid, conn).await; db_run! { conn: { diesel::delete( favorites::table @@ -64,7 +64,7 @@ impl Favorite { } // Delete all favorite entries associated with the specified cipher. - pub fn delete_all_by_cipher(cipher_uuid: &str, conn: &DbConn) -> EmptyResult { + pub async fn delete_all_by_cipher(cipher_uuid: &str, conn: &DbConn) -> EmptyResult { db_run! 
{ conn: { diesel::delete(favorites::table.filter(favorites::cipher_uuid.eq(cipher_uuid))) .execute(conn) @@ -73,7 +73,7 @@ impl Favorite { } // Delete all favorite entries associated with the specified user. - pub fn delete_all_by_user(user_uuid: &str, conn: &DbConn) -> EmptyResult { + pub async fn delete_all_by_user(user_uuid: &str, conn: &DbConn) -> EmptyResult { db_run! { conn: { diesel::delete(favorites::table.filter(favorites::user_uuid.eq(user_uuid))) .execute(conn) diff --git a/src/db/models/folder.rs b/src/db/models/folder.rs index d51e71b4..33976203 100644 --- a/src/db/models/folder.rs +++ b/src/db/models/folder.rs @@ -70,8 +70,8 @@ use crate::error::MapResult; /// Database methods impl Folder { - pub fn save(&mut self, conn: &DbConn) -> EmptyResult { - User::update_uuid_revision(&self.user_uuid, conn); + pub async fn save(&mut self, conn: &DbConn) -> EmptyResult { + User::update_uuid_revision(&self.user_uuid, conn).await; self.updated_at = Utc::now().naive_utc(); db_run! { conn: @@ -105,9 +105,9 @@ impl Folder { } } - pub fn delete(&self, conn: &DbConn) -> EmptyResult { - User::update_uuid_revision(&self.user_uuid, conn); - FolderCipher::delete_all_by_folder(&self.uuid, conn)?; + pub async fn delete(&self, conn: &DbConn) -> EmptyResult { + User::update_uuid_revision(&self.user_uuid, conn).await; + FolderCipher::delete_all_by_folder(&self.uuid, conn).await?; db_run! { conn: { diesel::delete(folders::table.filter(folders::uuid.eq(&self.uuid))) @@ -116,14 +116,14 @@ impl Folder { }} } - pub fn delete_all_by_user(user_uuid: &str, conn: &DbConn) -> EmptyResult { - for folder in Self::find_by_user(user_uuid, conn) { - folder.delete(conn)?; + pub async fn delete_all_by_user(user_uuid: &str, conn: &DbConn) -> EmptyResult { + for folder in Self::find_by_user(user_uuid, conn).await { + folder.delete(conn).await?; } Ok(()) } - pub fn find_by_uuid(uuid: &str, conn: &DbConn) -> Option { + pub async fn find_by_uuid(uuid: &str, conn: &DbConn) -> Option { db_run! 
{ conn: { folders::table .filter(folders::uuid.eq(uuid)) @@ -133,7 +133,7 @@ impl Folder { }} } - pub fn find_by_user(user_uuid: &str, conn: &DbConn) -> Vec { + pub async fn find_by_user(user_uuid: &str, conn: &DbConn) -> Vec { db_run! { conn: { folders::table .filter(folders::user_uuid.eq(user_uuid)) @@ -145,7 +145,7 @@ impl Folder { } impl FolderCipher { - pub fn save(&self, conn: &DbConn) -> EmptyResult { + pub async fn save(&self, conn: &DbConn) -> EmptyResult { db_run! { conn: sqlite, mysql { // Not checking for ForeignKey Constraints here. @@ -167,7 +167,7 @@ impl FolderCipher { } } - pub fn delete(self, conn: &DbConn) -> EmptyResult { + pub async fn delete(self, conn: &DbConn) -> EmptyResult { db_run! { conn: { diesel::delete( folders_ciphers::table @@ -179,7 +179,7 @@ impl FolderCipher { }} } - pub fn delete_all_by_cipher(cipher_uuid: &str, conn: &DbConn) -> EmptyResult { + pub async fn delete_all_by_cipher(cipher_uuid: &str, conn: &DbConn) -> EmptyResult { db_run! { conn: { diesel::delete(folders_ciphers::table.filter(folders_ciphers::cipher_uuid.eq(cipher_uuid))) .execute(conn) @@ -187,7 +187,7 @@ impl FolderCipher { }} } - pub fn delete_all_by_folder(folder_uuid: &str, conn: &DbConn) -> EmptyResult { + pub async fn delete_all_by_folder(folder_uuid: &str, conn: &DbConn) -> EmptyResult { db_run! { conn: { diesel::delete(folders_ciphers::table.filter(folders_ciphers::folder_uuid.eq(folder_uuid))) .execute(conn) @@ -195,7 +195,7 @@ impl FolderCipher { }} } - pub fn find_by_folder_and_cipher(folder_uuid: &str, cipher_uuid: &str, conn: &DbConn) -> Option { + pub async fn find_by_folder_and_cipher(folder_uuid: &str, cipher_uuid: &str, conn: &DbConn) -> Option { db_run! { conn: { folders_ciphers::table .filter(folders_ciphers::folder_uuid.eq(folder_uuid)) @@ -206,7 +206,7 @@ impl FolderCipher { }} } - pub fn find_by_folder(folder_uuid: &str, conn: &DbConn) -> Vec { + pub async fn find_by_folder(folder_uuid: &str, conn: &DbConn) -> Vec { db_run! 
{ conn: { folders_ciphers::table .filter(folders_ciphers::folder_uuid.eq(folder_uuid)) diff --git a/src/db/models/org_policy.rs b/src/db/models/org_policy.rs index 7c6cefd3..04fc6f45 100644 --- a/src/db/models/org_policy.rs +++ b/src/db/models/org_policy.rs @@ -72,7 +72,7 @@ impl OrgPolicy { /// Database methods impl OrgPolicy { - pub fn save(&self, conn: &DbConn) -> EmptyResult { + pub async fn save(&self, conn: &DbConn) -> EmptyResult { db_run! { conn: sqlite, mysql { match diesel::replace_into(org_policies::table) @@ -115,7 +115,7 @@ impl OrgPolicy { } } - pub fn delete(self, conn: &DbConn) -> EmptyResult { + pub async fn delete(self, conn: &DbConn) -> EmptyResult { db_run! { conn: { diesel::delete(org_policies::table.filter(org_policies::uuid.eq(self.uuid))) .execute(conn) @@ -123,7 +123,7 @@ impl OrgPolicy { }} } - pub fn find_by_uuid(uuid: &str, conn: &DbConn) -> Option { + pub async fn find_by_uuid(uuid: &str, conn: &DbConn) -> Option { db_run! { conn: { org_policies::table .filter(org_policies::uuid.eq(uuid)) @@ -133,7 +133,7 @@ impl OrgPolicy { }} } - pub fn find_by_org(org_uuid: &str, conn: &DbConn) -> Vec { + pub async fn find_by_org(org_uuid: &str, conn: &DbConn) -> Vec { db_run! { conn: { org_policies::table .filter(org_policies::org_uuid.eq(org_uuid)) @@ -143,7 +143,7 @@ impl OrgPolicy { }} } - pub fn find_confirmed_by_user(user_uuid: &str, conn: &DbConn) -> Vec { + pub async fn find_confirmed_by_user(user_uuid: &str, conn: &DbConn) -> Vec { db_run! { conn: { org_policies::table .inner_join( @@ -161,7 +161,7 @@ impl OrgPolicy { }} } - pub fn find_by_org_and_type(org_uuid: &str, atype: i32, conn: &DbConn) -> Option { + pub async fn find_by_org_and_type(org_uuid: &str, atype: i32, conn: &DbConn) -> Option { db_run! 
{ conn: { org_policies::table .filter(org_policies::org_uuid.eq(org_uuid)) @@ -172,7 +172,7 @@ impl OrgPolicy { }} } - pub fn delete_all_by_organization(org_uuid: &str, conn: &DbConn) -> EmptyResult { + pub async fn delete_all_by_organization(org_uuid: &str, conn: &DbConn) -> EmptyResult { db_run! { conn: { diesel::delete(org_policies::table.filter(org_policies::org_uuid.eq(org_uuid))) .execute(conn) @@ -183,12 +183,12 @@ impl OrgPolicy { /// Returns true if the user belongs to an org that has enabled the specified policy type, /// and the user is not an owner or admin of that org. This is only useful for checking /// applicability of policy types that have these particular semantics. - pub fn is_applicable_to_user(user_uuid: &str, policy_type: OrgPolicyType, conn: &DbConn) -> bool { + pub async fn is_applicable_to_user(user_uuid: &str, policy_type: OrgPolicyType, conn: &DbConn) -> bool { // TODO: Should check confirmed and accepted users - for policy in OrgPolicy::find_confirmed_by_user(user_uuid, conn) { + for policy in OrgPolicy::find_confirmed_by_user(user_uuid, conn).await { if policy.enabled && policy.has_type(policy_type) { let org_uuid = &policy.org_uuid; - if let Some(user) = UserOrganization::find_by_user_and_org(user_uuid, org_uuid, conn) { + if let Some(user) = UserOrganization::find_by_user_and_org(user_uuid, org_uuid, conn).await { if user.atype < UserOrgType::Admin { return true; } @@ -200,11 +200,11 @@ impl OrgPolicy { /// Returns true if the user belongs to an org that has enabled the `DisableHideEmail` /// option of the `Send Options` policy, and the user is not an owner or admin of that org. 
- pub fn is_hide_email_disabled(user_uuid: &str, conn: &DbConn) -> bool { - for policy in OrgPolicy::find_confirmed_by_user(user_uuid, conn) { + pub async fn is_hide_email_disabled(user_uuid: &str, conn: &DbConn) -> bool { + for policy in OrgPolicy::find_confirmed_by_user(user_uuid, conn).await { if policy.enabled && policy.has_type(OrgPolicyType::SendOptions) { let org_uuid = &policy.org_uuid; - if let Some(user) = UserOrganization::find_by_user_and_org(user_uuid, org_uuid, conn) { + if let Some(user) = UserOrganization::find_by_user_and_org(user_uuid, org_uuid, conn).await { if user.atype < UserOrgType::Admin { match serde_json::from_str::>(&policy.data) { Ok(opts) => { @@ -220,12 +220,4 @@ impl OrgPolicy { } false } - - /*pub fn delete_all_by_user(user_uuid: &str, conn: &DbConn) -> EmptyResult { - db_run! { conn: { - diesel::delete(twofactor::table.filter(twofactor::user_uuid.eq(user_uuid))) - .execute(conn) - .map_res("Error deleting twofactors") - }} - }*/ } diff --git a/src/db/models/organization.rs b/src/db/models/organization.rs index 67dd5357..56af0c47 100644 --- a/src/db/models/organization.rs +++ b/src/db/models/organization.rs @@ -193,10 +193,10 @@ use crate::error::MapResult; /// Database methods impl Organization { - pub fn save(&self, conn: &DbConn) -> EmptyResult { - UserOrganization::find_by_org(&self.uuid, conn).iter().for_each(|user_org| { - User::update_uuid_revision(&user_org.user_uuid, conn); - }); + pub async fn save(&self, conn: &DbConn) -> EmptyResult { + for user_org in UserOrganization::find_by_org(&self.uuid, conn).await.iter() { + User::update_uuid_revision(&user_org.user_uuid, conn).await; + } db_run! 
{ conn: sqlite, mysql { @@ -230,13 +230,13 @@ impl Organization { } } - pub fn delete(self, conn: &DbConn) -> EmptyResult { + pub async fn delete(self, conn: &DbConn) -> EmptyResult { use super::{Cipher, Collection}; - Cipher::delete_all_by_organization(&self.uuid, conn)?; - Collection::delete_all_by_organization(&self.uuid, conn)?; - UserOrganization::delete_all_by_organization(&self.uuid, conn)?; - OrgPolicy::delete_all_by_organization(&self.uuid, conn)?; + Cipher::delete_all_by_organization(&self.uuid, conn).await?; + Collection::delete_all_by_organization(&self.uuid, conn).await?; + UserOrganization::delete_all_by_organization(&self.uuid, conn).await?; + OrgPolicy::delete_all_by_organization(&self.uuid, conn).await?; db_run! { conn: { diesel::delete(organizations::table.filter(organizations::uuid.eq(self.uuid))) @@ -245,7 +245,7 @@ impl Organization { }} } - pub fn find_by_uuid(uuid: &str, conn: &DbConn) -> Option { + pub async fn find_by_uuid(uuid: &str, conn: &DbConn) -> Option { db_run! { conn: { organizations::table .filter(organizations::uuid.eq(uuid)) @@ -254,7 +254,7 @@ impl Organization { }} } - pub fn get_all(conn: &DbConn) -> Vec { + pub async fn get_all(conn: &DbConn) -> Vec { db_run! 
{ conn: { organizations::table.load::(conn).expect("Error loading organizations").from_db() }} @@ -262,8 +262,8 @@ impl Organization { } impl UserOrganization { - pub fn to_json(&self, conn: &DbConn) -> Value { - let org = Organization::find_by_uuid(&self.org_uuid, conn).unwrap(); + pub async fn to_json(&self, conn: &DbConn) -> Value { + let org = Organization::find_by_uuid(&self.org_uuid, conn).await.unwrap(); json!({ "Id": self.org_uuid, @@ -322,8 +322,8 @@ impl UserOrganization { }) } - pub fn to_json_user_details(&self, conn: &DbConn) -> Value { - let user = User::find_by_uuid(&self.user_uuid, conn).unwrap(); + pub async fn to_json_user_details(&self, conn: &DbConn) -> Value { + let user = User::find_by_uuid(&self.user_uuid, conn).await.unwrap(); json!({ "Id": self.uuid, @@ -347,11 +347,12 @@ impl UserOrganization { }) } - pub fn to_json_details(&self, conn: &DbConn) -> Value { + pub async fn to_json_details(&self, conn: &DbConn) -> Value { let coll_uuids = if self.access_all { vec![] // If we have complete access, no need to fill the array } else { - let collections = CollectionUser::find_by_organization_and_user_uuid(&self.org_uuid, &self.user_uuid, conn); + let collections = + CollectionUser::find_by_organization_and_user_uuid(&self.org_uuid, &self.user_uuid, conn).await; collections .iter() .map(|c| { @@ -376,8 +377,8 @@ impl UserOrganization { "Object": "organizationUserDetails", }) } - pub fn save(&self, conn: &DbConn) -> EmptyResult { - User::update_uuid_revision(&self.user_uuid, conn); + pub async fn save(&self, conn: &DbConn) -> EmptyResult { + User::update_uuid_revision(&self.user_uuid, conn).await; db_run! 
{ conn: sqlite, mysql { @@ -410,10 +411,10 @@ impl UserOrganization { } } - pub fn delete(self, conn: &DbConn) -> EmptyResult { - User::update_uuid_revision(&self.user_uuid, conn); + pub async fn delete(self, conn: &DbConn) -> EmptyResult { + User::update_uuid_revision(&self.user_uuid, conn).await; - CollectionUser::delete_all_by_user_and_org(&self.user_uuid, &self.org_uuid, conn)?; + CollectionUser::delete_all_by_user_and_org(&self.user_uuid, &self.org_uuid, conn).await?; db_run! { conn: { diesel::delete(users_organizations::table.filter(users_organizations::uuid.eq(self.uuid))) @@ -422,23 +423,23 @@ impl UserOrganization { }} } - pub fn delete_all_by_organization(org_uuid: &str, conn: &DbConn) -> EmptyResult { - for user_org in Self::find_by_org(org_uuid, conn) { - user_org.delete(conn)?; + pub async fn delete_all_by_organization(org_uuid: &str, conn: &DbConn) -> EmptyResult { + for user_org in Self::find_by_org(org_uuid, conn).await { + user_org.delete(conn).await?; } Ok(()) } - pub fn delete_all_by_user(user_uuid: &str, conn: &DbConn) -> EmptyResult { - for user_org in Self::find_any_state_by_user(user_uuid, conn) { - user_org.delete(conn)?; + pub async fn delete_all_by_user(user_uuid: &str, conn: &DbConn) -> EmptyResult { + for user_org in Self::find_any_state_by_user(user_uuid, conn).await { + user_org.delete(conn).await?; } Ok(()) } - pub fn find_by_email_and_org(email: &str, org_id: &str, conn: &DbConn) -> Option { - if let Some(user) = super::User::find_by_mail(email, conn) { - if let Some(user_org) = UserOrganization::find_by_user_and_org(&user.uuid, org_id, conn) { + pub async fn find_by_email_and_org(email: &str, org_id: &str, conn: &DbConn) -> Option { + if let Some(user) = super::User::find_by_mail(email, conn).await { + if let Some(user_org) = UserOrganization::find_by_user_and_org(&user.uuid, org_id, conn).await { return Some(user_org); } } @@ -458,7 +459,7 @@ impl UserOrganization { (self.access_all || self.atype >= UserOrgType::Admin) && 
self.has_status(UserOrgStatus::Confirmed) } - pub fn find_by_uuid(uuid: &str, conn: &DbConn) -> Option { + pub async fn find_by_uuid(uuid: &str, conn: &DbConn) -> Option { db_run! { conn: { users_organizations::table .filter(users_organizations::uuid.eq(uuid)) @@ -467,7 +468,7 @@ impl UserOrganization { }} } - pub fn find_by_uuid_and_org(uuid: &str, org_uuid: &str, conn: &DbConn) -> Option { + pub async fn find_by_uuid_and_org(uuid: &str, org_uuid: &str, conn: &DbConn) -> Option { db_run! { conn: { users_organizations::table .filter(users_organizations::uuid.eq(uuid)) @@ -477,7 +478,7 @@ impl UserOrganization { }} } - pub fn find_confirmed_by_user(user_uuid: &str, conn: &DbConn) -> Vec { + pub async fn find_confirmed_by_user(user_uuid: &str, conn: &DbConn) -> Vec { db_run! { conn: { users_organizations::table .filter(users_organizations::user_uuid.eq(user_uuid)) @@ -487,7 +488,7 @@ impl UserOrganization { }} } - pub fn find_invited_by_user(user_uuid: &str, conn: &DbConn) -> Vec { + pub async fn find_invited_by_user(user_uuid: &str, conn: &DbConn) -> Vec { db_run! { conn: { users_organizations::table .filter(users_organizations::user_uuid.eq(user_uuid)) @@ -497,7 +498,7 @@ impl UserOrganization { }} } - pub fn find_any_state_by_user(user_uuid: &str, conn: &DbConn) -> Vec { + pub async fn find_any_state_by_user(user_uuid: &str, conn: &DbConn) -> Vec { db_run! { conn: { users_organizations::table .filter(users_organizations::user_uuid.eq(user_uuid)) @@ -506,7 +507,7 @@ impl UserOrganization { }} } - pub fn find_by_org(org_uuid: &str, conn: &DbConn) -> Vec { + pub async fn find_by_org(org_uuid: &str, conn: &DbConn) -> Vec { db_run! { conn: { users_organizations::table .filter(users_organizations::org_uuid.eq(org_uuid)) @@ -515,7 +516,7 @@ impl UserOrganization { }} } - pub fn count_by_org(org_uuid: &str, conn: &DbConn) -> i64 { + pub async fn count_by_org(org_uuid: &str, conn: &DbConn) -> i64 { db_run! 
{ conn: { users_organizations::table .filter(users_organizations::org_uuid.eq(org_uuid)) @@ -526,7 +527,7 @@ impl UserOrganization { }} } - pub fn find_by_org_and_type(org_uuid: &str, atype: i32, conn: &DbConn) -> Vec { + pub async fn find_by_org_and_type(org_uuid: &str, atype: i32, conn: &DbConn) -> Vec { db_run! { conn: { users_organizations::table .filter(users_organizations::org_uuid.eq(org_uuid)) @@ -536,7 +537,7 @@ impl UserOrganization { }} } - pub fn find_by_user_and_org(user_uuid: &str, org_uuid: &str, conn: &DbConn) -> Option { + pub async fn find_by_user_and_org(user_uuid: &str, org_uuid: &str, conn: &DbConn) -> Option { db_run! { conn: { users_organizations::table .filter(users_organizations::user_uuid.eq(user_uuid)) @@ -546,7 +547,7 @@ impl UserOrganization { }} } - pub fn find_by_user_and_policy(user_uuid: &str, policy_type: OrgPolicyType, conn: &DbConn) -> Vec { + pub async fn find_by_user_and_policy(user_uuid: &str, policy_type: OrgPolicyType, conn: &DbConn) -> Vec { db_run! { conn: { users_organizations::table .inner_join( @@ -565,7 +566,7 @@ impl UserOrganization { }} } - pub fn find_by_cipher_and_org(cipher_uuid: &str, org_uuid: &str, conn: &DbConn) -> Vec { + pub async fn find_by_cipher_and_org(cipher_uuid: &str, org_uuid: &str, conn: &DbConn) -> Vec { db_run! { conn: { users_organizations::table .filter(users_organizations::org_uuid.eq(org_uuid)) @@ -587,7 +588,7 @@ impl UserOrganization { }} } - pub fn find_by_collection_and_org(collection_uuid: &str, org_uuid: &str, conn: &DbConn) -> Vec { + pub async fn find_by_collection_and_org(collection_uuid: &str, org_uuid: &str, conn: &DbConn) -> Vec { db_run! 
{ conn: { users_organizations::table .filter(users_organizations::org_uuid.eq(org_uuid)) diff --git a/src/db/models/send.rs b/src/db/models/send.rs index 9cfb7b1e..cc8fd4fa 100644 --- a/src/db/models/send.rs +++ b/src/db/models/send.rs @@ -47,7 +47,7 @@ pub enum SendType { } impl Send { - pub fn new(atype: i32, name: String, data: String, akey: String, deletion_date: NaiveDateTime) -> Self { + pub async fn new(atype: i32, name: String, data: String, akey: String, deletion_date: NaiveDateTime) -> Self { let now = Utc::now().naive_utc(); Self { @@ -103,7 +103,7 @@ impl Send { } } - pub fn creator_identifier(&self, conn: &DbConn) -> Option { + pub async fn creator_identifier(&self, conn: &DbConn) -> Option { if let Some(hide_email) = self.hide_email { if hide_email { return None; @@ -111,7 +111,7 @@ impl Send { } if let Some(user_uuid) = &self.user_uuid { - if let Some(user) = User::find_by_uuid(user_uuid, conn) { + if let Some(user) = User::find_by_uuid(user_uuid, conn).await { return Some(user.email); } } @@ -150,7 +150,7 @@ impl Send { }) } - pub fn to_json_access(&self, conn: &DbConn) -> Value { + pub async fn to_json_access(&self, conn: &DbConn) -> Value { use crate::util::format_date; let data: Value = serde_json::from_str(&self.data).unwrap_or_default(); @@ -164,7 +164,7 @@ impl Send { "File": if self.atype == SendType::File as i32 { Some(&data) } else { None }, "ExpirationDate": self.expiration_date.as_ref().map(format_date), - "CreatorIdentifier": self.creator_identifier(conn), + "CreatorIdentifier": self.creator_identifier(conn).await, "Object": "send-access", }) } @@ -176,8 +176,8 @@ use crate::api::EmptyResult; use crate::error::MapResult; impl Send { - pub fn save(&mut self, conn: &DbConn) -> EmptyResult { - self.update_users_revision(conn); + pub async fn save(&mut self, conn: &DbConn) -> EmptyResult { + self.update_users_revision(conn).await; self.revision_date = Utc::now().naive_utc(); db_run! 
{ conn: @@ -211,8 +211,8 @@ impl Send { } } - pub fn delete(&self, conn: &DbConn) -> EmptyResult { - self.update_users_revision(conn); + pub async fn delete(&self, conn: &DbConn) -> EmptyResult { + self.update_users_revision(conn).await; if self.atype == SendType::File as i32 { std::fs::remove_dir_all(std::path::Path::new(&crate::CONFIG.sends_folder()).join(&self.uuid)).ok(); @@ -226,17 +226,17 @@ impl Send { } /// Purge all sends that are past their deletion date. - pub fn purge(conn: &DbConn) { - for send in Self::find_by_past_deletion_date(conn) { - send.delete(conn).ok(); + pub async fn purge(conn: &DbConn) { + for send in Self::find_by_past_deletion_date(conn).await { + send.delete(conn).await.ok(); } } - pub fn update_users_revision(&self, conn: &DbConn) -> Vec { + pub async fn update_users_revision(&self, conn: &DbConn) -> Vec { let mut user_uuids = Vec::new(); match &self.user_uuid { Some(user_uuid) => { - User::update_uuid_revision(user_uuid, conn); + User::update_uuid_revision(user_uuid, conn).await; user_uuids.push(user_uuid.clone()) } None => { @@ -246,14 +246,14 @@ impl Send { user_uuids } - pub fn delete_all_by_user(user_uuid: &str, conn: &DbConn) -> EmptyResult { - for send in Self::find_by_user(user_uuid, conn) { - send.delete(conn)?; + pub async fn delete_all_by_user(user_uuid: &str, conn: &DbConn) -> EmptyResult { + for send in Self::find_by_user(user_uuid, conn).await { + send.delete(conn).await?; } Ok(()) } - pub fn find_by_access_id(access_id: &str, conn: &DbConn) -> Option { + pub async fn find_by_access_id(access_id: &str, conn: &DbConn) -> Option { use data_encoding::BASE64URL_NOPAD; use uuid::Uuid; @@ -267,10 +267,10 @@ impl Send { Err(_) => return None, }; - Self::find_by_uuid(&uuid, conn) + Self::find_by_uuid(&uuid, conn).await } - pub fn find_by_uuid(uuid: &str, conn: &DbConn) -> Option { + pub async fn find_by_uuid(uuid: &str, conn: &DbConn) -> Option { db_run! 
{conn: { sends::table .filter(sends::uuid.eq(uuid)) @@ -280,7 +280,7 @@ impl Send { }} } - pub fn find_by_user(user_uuid: &str, conn: &DbConn) -> Vec { + pub async fn find_by_user(user_uuid: &str, conn: &DbConn) -> Vec { db_run! {conn: { sends::table .filter(sends::user_uuid.eq(user_uuid)) @@ -288,7 +288,7 @@ impl Send { }} } - pub fn find_by_org(org_uuid: &str, conn: &DbConn) -> Vec { + pub async fn find_by_org(org_uuid: &str, conn: &DbConn) -> Vec { db_run! {conn: { sends::table .filter(sends::organization_uuid.eq(org_uuid)) @@ -296,7 +296,7 @@ impl Send { }} } - pub fn find_by_past_deletion_date(conn: &DbConn) -> Vec { + pub async fn find_by_past_deletion_date(conn: &DbConn) -> Vec { let now = Utc::now().naive_utc(); db_run! {conn: { sends::table diff --git a/src/db/models/two_factor.rs b/src/db/models/two_factor.rs index 6c874df1..03ca23b4 100644 --- a/src/db/models/two_factor.rs +++ b/src/db/models/two_factor.rs @@ -71,7 +71,7 @@ impl TwoFactor { /// Database methods impl TwoFactor { - pub fn save(&self, conn: &DbConn) -> EmptyResult { + pub async fn save(&self, conn: &DbConn) -> EmptyResult { db_run! { conn: sqlite, mysql { match diesel::replace_into(twofactor::table) @@ -110,7 +110,7 @@ impl TwoFactor { } } - pub fn delete(self, conn: &DbConn) -> EmptyResult { + pub async fn delete(self, conn: &DbConn) -> EmptyResult { db_run! { conn: { diesel::delete(twofactor::table.filter(twofactor::uuid.eq(self.uuid))) .execute(conn) @@ -118,7 +118,7 @@ impl TwoFactor { }} } - pub fn find_by_user(user_uuid: &str, conn: &DbConn) -> Vec { + pub async fn find_by_user(user_uuid: &str, conn: &DbConn) -> Vec { db_run! { conn: { twofactor::table .filter(twofactor::user_uuid.eq(user_uuid)) @@ -129,7 +129,7 @@ impl TwoFactor { }} } - pub fn find_by_user_and_type(user_uuid: &str, atype: i32, conn: &DbConn) -> Option { + pub async fn find_by_user_and_type(user_uuid: &str, atype: i32, conn: &DbConn) -> Option { db_run! 
{ conn: { twofactor::table .filter(twofactor::user_uuid.eq(user_uuid)) @@ -140,7 +140,7 @@ impl TwoFactor { }} } - pub fn delete_all_by_user(user_uuid: &str, conn: &DbConn) -> EmptyResult { + pub async fn delete_all_by_user(user_uuid: &str, conn: &DbConn) -> EmptyResult { db_run! { conn: { diesel::delete(twofactor::table.filter(twofactor::user_uuid.eq(user_uuid))) .execute(conn) @@ -148,7 +148,7 @@ impl TwoFactor { }} } - pub fn migrate_u2f_to_webauthn(conn: &DbConn) -> EmptyResult { + pub async fn migrate_u2f_to_webauthn(conn: &DbConn) -> EmptyResult { let u2f_factors = db_run! { conn: { twofactor::table .filter(twofactor::atype.eq(TwoFactorType::U2f as i32)) @@ -168,7 +168,7 @@ impl TwoFactor { continue; } - let (_, mut webauthn_regs) = get_webauthn_registrations(&u2f.user_uuid, conn)?; + let (_, mut webauthn_regs) = get_webauthn_registrations(&u2f.user_uuid, conn).await?; // If the user already has webauthn registrations saved, don't overwrite them if !webauthn_regs.is_empty() { @@ -207,10 +207,11 @@ impl TwoFactor { } u2f.data = serde_json::to_string(®s)?; - u2f.save(conn)?; + u2f.save(conn).await?; TwoFactor::new(u2f.user_uuid.clone(), TwoFactorType::Webauthn, serde_json::to_string(&webauthn_regs)?) - .save(conn)?; + .save(conn) + .await?; } Ok(()) diff --git a/src/db/models/two_factor_incomplete.rs b/src/db/models/two_factor_incomplete.rs index d58398ec..1f292a08 100644 --- a/src/db/models/two_factor_incomplete.rs +++ b/src/db/models/two_factor_incomplete.rs @@ -22,7 +22,7 @@ db_object! { } impl TwoFactorIncomplete { - pub fn mark_incomplete( + pub async fn mark_incomplete( user_uuid: &str, device_uuid: &str, device_name: &str, @@ -36,7 +36,7 @@ impl TwoFactorIncomplete { // Don't update the data for an existing user/device pair, since that // would allow an attacker to arbitrarily delay notifications by // sending repeated 2FA attempts to reset the timer. 
- let existing = Self::find_by_user_and_device(user_uuid, device_uuid, conn); + let existing = Self::find_by_user_and_device(user_uuid, device_uuid, conn).await; if existing.is_some() { return Ok(()); } @@ -55,15 +55,15 @@ impl TwoFactorIncomplete { }} } - pub fn mark_complete(user_uuid: &str, device_uuid: &str, conn: &DbConn) -> EmptyResult { + pub async fn mark_complete(user_uuid: &str, device_uuid: &str, conn: &DbConn) -> EmptyResult { if CONFIG.incomplete_2fa_time_limit() <= 0 || !CONFIG.mail_enabled() { return Ok(()); } - Self::delete_by_user_and_device(user_uuid, device_uuid, conn) + Self::delete_by_user_and_device(user_uuid, device_uuid, conn).await } - pub fn find_by_user_and_device(user_uuid: &str, device_uuid: &str, conn: &DbConn) -> Option { + pub async fn find_by_user_and_device(user_uuid: &str, device_uuid: &str, conn: &DbConn) -> Option { db_run! { conn: { twofactor_incomplete::table .filter(twofactor_incomplete::user_uuid.eq(user_uuid)) @@ -74,7 +74,7 @@ impl TwoFactorIncomplete { }} } - pub fn find_logins_before(dt: &NaiveDateTime, conn: &DbConn) -> Vec { + pub async fn find_logins_before(dt: &NaiveDateTime, conn: &DbConn) -> Vec { db_run! {conn: { twofactor_incomplete::table .filter(twofactor_incomplete::login_time.lt(dt)) @@ -84,11 +84,11 @@ impl TwoFactorIncomplete { }} } - pub fn delete(self, conn: &DbConn) -> EmptyResult { - Self::delete_by_user_and_device(&self.user_uuid, &self.device_uuid, conn) + pub async fn delete(self, conn: &DbConn) -> EmptyResult { + Self::delete_by_user_and_device(&self.user_uuid, &self.device_uuid, conn).await } - pub fn delete_by_user_and_device(user_uuid: &str, device_uuid: &str, conn: &DbConn) -> EmptyResult { + pub async fn delete_by_user_and_device(user_uuid: &str, device_uuid: &str, conn: &DbConn) -> EmptyResult { db_run! 
{ conn: { diesel::delete(twofactor_incomplete::table .filter(twofactor_incomplete::user_uuid.eq(user_uuid)) @@ -98,7 +98,7 @@ impl TwoFactorIncomplete { }} } - pub fn delete_all_by_user(user_uuid: &str, conn: &DbConn) -> EmptyResult { + pub async fn delete_all_by_user(user_uuid: &str, conn: &DbConn) -> EmptyResult { db_run! { conn: { diesel::delete(twofactor_incomplete::table.filter(twofactor_incomplete::user_uuid.eq(user_uuid))) .execute(conn) diff --git a/src/db/models/user.rs b/src/db/models/user.rs index 599661e5..80a0cb8e 100644 --- a/src/db/models/user.rs +++ b/src/db/models/user.rs @@ -192,12 +192,20 @@ use crate::db::DbConn; use crate::api::EmptyResult; use crate::error::MapResult; +use futures::{stream, stream::StreamExt}; + /// Database methods impl User { - pub fn to_json(&self, conn: &DbConn) -> Value { - let orgs = UserOrganization::find_confirmed_by_user(&self.uuid, conn); - let orgs_json: Vec = orgs.iter().map(|c| c.to_json(conn)).collect(); - let twofactor_enabled = !TwoFactor::find_by_user(&self.uuid, conn).is_empty(); + pub async fn to_json(&self, conn: &DbConn) -> Value { + let orgs_json = stream::iter(UserOrganization::find_confirmed_by_user(&self.uuid, conn).await) + .then(|c| async { + let c = c; // Move out this single variable + c.to_json(conn).await + }) + .collect::>() + .await; + + let twofactor_enabled = !TwoFactor::find_by_user(&self.uuid, conn).await.is_empty(); // TODO: Might want to save the status field in the DB let status = if self.password_hash.is_empty() { @@ -227,7 +235,7 @@ impl User { }) } - pub fn save(&mut self, conn: &DbConn) -> EmptyResult { + pub async fn save(&mut self, conn: &DbConn) -> EmptyResult { if self.email.trim().is_empty() { err!("User email can't be empty") } @@ -265,26 +273,26 @@ impl User { } } - pub fn delete(self, conn: &DbConn) -> EmptyResult { - for user_org in UserOrganization::find_confirmed_by_user(&self.uuid, conn) { + pub async fn delete(self, conn: &DbConn) -> EmptyResult { + for user_org in 
UserOrganization::find_confirmed_by_user(&self.uuid, conn).await { if user_org.atype == UserOrgType::Owner { let owner_type = UserOrgType::Owner as i32; - if UserOrganization::find_by_org_and_type(&user_org.org_uuid, owner_type, conn).len() <= 1 { + if UserOrganization::find_by_org_and_type(&user_org.org_uuid, owner_type, conn).await.len() <= 1 { err!("Can't delete last owner") } } } - Send::delete_all_by_user(&self.uuid, conn)?; - EmergencyAccess::delete_all_by_user(&self.uuid, conn)?; - UserOrganization::delete_all_by_user(&self.uuid, conn)?; - Cipher::delete_all_by_user(&self.uuid, conn)?; - Favorite::delete_all_by_user(&self.uuid, conn)?; - Folder::delete_all_by_user(&self.uuid, conn)?; - Device::delete_all_by_user(&self.uuid, conn)?; - TwoFactor::delete_all_by_user(&self.uuid, conn)?; - TwoFactorIncomplete::delete_all_by_user(&self.uuid, conn)?; - Invitation::take(&self.email, conn); // Delete invitation if any + Send::delete_all_by_user(&self.uuid, conn).await?; + EmergencyAccess::delete_all_by_user(&self.uuid, conn).await?; + UserOrganization::delete_all_by_user(&self.uuid, conn).await?; + Cipher::delete_all_by_user(&self.uuid, conn).await?; + Favorite::delete_all_by_user(&self.uuid, conn).await?; + Folder::delete_all_by_user(&self.uuid, conn).await?; + Device::delete_all_by_user(&self.uuid, conn).await?; + TwoFactor::delete_all_by_user(&self.uuid, conn).await?; + TwoFactorIncomplete::delete_all_by_user(&self.uuid, conn).await?; + Invitation::take(&self.email, conn).await; // Delete invitation if any db_run! 
{conn: { diesel::delete(users::table.filter(users::uuid.eq(self.uuid))) @@ -293,13 +301,13 @@ impl User { }} } - pub fn update_uuid_revision(uuid: &str, conn: &DbConn) { - if let Err(e) = Self::_update_revision(uuid, &Utc::now().naive_utc(), conn) { + pub async fn update_uuid_revision(uuid: &str, conn: &DbConn) { + if let Err(e) = Self::_update_revision(uuid, &Utc::now().naive_utc(), conn).await { warn!("Failed to update revision for {}: {:#?}", uuid, e); } } - pub fn update_all_revisions(conn: &DbConn) -> EmptyResult { + pub async fn update_all_revisions(conn: &DbConn) -> EmptyResult { let updated_at = Utc::now().naive_utc(); db_run! {conn: { @@ -312,13 +320,13 @@ impl User { }} } - pub fn update_revision(&mut self, conn: &DbConn) -> EmptyResult { + pub async fn update_revision(&mut self, conn: &DbConn) -> EmptyResult { self.updated_at = Utc::now().naive_utc(); - Self::_update_revision(&self.uuid, &self.updated_at, conn) + Self::_update_revision(&self.uuid, &self.updated_at, conn).await } - fn _update_revision(uuid: &str, date: &NaiveDateTime, conn: &DbConn) -> EmptyResult { + async fn _update_revision(uuid: &str, date: &NaiveDateTime, conn: &DbConn) -> EmptyResult { db_run! {conn: { crate::util::retry(|| { diesel::update(users::table.filter(users::uuid.eq(uuid))) @@ -329,7 +337,7 @@ impl User { }} } - pub fn find_by_mail(mail: &str, conn: &DbConn) -> Option { + pub async fn find_by_mail(mail: &str, conn: &DbConn) -> Option { let lower_mail = mail.to_lowercase(); db_run! {conn: { users::table @@ -340,20 +348,20 @@ impl User { }} } - pub fn find_by_uuid(uuid: &str, conn: &DbConn) -> Option { + pub async fn find_by_uuid(uuid: &str, conn: &DbConn) -> Option { db_run! {conn: { users::table.filter(users::uuid.eq(uuid)).first::(conn).ok().from_db() }} } - pub fn get_all(conn: &DbConn) -> Vec { + pub async fn get_all(conn: &DbConn) -> Vec { db_run! 
{conn: { users::table.load::(conn).expect("Error loading users").from_db() }} } - pub fn last_active(&self, conn: &DbConn) -> Option { - match Device::find_latest_active_by_user(&self.uuid, conn) { + pub async fn last_active(&self, conn: &DbConn) -> Option { + match Device::find_latest_active_by_user(&self.uuid, conn).await { Some(device) => Some(device.updated_at), None => None, } @@ -368,7 +376,7 @@ impl Invitation { } } - pub fn save(&self, conn: &DbConn) -> EmptyResult { + pub async fn save(&self, conn: &DbConn) -> EmptyResult { if self.email.trim().is_empty() { err!("Invitation email can't be empty") } @@ -393,7 +401,7 @@ impl Invitation { } } - pub fn delete(self, conn: &DbConn) -> EmptyResult { + pub async fn delete(self, conn: &DbConn) -> EmptyResult { db_run! {conn: { diesel::delete(invitations::table.filter(invitations::email.eq(self.email))) .execute(conn) @@ -401,7 +409,7 @@ impl Invitation { }} } - pub fn find_by_mail(mail: &str, conn: &DbConn) -> Option { + pub async fn find_by_mail(mail: &str, conn: &DbConn) -> Option { let lower_mail = mail.to_lowercase(); db_run! {conn: { invitations::table @@ -412,9 +420,9 @@ impl Invitation { }} } - pub fn take(mail: &str, conn: &DbConn) -> bool { - match Self::find_by_mail(mail, conn) { - Some(invitation) => invitation.delete(conn).is_ok(), + pub async fn take(mail: &str, conn: &DbConn) -> bool { + match Self::find_by_mail(mail, conn).await { + Some(invitation) => invitation.delete(conn).await.is_ok(), None => false, } } diff --git a/src/main.rs b/src/main.rs index bcbf1d29..17305a5e 100644 --- a/src/main.rs +++ b/src/main.rs @@ -1,4 +1,6 @@ #![forbid(unsafe_code)] +// #![warn(rust_2018_idioms)] +#![warn(rust_2021_compatibility)] #![cfg_attr(feature = "unstable", feature(ip))] // The recursion_limit is mainly triggered by the json!() macro. // The more key/value pairs there are the more recursion occurs. 
@@ -72,7 +74,7 @@ async fn main() -> Result<(), Error> { let pool = create_db_pool(); schedule_jobs(pool.clone()).await; - crate::db::models::TwoFactor::migrate_u2f_to_webauthn(&pool.get().await.unwrap()).unwrap(); + crate::db::models::TwoFactor::migrate_u2f_to_webauthn(&pool.get().await.unwrap()).await.unwrap(); launch_rocket(pool, extra_debug).await // Blocks until program termination. } From f38926d666bc9cabaa95c7bba46eb3894aa80dce Mon Sep 17 00:00:00 2001 From: BlackDex Date: Fri, 19 Nov 2021 17:50:16 +0100 Subject: [PATCH 3/8] Upd Dockerfiles, crates. Fixed rust 2018 idioms - Updated crates - Fixed Dockerfiles to build using the rust stable version - Enabled warnings for rust 2018 idioms and fixed them. --- docker/Dockerfile.j2 | 5 ++++- docker/amd64/Dockerfile | 5 ++++- docker/amd64/Dockerfile.alpine | 5 ++++- docker/amd64/Dockerfile.buildx | 5 ++++- docker/amd64/Dockerfile.buildx.alpine | 5 ++++- docker/arm64/Dockerfile | 5 ++++- docker/arm64/Dockerfile.alpine | 5 ++++- docker/arm64/Dockerfile.buildx | 5 ++++- docker/arm64/Dockerfile.buildx.alpine | 5 ++++- docker/armv6/Dockerfile | 5 ++++- docker/armv6/Dockerfile.alpine | 5 ++++- docker/armv6/Dockerfile.buildx | 5 ++++- docker/armv6/Dockerfile.buildx.alpine | 5 ++++- docker/armv7/Dockerfile | 5 ++++- docker/armv7/Dockerfile.alpine | 5 ++++- docker/armv7/Dockerfile.buildx | 5 ++++- docker/armv7/Dockerfile.buildx.alpine | 5 ++++- src/api/admin.rs | 6 +++--- src/auth.rs | 6 +++--- src/config.rs | 4 ++-- src/error.rs | 6 +++--- src/main.rs | 3 +-- src/util.rs | 8 ++++---- 23 files changed, 84 insertions(+), 34 deletions(-) diff --git a/docker/Dockerfile.j2 b/docker/Dockerfile.j2 index c615f0db..2cffc647 100644 --- a/docker/Dockerfile.j2 +++ b/docker/Dockerfile.j2 @@ -241,7 +241,6 @@ EXPOSE 3012 # Copies the files from the context (Rocket.toml file and web-vault) # and the binary from the "build" stage to the current stage WORKDIR / -COPY Rocket.toml . 
COPY --from=vault /web-vault ./web-vault {% if package_arch_target is defined %} COPY --from=build /app/target/{{ package_arch_target }}/release/vaultwarden . @@ -255,5 +254,9 @@ COPY docker/start.sh /start.sh HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"] # Configures the startup! +# We should be able to remove the dumb-init now with Rocket 0.5 +# But the balenalib images have some issues with there entry.sh +# See: https://github.com/balena-io-library/base-images/issues/735 +# Lets keep using dumb-init for now, since that is working fine. ENTRYPOINT ["/usr/bin/dumb-init", "--"] CMD ["/start.sh"] diff --git a/docker/amd64/Dockerfile b/docker/amd64/Dockerfile index dcc62eda..3af0f411 100644 --- a/docker/amd64/Dockerfile +++ b/docker/amd64/Dockerfile @@ -115,7 +115,6 @@ EXPOSE 3012 # Copies the files from the context (Rocket.toml file and web-vault) # and the binary from the "build" stage to the current stage WORKDIR / -COPY Rocket.toml . COPY --from=vault /web-vault ./web-vault COPY --from=build /app/target/release/vaultwarden . @@ -125,5 +124,9 @@ COPY docker/start.sh /start.sh HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"] # Configures the startup! +# We should be able to remove the dumb-init now with Rocket 0.5 +# But the balenalib images have some issues with there entry.sh +# See: https://github.com/balena-io-library/base-images/issues/735 +# Lets keep using dumb-init for now, since that is working fine. ENTRYPOINT ["/usr/bin/dumb-init", "--"] CMD ["/start.sh"] diff --git a/docker/amd64/Dockerfile.alpine b/docker/amd64/Dockerfile.alpine index ea079917..189f50e6 100644 --- a/docker/amd64/Dockerfile.alpine +++ b/docker/amd64/Dockerfile.alpine @@ -107,7 +107,6 @@ EXPOSE 3012 # Copies the files from the context (Rocket.toml file and web-vault) # and the binary from the "build" stage to the current stage WORKDIR / -COPY Rocket.toml . 
COPY --from=vault /web-vault ./web-vault COPY --from=build /app/target/x86_64-unknown-linux-musl/release/vaultwarden . @@ -117,5 +116,9 @@ COPY docker/start.sh /start.sh HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"] # Configures the startup! +# We should be able to remove the dumb-init now with Rocket 0.5 +# But the balenalib images have some issues with there entry.sh +# See: https://github.com/balena-io-library/base-images/issues/735 +# Lets keep using dumb-init for now, since that is working fine. ENTRYPOINT ["/usr/bin/dumb-init", "--"] CMD ["/start.sh"] diff --git a/docker/amd64/Dockerfile.buildx b/docker/amd64/Dockerfile.buildx index 09c43d8e..05b6b71d 100644 --- a/docker/amd64/Dockerfile.buildx +++ b/docker/amd64/Dockerfile.buildx @@ -115,7 +115,6 @@ EXPOSE 3012 # Copies the files from the context (Rocket.toml file and web-vault) # and the binary from the "build" stage to the current stage WORKDIR / -COPY Rocket.toml . COPY --from=vault /web-vault ./web-vault COPY --from=build /app/target/release/vaultwarden . @@ -125,5 +124,9 @@ COPY docker/start.sh /start.sh HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"] # Configures the startup! +# We should be able to remove the dumb-init now with Rocket 0.5 +# But the balenalib images have some issues with there entry.sh +# See: https://github.com/balena-io-library/base-images/issues/735 +# Lets keep using dumb-init for now, since that is working fine. ENTRYPOINT ["/usr/bin/dumb-init", "--"] CMD ["/start.sh"] diff --git a/docker/amd64/Dockerfile.buildx.alpine b/docker/amd64/Dockerfile.buildx.alpine index 6ec91417..066b8fe1 100644 --- a/docker/amd64/Dockerfile.buildx.alpine +++ b/docker/amd64/Dockerfile.buildx.alpine @@ -107,7 +107,6 @@ EXPOSE 3012 # Copies the files from the context (Rocket.toml file and web-vault) # and the binary from the "build" stage to the current stage WORKDIR / -COPY Rocket.toml . 
COPY --from=vault /web-vault ./web-vault COPY --from=build /app/target/x86_64-unknown-linux-musl/release/vaultwarden . @@ -117,5 +116,9 @@ COPY docker/start.sh /start.sh HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"] # Configures the startup! +# We should be able to remove the dumb-init now with Rocket 0.5 +# But the balenalib images have some issues with there entry.sh +# See: https://github.com/balena-io-library/base-images/issues/735 +# Lets keep using dumb-init for now, since that is working fine. ENTRYPOINT ["/usr/bin/dumb-init", "--"] CMD ["/start.sh"] diff --git a/docker/arm64/Dockerfile b/docker/arm64/Dockerfile index 0e166e6e..d3a32dc4 100644 --- a/docker/arm64/Dockerfile +++ b/docker/arm64/Dockerfile @@ -139,7 +139,6 @@ EXPOSE 3012 # Copies the files from the context (Rocket.toml file and web-vault) # and the binary from the "build" stage to the current stage WORKDIR / -COPY Rocket.toml . COPY --from=vault /web-vault ./web-vault COPY --from=build /app/target/aarch64-unknown-linux-gnu/release/vaultwarden . @@ -149,5 +148,9 @@ COPY docker/start.sh /start.sh HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"] # Configures the startup! +# We should be able to remove the dumb-init now with Rocket 0.5 +# But the balenalib images have some issues with there entry.sh +# See: https://github.com/balena-io-library/base-images/issues/735 +# Lets keep using dumb-init for now, since that is working fine. ENTRYPOINT ["/usr/bin/dumb-init", "--"] CMD ["/start.sh"] diff --git a/docker/arm64/Dockerfile.alpine b/docker/arm64/Dockerfile.alpine index de37ac18..6890d7bf 100644 --- a/docker/arm64/Dockerfile.alpine +++ b/docker/arm64/Dockerfile.alpine @@ -111,7 +111,6 @@ EXPOSE 3012 # Copies the files from the context (Rocket.toml file and web-vault) # and the binary from the "build" stage to the current stage WORKDIR / -COPY Rocket.toml . 
COPY --from=vault /web-vault ./web-vault COPY --from=build /app/target/aarch64-unknown-linux-musl/release/vaultwarden . @@ -121,5 +120,9 @@ COPY docker/start.sh /start.sh HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"] # Configures the startup! +# We should be able to remove the dumb-init now with Rocket 0.5 +# But the balenalib images have some issues with there entry.sh +# See: https://github.com/balena-io-library/base-images/issues/735 +# Lets keep using dumb-init for now, since that is working fine. ENTRYPOINT ["/usr/bin/dumb-init", "--"] CMD ["/start.sh"] diff --git a/docker/arm64/Dockerfile.buildx b/docker/arm64/Dockerfile.buildx index b6e4f570..b93cd90e 100644 --- a/docker/arm64/Dockerfile.buildx +++ b/docker/arm64/Dockerfile.buildx @@ -139,7 +139,6 @@ EXPOSE 3012 # Copies the files from the context (Rocket.toml file and web-vault) # and the binary from the "build" stage to the current stage WORKDIR / -COPY Rocket.toml . COPY --from=vault /web-vault ./web-vault COPY --from=build /app/target/aarch64-unknown-linux-gnu/release/vaultwarden . @@ -149,5 +148,9 @@ COPY docker/start.sh /start.sh HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"] # Configures the startup! +# We should be able to remove the dumb-init now with Rocket 0.5 +# But the balenalib images have some issues with there entry.sh +# See: https://github.com/balena-io-library/base-images/issues/735 +# Lets keep using dumb-init for now, since that is working fine. ENTRYPOINT ["/usr/bin/dumb-init", "--"] CMD ["/start.sh"] diff --git a/docker/arm64/Dockerfile.buildx.alpine b/docker/arm64/Dockerfile.buildx.alpine index 18ce076f..dd4107c6 100644 --- a/docker/arm64/Dockerfile.buildx.alpine +++ b/docker/arm64/Dockerfile.buildx.alpine @@ -111,7 +111,6 @@ EXPOSE 3012 # Copies the files from the context (Rocket.toml file and web-vault) # and the binary from the "build" stage to the current stage WORKDIR / -COPY Rocket.toml . 
COPY --from=vault /web-vault ./web-vault COPY --from=build /app/target/aarch64-unknown-linux-musl/release/vaultwarden . @@ -121,5 +120,9 @@ COPY docker/start.sh /start.sh HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"] # Configures the startup! +# We should be able to remove the dumb-init now with Rocket 0.5 +# But the balenalib images have some issues with there entry.sh +# See: https://github.com/balena-io-library/base-images/issues/735 +# Lets keep using dumb-init for now, since that is working fine. ENTRYPOINT ["/usr/bin/dumb-init", "--"] CMD ["/start.sh"] diff --git a/docker/armv6/Dockerfile b/docker/armv6/Dockerfile index 050bcc65..e9e6d4bb 100644 --- a/docker/armv6/Dockerfile +++ b/docker/armv6/Dockerfile @@ -139,7 +139,6 @@ EXPOSE 3012 # Copies the files from the context (Rocket.toml file and web-vault) # and the binary from the "build" stage to the current stage WORKDIR / -COPY Rocket.toml . COPY --from=vault /web-vault ./web-vault COPY --from=build /app/target/arm-unknown-linux-gnueabi/release/vaultwarden . @@ -149,5 +148,9 @@ COPY docker/start.sh /start.sh HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"] # Configures the startup! +# We should be able to remove the dumb-init now with Rocket 0.5 +# But the balenalib images have some issues with there entry.sh +# See: https://github.com/balena-io-library/base-images/issues/735 +# Lets keep using dumb-init for now, since that is working fine. ENTRYPOINT ["/usr/bin/dumb-init", "--"] CMD ["/start.sh"] diff --git a/docker/armv6/Dockerfile.alpine b/docker/armv6/Dockerfile.alpine index ef5f0244..19f7f936 100644 --- a/docker/armv6/Dockerfile.alpine +++ b/docker/armv6/Dockerfile.alpine @@ -111,7 +111,6 @@ EXPOSE 3012 # Copies the files from the context (Rocket.toml file and web-vault) # and the binary from the "build" stage to the current stage WORKDIR / -COPY Rocket.toml . 
COPY --from=vault /web-vault ./web-vault COPY --from=build /app/target/arm-unknown-linux-musleabi/release/vaultwarden . @@ -121,5 +120,9 @@ COPY docker/start.sh /start.sh HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"] # Configures the startup! +# We should be able to remove the dumb-init now with Rocket 0.5 +# But the balenalib images have some issues with there entry.sh +# See: https://github.com/balena-io-library/base-images/issues/735 +# Lets keep using dumb-init for now, since that is working fine. ENTRYPOINT ["/usr/bin/dumb-init", "--"] CMD ["/start.sh"] diff --git a/docker/armv6/Dockerfile.buildx b/docker/armv6/Dockerfile.buildx index d79409cc..7d6131bf 100644 --- a/docker/armv6/Dockerfile.buildx +++ b/docker/armv6/Dockerfile.buildx @@ -139,7 +139,6 @@ EXPOSE 3012 # Copies the files from the context (Rocket.toml file and web-vault) # and the binary from the "build" stage to the current stage WORKDIR / -COPY Rocket.toml . COPY --from=vault /web-vault ./web-vault COPY --from=build /app/target/arm-unknown-linux-gnueabi/release/vaultwarden . @@ -149,5 +148,9 @@ COPY docker/start.sh /start.sh HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"] # Configures the startup! +# We should be able to remove the dumb-init now with Rocket 0.5 +# But the balenalib images have some issues with there entry.sh +# See: https://github.com/balena-io-library/base-images/issues/735 +# Lets keep using dumb-init for now, since that is working fine. ENTRYPOINT ["/usr/bin/dumb-init", "--"] CMD ["/start.sh"] diff --git a/docker/armv6/Dockerfile.buildx.alpine b/docker/armv6/Dockerfile.buildx.alpine index 0abcf50c..5e9d68f9 100644 --- a/docker/armv6/Dockerfile.buildx.alpine +++ b/docker/armv6/Dockerfile.buildx.alpine @@ -111,7 +111,6 @@ EXPOSE 3012 # Copies the files from the context (Rocket.toml file and web-vault) # and the binary from the "build" stage to the current stage WORKDIR / -COPY Rocket.toml . 
COPY --from=vault /web-vault ./web-vault COPY --from=build /app/target/arm-unknown-linux-musleabi/release/vaultwarden . @@ -121,5 +120,9 @@ COPY docker/start.sh /start.sh HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"] # Configures the startup! +# We should be able to remove the dumb-init now with Rocket 0.5 +# But the balenalib images have some issues with there entry.sh +# See: https://github.com/balena-io-library/base-images/issues/735 +# Lets keep using dumb-init for now, since that is working fine. ENTRYPOINT ["/usr/bin/dumb-init", "--"] CMD ["/start.sh"] diff --git a/docker/armv7/Dockerfile b/docker/armv7/Dockerfile index 18c73176..3ac3f106 100644 --- a/docker/armv7/Dockerfile +++ b/docker/armv7/Dockerfile @@ -139,7 +139,6 @@ EXPOSE 3012 # Copies the files from the context (Rocket.toml file and web-vault) # and the binary from the "build" stage to the current stage WORKDIR / -COPY Rocket.toml . COPY --from=vault /web-vault ./web-vault COPY --from=build /app/target/armv7-unknown-linux-gnueabihf/release/vaultwarden . @@ -149,5 +148,9 @@ COPY docker/start.sh /start.sh HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"] # Configures the startup! +# We should be able to remove the dumb-init now with Rocket 0.5 +# But the balenalib images have some issues with there entry.sh +# See: https://github.com/balena-io-library/base-images/issues/735 +# Lets keep using dumb-init for now, since that is working fine. ENTRYPOINT ["/usr/bin/dumb-init", "--"] CMD ["/start.sh"] diff --git a/docker/armv7/Dockerfile.alpine b/docker/armv7/Dockerfile.alpine index 26624ee1..1ed36519 100644 --- a/docker/armv7/Dockerfile.alpine +++ b/docker/armv7/Dockerfile.alpine @@ -114,7 +114,6 @@ EXPOSE 3012 # Copies the files from the context (Rocket.toml file and web-vault) # and the binary from the "build" stage to the current stage WORKDIR / -COPY Rocket.toml . 
COPY --from=vault /web-vault ./web-vault COPY --from=build /app/target/armv7-unknown-linux-musleabihf/release/vaultwarden . @@ -124,5 +123,9 @@ COPY docker/start.sh /start.sh HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"] # Configures the startup! +# We should be able to remove the dumb-init now with Rocket 0.5 +# But the balenalib images have some issues with there entry.sh +# See: https://github.com/balena-io-library/base-images/issues/735 +# Lets keep using dumb-init for now, since that is working fine. ENTRYPOINT ["/usr/bin/dumb-init", "--"] CMD ["/start.sh"] diff --git a/docker/armv7/Dockerfile.buildx b/docker/armv7/Dockerfile.buildx index 18420adb..8df0f309 100644 --- a/docker/armv7/Dockerfile.buildx +++ b/docker/armv7/Dockerfile.buildx @@ -139,7 +139,6 @@ EXPOSE 3012 # Copies the files from the context (Rocket.toml file and web-vault) # and the binary from the "build" stage to the current stage WORKDIR / -COPY Rocket.toml . COPY --from=vault /web-vault ./web-vault COPY --from=build /app/target/armv7-unknown-linux-gnueabihf/release/vaultwarden . @@ -149,5 +148,9 @@ COPY docker/start.sh /start.sh HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"] # Configures the startup! +# We should be able to remove the dumb-init now with Rocket 0.5 +# But the balenalib images have some issues with there entry.sh +# See: https://github.com/balena-io-library/base-images/issues/735 +# Lets keep using dumb-init for now, since that is working fine. ENTRYPOINT ["/usr/bin/dumb-init", "--"] CMD ["/start.sh"] diff --git a/docker/armv7/Dockerfile.buildx.alpine b/docker/armv7/Dockerfile.buildx.alpine index 2dae7cf0..56d8e7ff 100644 --- a/docker/armv7/Dockerfile.buildx.alpine +++ b/docker/armv7/Dockerfile.buildx.alpine @@ -114,7 +114,6 @@ EXPOSE 3012 # Copies the files from the context (Rocket.toml file and web-vault) # and the binary from the "build" stage to the current stage WORKDIR / -COPY Rocket.toml . 
COPY --from=vault /web-vault ./web-vault COPY --from=build /app/target/armv7-unknown-linux-musleabihf/release/vaultwarden . @@ -124,5 +123,9 @@ COPY docker/start.sh /start.sh HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"] # Configures the startup! +# We should be able to remove the dumb-init now with Rocket 0.5 +# But the balenalib images have some issues with there entry.sh +# See: https://github.com/balena-io-library/base-images/issues/735 +# Lets keep using dumb-init for now, since that is working fine. ENTRYPOINT ["/usr/bin/dumb-init", "--"] CMD ["/start.sh"] diff --git a/src/api/admin.rs b/src/api/admin.rs index 7d81ec7b..015ec7c7 100644 --- a/src/api/admin.rs +++ b/src/api/admin.rs @@ -141,7 +141,7 @@ fn admin_url(referer: Referer) -> String { } #[get("/", rank = 2)] -fn admin_login(flash: Option) -> ApiResult> { +fn admin_login(flash: Option>) -> ApiResult> { // If there is an error, show it let msg = flash.map(|msg| format!("{}: {}", msg.kind(), msg.message())); let json = json!({ @@ -164,7 +164,7 @@ struct LoginForm { #[post("/", data = "")] fn post_admin_login( data: Form, - cookies: &CookieJar, + cookies: &CookieJar<'_>, ip: ClientIp, referer: Referer, ) -> Result> { @@ -300,7 +300,7 @@ fn test_smtp(data: Json, _token: AdminToken) -> EmptyResult { } #[get("/logout")] -fn logout(cookies: &CookieJar, referer: Referer) -> Redirect { +fn logout(cookies: &CookieJar<'_>, referer: Referer) -> Redirect { cookies.remove(Cookie::named(COOKIE_NAME)); Redirect::to(admin_url(referer)) } diff --git a/src/auth.rs b/src/auth.rs index 7eaf1494..a2d64b30 100644 --- a/src/auth.rs +++ b/src/auth.rs @@ -38,7 +38,7 @@ static PRIVATE_RSA_KEY: Lazy = Lazy::new(|| { static PUBLIC_RSA_KEY_VEC: Lazy> = Lazy::new(|| { read_file(&CONFIG.public_rsa_key()).unwrap_or_else(|e| panic!("Error loading public RSA Key.\n{}", e)) }); -static PUBLIC_RSA_KEY: Lazy = Lazy::new(|| { +static PUBLIC_RSA_KEY: Lazy> = Lazy::new(|| { 
DecodingKey::from_rsa_pem(&PUBLIC_RSA_KEY_VEC).unwrap_or_else(|e| panic!("Error decoding public RSA Key.\n{}", e)) }); @@ -411,7 +411,7 @@ pub struct OrgHeaders { // org_id is usually the second path param ("/organizations/"), // but there are cases where it is a query value. // First check the path, if this is not a valid uuid, try the query values. -fn get_org_id(request: &Request) -> Option { +fn get_org_id(request: &Request<'_>) -> Option { if let Some(Ok(org_id)) = request.param::(1) { if uuid::Uuid::parse_str(&org_id).is_ok() { return Some(org_id); @@ -512,7 +512,7 @@ impl From for Headers { // col_id is usually the fourth path param ("/organizations//collections/"), // but there could be cases where it is a query value. // First check the path, if this is not a valid uuid, try the query values. -fn get_col_id(request: &Request) -> Option { +fn get_col_id(request: &Request<'_>) -> Option { if let Some(Ok(col_id)) = request.param::(3) { if uuid::Uuid::parse_str(&col_id).is_ok() { return Some(col_id); diff --git a/src/config.rs b/src/config.rs index efe38754..d2a52ef9 100644 --- a/src/config.rs +++ b/src/config.rs @@ -1011,7 +1011,7 @@ where fn case_helper<'reg, 'rc>( h: &Helper<'reg, 'rc>, - r: &'reg Handlebars, + r: &'reg Handlebars<'_>, ctx: &'rc Context, rc: &mut RenderContext<'reg, 'rc>, out: &mut dyn Output, @@ -1028,7 +1028,7 @@ fn case_helper<'reg, 'rc>( fn js_escape_helper<'reg, 'rc>( h: &Helper<'reg, 'rc>, - _r: &'reg Handlebars, + _r: &'reg Handlebars<'_>, _ctx: &'rc Context, _rc: &mut RenderContext<'reg, 'rc>, out: &mut dyn Output, diff --git a/src/error.rs b/src/error.rs index babe82ad..d7d49fca 100644 --- a/src/error.rs +++ b/src/error.rs @@ -24,7 +24,7 @@ macro_rules! 
make_error { } } impl std::fmt::Display for Error { - fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match &self.error {$( ErrorKind::$name(e) => f.write_str(&$usr_msg_fun(e, &self.message)), )+} @@ -93,7 +93,7 @@ make_error! { } impl std::fmt::Debug for Error { - fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self.source() { Some(e) => write!(f, "{}.\n[CAUSE] {:#?}", self.message, e), None => match self.error { @@ -196,7 +196,7 @@ use rocket::request::Request; use rocket::response::{self, Responder, Response}; impl<'r> Responder<'r, 'static> for Error { - fn respond_to(self, _: &Request) -> response::Result<'static> { + fn respond_to(self, _: &Request<'_>) -> response::Result<'static> { match self.error { ErrorKind::Empty(_) => {} // Don't print the error in this situation ErrorKind::Simple(_) => {} // Don't print the error in this situation diff --git a/src/main.rs b/src/main.rs index 17305a5e..cb382723 100644 --- a/src/main.rs +++ b/src/main.rs @@ -1,5 +1,5 @@ #![forbid(unsafe_code)] -// #![warn(rust_2018_idioms)] +#![warn(rust_2018_idioms)] #![warn(rust_2021_compatibility)] #![cfg_attr(feature = "unstable", feature(ip))] // The recursion_limit is mainly triggered by the json!() macro. 
@@ -8,7 +8,6 @@ // If you go above 128 it will cause rust-analyzer to fail, #![recursion_limit = "87"] -extern crate openssl; #[macro_use] extern crate rocket; #[macro_use] diff --git a/src/util.rs b/src/util.rs index 323df413..510c0cf2 100644 --- a/src/util.rs +++ b/src/util.rs @@ -52,7 +52,7 @@ impl Fairing for AppHeaders { pub struct Cors(); impl Cors { - fn get_header(headers: &HeaderMap, name: &str) -> String { + fn get_header(headers: &HeaderMap<'_>, name: &str) -> String { match headers.get_one(name) { Some(h) => h.to_string(), _ => "".to_string(), @@ -61,7 +61,7 @@ impl Cors { // Check a request's `Origin` header against the list of allowed origins. // If a match exists, return it. Otherwise, return None. - fn get_allowed_origin(headers: &HeaderMap) -> Option { + fn get_allowed_origin(headers: &HeaderMap<'_>) -> Option { let origin = Cors::get_header(headers, "Origin"); let domain_origin = CONFIG.domain_origin(); let safari_extension_origin = "file://"; @@ -157,7 +157,7 @@ impl<'r, R: 'r + Responder<'r, 'static> + Send> Responder<'r, 'static> for Cache pub struct SafeString(String); impl std::fmt::Display for SafeString { - fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { self.0.fmt(f) } } @@ -500,7 +500,7 @@ struct UpCaseVisitor; impl<'de> Visitor<'de> for UpCaseVisitor { type Value = Value; - fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { formatter.write_str("an object or an array") } From fd9693b9611438deb1c1e85237c25053283e04ab Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Daniel=20Garc=C3=ADa?= Date: Mon, 22 Nov 2021 00:01:23 +0100 Subject: [PATCH 4/8] await the mutex in db_run and use block_in_place for it's contents --- src/db/mod.rs | 99 +++++++++++++++------------------------------------ 1 file changed, 28 insertions(+), 71 deletions(-) diff --git a/src/db/mod.rs 
b/src/db/mod.rs index aeceda5a..6fcb63e5 100644 --- a/src/db/mod.rs +++ b/src/db/mod.rs @@ -206,87 +206,44 @@ macro_rules! db_run { // Different code for each db ( $conn:ident: $( $($db:ident),+ $body:block )+ ) => {{ #[allow(unused)] use diesel::prelude::*; + #[allow(unused)] use crate::db::FromDb; - // It is important that this inner Arc> (or the OwnedMutexGuard - // derived from it) never be a variable on the stack at an await point, - // where Drop might be called at any time. This causes (synchronous) - // Drop to be called from asynchronous code, which some database - // wrappers do not or can not handle. let conn = $conn.conn.clone(); + let mut conn = conn.lock_owned().await; + match conn.as_mut().expect("internal invariant broken: self.connection is Some") { + $($( + #[cfg($db)] + crate::db::DbConnInner::$db($conn) => { + paste::paste! { + #[allow(unused)] use crate::db::[<__ $db _schema>]::{self as schema, *}; + #[allow(unused)] use [<__ $db _model>]::*; + } - // Since connection can't be on the stack in an async fn during an - // await, we have to spawn a new blocking-safe thread... - /* - run_blocking(move || { - // And then re-enter the runtime to wait on the async mutex, but in - // a blocking fashion. - let mut conn = tokio::runtime::Handle::current().block_on(conn.lock_owned()); - let conn = conn.as_mut().expect("internal invariant broken: self.connection is Some"); - */ - let mut __conn_mutex = conn.try_lock_owned().unwrap(); - let conn = __conn_mutex.as_mut().unwrap(); - match conn { - $($( - #[cfg($db)] - crate::db::DbConnInner::$db($conn) => { - paste::paste! { - #[allow(unused)] use crate::db::[<__ $db _schema>]::{self as schema, *}; - #[allow(unused)] use [<__ $db _model>]::*; - #[allow(unused)] use crate::db::FromDb; - } - - /* - // Since connection can't be on the stack in an async fn during an - // await, we have to spawn a new blocking-safe thread... 
- run_blocking(move || { - // And then re-enter the runtime to wait on the async mutex, but in - // a blocking fashion. - let mut conn = tokio::runtime::Handle::current().block_on(async { - conn.lock_owned().await - }); - - let conn = conn.as_mut().expect("internal invariant broken: self.connection is Some"); - f(conn) - }).await;*/ - - $body - }, - )+)+ - } - // }).await + tokio::task::block_in_place(move || { $body }) // Run blocking can't be used due to the 'static limitation, use block_in_place instead + }, + )+)+ + } }}; ( @raw $conn:ident: $( $($db:ident),+ $body:block )+ ) => {{ #[allow(unused)] use diesel::prelude::*; + #[allow(unused)] use crate::db::FromDb; - // It is important that this inner Arc> (or the OwnedMutexGuard - // derived from it) never be a variable on the stack at an await point, - // where Drop might be called at any time. This causes (synchronous) - // Drop to be called from asynchronous code, which some database - // wrappers do not or can not handle. let conn = $conn.conn.clone(); + let mut conn = conn.lock_owned().await; + match conn.as_mut().expect("internal invariant broken: self.connection is Some") { + $($( + #[cfg($db)] + crate::db::DbConnInner::$db($conn) => { + paste::paste! { + #[allow(unused)] use crate::db::[<__ $db _schema>]::{self as schema, *}; + // @ RAW: #[allow(unused)] use [<__ $db _model>]::*; + } - // Since connection can't be on the stack in an async fn during an - // await, we have to spawn a new blocking-safe thread... - run_blocking(move || { - // And then re-enter the runtime to wait on the async mutex, but in - // a blocking fashion. - let mut conn = tokio::runtime::Handle::current().block_on(conn.lock_owned()); - match conn.as_mut().expect("internal invariant broken: self.connection is Some") { - $($( - #[cfg($db)] - crate::db::DbConnInner::$db($conn) => { - paste::paste! 
{ - #[allow(unused)] use crate::db::[<__ $db _schema>]::{self as schema, *}; - // @RAW: #[allow(unused)] use [<__ $db _model>]::*; - #[allow(unused)] use crate::db::FromDb; - } - - $body - }, - )+)+ - } - }).await + tokio::task::block_in_place(move || { $body }) // Run blocking can't be used due to the 'static limitation, use block_in_place instead + }, + )+)+ + } }}; } From 5125fdb88203c83aae75bbb2c5d94c15e9c5a085 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Daniel=20Garc=C3=ADa?= Date: Tue, 28 Dec 2021 00:48:33 +0100 Subject: [PATCH 5/8] Ignore unused field --- src/api/identity.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/src/api/identity.rs b/src/api/identity.rs index 2c04990b..da079b7d 100644 --- a/src/api/identity.rs +++ b/src/api/identity.rs @@ -495,6 +495,7 @@ struct ConnectData { #[field(name = uncased("device_type"))] #[field(name = uncased("devicetype"))] device_type: Option, + #[allow(unused)] #[field(name = uncased("device_push_token"))] #[field(name = uncased("devicepushtoken"))] _device_push_token: Option, // Unused; mobile device push not yet supported. 
From d781981bbdbe78ef34850b5b3359e38b714d29de Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Daniel=20Garc=C3=ADa?= Date: Sun, 30 Jan 2022 22:03:27 +0100 Subject: [PATCH 6/8] formatting --- src/api/core/accounts.rs | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/src/api/core/accounts.rs b/src/api/core/accounts.rs index af4e0796..1408ac6f 100644 --- a/src/api/core/accounts.rs +++ b/src/api/core/accounts.rs @@ -664,7 +664,12 @@ fn verify_password(data: JsonUpcase, headers: Headers Ok(()) } -async fn _api_key(data: JsonUpcase, rotate: bool, headers: Headers, conn: DbConn) -> JsonResult { +async fn _api_key( + data: JsonUpcase, + rotate: bool, + headers: Headers, + conn: DbConn, +) -> JsonResult { let data: SecretVerificationRequest = data.into_inner().data; let mut user = headers.user; From 5f01db69ffdb3d37e24e30a7003792ed72882973 Mon Sep 17 00:00:00 2001 From: BlackDex Date: Mon, 7 Feb 2022 22:26:22 +0100 Subject: [PATCH 7/8] Update async to prepare for main merge - Changed nightly to stable in Dockerfile and Workflow - Updated Dockerfile to use stable and updated ENV's - Removed 0.0.0.0 as default addr it now uses ROCKET_ADDRESS or the default - Updated Github Workflow actions to the latest versions - Updated Hadolint version - Re-orderd the Cargo.toml file a bit and put libs together which are linked - Updated some libs - Updated .dockerignore file --- .dockerignore | 17 +- .github/workflows/build.yml | 22 +- .github/workflows/hadolint.yml | 4 +- .github/workflows/release.yml | 4 +- Cargo.lock | 469 +++++++++++++++++++------- Cargo.toml | 155 ++++----- docker/Dockerfile.buildx | 1 + docker/Dockerfile.j2 | 14 +- docker/amd64/Dockerfile | 6 +- docker/amd64/Dockerfile.alpine | 6 +- docker/amd64/Dockerfile.buildx | 6 +- docker/amd64/Dockerfile.buildx.alpine | 6 +- docker/arm64/Dockerfile | 6 +- docker/arm64/Dockerfile.alpine | 6 +- docker/arm64/Dockerfile.buildx | 6 +- docker/arm64/Dockerfile.buildx.alpine | 6 +- docker/armv6/Dockerfile | 6 +- 
docker/armv6/Dockerfile.alpine | 6 +- docker/armv6/Dockerfile.buildx | 6 +- docker/armv6/Dockerfile.buildx.alpine | 6 +- docker/armv7/Dockerfile | 6 +- docker/armv7/Dockerfile.alpine | 6 +- docker/armv7/Dockerfile.buildx | 6 +- docker/armv7/Dockerfile.buildx.alpine | 6 +- src/main.rs | 1 - 25 files changed, 507 insertions(+), 276 deletions(-) diff --git a/.dockerignore b/.dockerignore index 69f51d2a..c7ffe132 100644 --- a/.dockerignore +++ b/.dockerignore @@ -3,13 +3,18 @@ target # Data folder data + +# Misc .env .env.template .gitattributes +.gitignore +rustfmt.toml # IDE files .vscode .idea +.editorconfig *.iml # Documentation @@ -19,9 +24,17 @@ data *.yml *.yaml -# Docker folders +# Docker hooks tools +Dockerfile +.dockerignore +docker/** +!docker/healthcheck.sh +!docker/start.sh # Web vault -web-vault \ No newline at end of file +web-vault + +# Vaultwarden Resources +resources diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index f92e6e54..465cf2a5 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -30,14 +30,14 @@ jobs: fail-fast: false matrix: channel: - - nightly + - stable target-triple: - x86_64-unknown-linux-gnu include: - target-triple: x86_64-unknown-linux-gnu host-triple: x86_64-unknown-linux-gnu features: [sqlite,mysql,postgresql] # Remember to update the `cargo test` to match the amount of features - channel: nightly + channel: stable os: ubuntu-20.04 ext: "" @@ -46,7 +46,7 @@ jobs: steps: # Checkout the repo - name: Checkout - uses: actions/checkout@5a4ac9002d0be2fb38bd78e4b4dbde5606d7042f # v2.3.4 + uses: actions/checkout@ec3a7ce113134d7a93b817d10a8272cb61118579 # v2.4.0 # End Checkout the repo @@ -82,28 +82,28 @@ jobs: # Run cargo tests (In release mode to speed up future builds) # First test all features together, afterwards test them separately. 
- name: "`cargo test --release --features ${{ join(matrix.features, ',') }} --target ${{ matrix.target-triple }}`" - uses: actions-rs/cargo@ae10961054e4aa8b4aa7dffede299aaf087aa33b # v1.0.1 + uses: actions-rs/cargo@844f36862e911db73fe0815f00a4a2602c279505 # v1.0.3 with: command: test args: --release --features ${{ join(matrix.features, ',') }} --target ${{ matrix.target-triple }} # Test single features # 0: sqlite - name: "`cargo test --release --features ${{ matrix.features[0] }} --target ${{ matrix.target-triple }}`" - uses: actions-rs/cargo@ae10961054e4aa8b4aa7dffede299aaf087aa33b # v1.0.1 + uses: actions-rs/cargo@844f36862e911db73fe0815f00a4a2602c279505 # v1.0.3 with: command: test args: --release --features ${{ matrix.features[0] }} --target ${{ matrix.target-triple }} if: ${{ matrix.features[0] != '' }} # 1: mysql - name: "`cargo test --release --features ${{ matrix.features[1] }} --target ${{ matrix.target-triple }}`" - uses: actions-rs/cargo@ae10961054e4aa8b4aa7dffede299aaf087aa33b # v1.0.1 + uses: actions-rs/cargo@844f36862e911db73fe0815f00a4a2602c279505 # v1.0.3 with: command: test args: --release --features ${{ matrix.features[1] }} --target ${{ matrix.target-triple }} if: ${{ matrix.features[1] != '' }} # 2: postgresql - name: "`cargo test --release --features ${{ matrix.features[2] }} --target ${{ matrix.target-triple }}`" - uses: actions-rs/cargo@ae10961054e4aa8b4aa7dffede299aaf087aa33b # v1.0.1 + uses: actions-rs/cargo@844f36862e911db73fe0815f00a4a2602c279505 # v1.0.3 with: command: test args: --release --features ${{ matrix.features[2] }} --target ${{ matrix.target-triple }} @@ -113,7 +113,7 @@ jobs: # Run cargo clippy, and fail on warnings (In release mode to speed up future builds) - name: "`cargo clippy --release --features ${{ join(matrix.features, ',') }} --target ${{ matrix.target-triple }}`" - uses: actions-rs/cargo@ae10961054e4aa8b4aa7dffede299aaf087aa33b # v1.0.1 + uses: actions-rs/cargo@844f36862e911db73fe0815f00a4a2602c279505 # v1.0.3 
with: command: clippy args: --release --features ${{ join(matrix.features, ',') }} --target ${{ matrix.target-triple }} -- -D warnings @@ -122,7 +122,7 @@ jobs: # Run cargo fmt - name: '`cargo fmt`' - uses: actions-rs/cargo@ae10961054e4aa8b4aa7dffede299aaf087aa33b # v1.0.1 + uses: actions-rs/cargo@844f36862e911db73fe0815f00a4a2602c279505 # v1.0.3 with: command: fmt args: --all -- --check @@ -131,7 +131,7 @@ jobs: # Build the binary - name: "`cargo build --release --features ${{ join(matrix.features, ',') }} --target ${{ matrix.target-triple }}`" - uses: actions-rs/cargo@ae10961054e4aa8b4aa7dffede299aaf087aa33b # v1.0.1 + uses: actions-rs/cargo@844f36862e911db73fe0815f00a4a2602c279505 # v1.0.3 with: command: build args: --release --features ${{ join(matrix.features, ',') }} --target ${{ matrix.target-triple }} @@ -140,7 +140,7 @@ jobs: # Upload artifact to Github Actions - name: Upload artifact - uses: actions/upload-artifact@27121b0bdffd731efa15d66772be8dc71245d074 # v2.2.4 + uses: actions/upload-artifact@82c141cc518b40d92cc801eee768e7aafc9c2fa2 # v2.3.1 with: name: vaultwarden-${{ matrix.target-triple }}${{ matrix.ext }} path: target/${{ matrix.target-triple }}/release/vaultwarden${{ matrix.ext }} diff --git a/.github/workflows/hadolint.yml b/.github/workflows/hadolint.yml index 375e437a..4b95d653 100644 --- a/.github/workflows/hadolint.yml +++ b/.github/workflows/hadolint.yml @@ -16,7 +16,7 @@ jobs: steps: # Checkout the repo - name: Checkout - uses: actions/checkout@5a4ac9002d0be2fb38bd78e4b4dbde5606d7042f # v2.3.4 + uses: actions/checkout@ec3a7ce113134d7a93b817d10a8272cb61118579 # v2.4.0 # End Checkout the repo @@ -27,7 +27,7 @@ jobs: sudo curl -L https://github.com/hadolint/hadolint/releases/download/v${HADOLINT_VERSION}/hadolint-$(uname -s)-$(uname -m) -o /usr/local/bin/hadolint && \ sudo chmod +x /usr/local/bin/hadolint env: - HADOLINT_VERSION: 2.7.0 + HADOLINT_VERSION: 2.8.0 # End Download hadolint # Test Dockerfiles diff --git 
a/.github/workflows/release.yml b/.github/workflows/release.yml index 4b425c01..00711726 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -60,13 +60,13 @@ jobs: steps: # Checkout the repo - name: Checkout - uses: actions/checkout@5a4ac9002d0be2fb38bd78e4b4dbde5606d7042f # v2.3.4 + uses: actions/checkout@ec3a7ce113134d7a93b817d10a8272cb61118579 # v2.4.0 with: fetch-depth: 0 # Login to Docker Hub - name: Login to Docker Hub - uses: docker/login-action@f054a8b539a109f9f41c372932f1ae047eff08c9 # v1.10.0 + uses: docker/login-action@42d299face0c5c43a0487c477f595ac9cf22f1a7 # v1.12.0 with: username: ${{ secrets.DOCKERHUB_USERNAME }} password: ${{ secrets.DOCKERHUB_TOKEN }} diff --git a/Cargo.lock b/Cargo.lock index 63504f0a..9516efe5 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -17,6 +17,41 @@ version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe" +[[package]] +name = "aead" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b613b8e1e3cf911a086f53f03bf286f52fd7a7258e4fa606f0ef220d39d8877" +dependencies = [ + "generic-array 0.14.5", +] + +[[package]] +name = "aes" +version = "0.7.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9e8b47f52ea9bae42228d07ec09eb676433d7c4ed1ebdf0f1d1c29ed446f1ab8" +dependencies = [ + "cfg-if 1.0.0", + "cipher", + "cpufeatures", + "opaque-debug 0.3.0", +] + +[[package]] +name = "aes-gcm" +version = "0.9.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "df5f85a83a7d8b0442b6aa7b504b8212c1733da07b98aae43d4bc21b2cb3cdf6" +dependencies = [ + "aead", + "aes", + "cipher", + "ctr", + "ghash", + "subtle", +] + [[package]] name = "aho-corasick" version = "0.7.18" @@ -118,9 +153,9 @@ dependencies = [ [[package]] name = "autocfg" -version = "1.0.1" +version = "1.1.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "cdb031dd78e28731d87d56cc8ffef4a8f36ca26c38fe2de700543e627f8a464a" +checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" [[package]] name = "backtrace" @@ -194,6 +229,15 @@ dependencies = [ "generic-array 0.14.5", ] +[[package]] +name = "block-buffer" +version = "0.10.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0bf7fe51849ea569fd452f37822f606a5cabb684dc918707a0193fd4664ff324" +dependencies = [ + "generic-array 0.14.5", +] + [[package]] name = "block-padding" version = "0.1.5" @@ -260,9 +304,9 @@ checksum = "c4872d67bab6358e59559027aa3b9157c53d9358c51423c17554809a8858e0f8" [[package]] name = "cc" -version = "1.0.72" +version = "1.0.73" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22a9137b95ea06864e018375b72adfb7db6e6f68cfc8df5a04d00288050485ee" +checksum = "2fff2a6927b3bb87f9595d67196a70493f627687a71d87a0d692242c33f58c11" [[package]] name = "cfg-if" @@ -296,7 +340,7 @@ dependencies = [ "num-integer", "num-traits", "serde", - "time 0.1.44", + "time 0.1.43", "winapi 0.3.9", ] @@ -322,6 +366,15 @@ dependencies = [ "phf_codegen 0.10.0", ] +[[package]] +name = "cipher" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7ee52072ec15386f770805afd189a01c8841be8696bed250fa2f13c4c0d6dfb7" +dependencies = [ + "generic-array 0.14.5", +] + [[package]] name = "const_fn" version = "0.4.9" @@ -345,7 +398,14 @@ version = "0.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "94d4706de1b0fa5b132270cddffa8585166037822e260a944fe161acd137ca05" dependencies = [ + "aes-gcm", + "base64 0.13.0", + "hkdf", + "hmac 0.12.1", "percent-encoding 2.1.0", + "rand 0.8.5", + "sha2 0.10.2", + "subtle", "time 0.3.7", "version_check", ] @@ -368,9 +428,9 @@ dependencies = [ [[package]] name = "core-foundation" -version = "0.9.2" +version = "0.9.3" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "6888e10551bb93e424d8df1d07f1a8b4fceb0001a3a4b048bfc47554946f47b3" +checksum = "194a7a9e6de53fa55116934067c844d9d749312f75c6f6d0980e8c252f8c2146" dependencies = [ "core-foundation-sys", "libc", @@ -393,9 +453,9 @@ dependencies = [ [[package]] name = "crc32fast" -version = "1.3.1" +version = "1.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a2209c310e29876f7f0b2721e7e26b84aff178aa3da5d091f9bfbf47669e60e3" +checksum = "b540bd8bc810d3885c6ea91e2018302f68baba2129ab3e88f32389ee9370880d" dependencies = [ "cfg-if 1.0.0", ] @@ -413,14 +473,24 @@ dependencies = [ [[package]] name = "crossbeam-utils" -version = "0.8.6" +version = "0.8.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cfcae03edb34f947e64acdb1c33ec169824e20657e9ecb61cef6c8c74dcb8120" +checksum = "b5e5bed1f1c269533fa816a0a5492b3545209a205ca1a54842be180eb63a16a6" dependencies = [ "cfg-if 1.0.0", "lazy_static", ] +[[package]] +name = "crypto-common" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "57952ca27b5e3606ff4dd79b0020231aaf9d6aa76dc05fd30137538c50bd3ce8" +dependencies = [ + "generic-array 0.14.5", + "typenum", +] + [[package]] name = "crypto-mac" version = "0.10.1" @@ -441,6 +511,15 @@ dependencies = [ "subtle", ] +[[package]] +name = "ctr" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "049bb91fb4aaf0e3c7efa6cd5ef877dbbbd15b39dad06d9948de4ec8a75761ea" +dependencies = [ + "cipher", +] + [[package]] name = "ctrlc" version = "3.2.1" @@ -453,12 +532,13 @@ dependencies = [ [[package]] name = "dashmap" -version = "4.0.2" +version = "5.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e77a43b28d0668df09411cb0bc9a8c2adc40f9a048afe863e05fd43251e8e39c" +checksum = "c0834a35a3fce649144119e18da2a4d8ed12ef3862f47183fd46f625d072d96c" dependencies = [ 
"cfg-if 1.0.0", "num_cpus", + "parking_lot 0.12.0", ] [[package]] @@ -565,6 +645,17 @@ dependencies = [ "generic-array 0.14.5", ] +[[package]] +name = "digest" +version = "0.10.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f2fb860ca6fafa5552fb6d0e816a69c8e49f0908bf524e30a90d97c85892d506" +dependencies = [ + "block-buffer 0.10.2", + "crypto-common", + "subtle", +] + [[package]] name = "discard" version = "1.0.4" @@ -719,9 +810,9 @@ checksum = "3dcaa9ae7725d12cdb85b3ad99a434db70b468c09ded17e012d86b5c1010f7a7" [[package]] name = "futf" -version = "0.1.4" +version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7c9c1ce3fa9336301af935ab852c437817d14cd33690446569392e65170aac3b" +checksum = "df420e2e84819663797d1ec6544b13c5be84629e7bb00dc960d6917db2987843" dependencies = [ "mac", "new_debug_unreachable", @@ -729,9 +820,9 @@ dependencies = [ [[package]] name = "futures" -version = "0.3.19" +version = "0.3.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28560757fe2bb34e79f907794bb6b22ae8b0e5c669b638a1132f2592b19035b4" +checksum = "f73fe65f54d1e12b726f517d3e2135ca3125a437b6d998caf1962961f7172d9e" dependencies = [ "futures-channel", "futures-core", @@ -744,9 +835,9 @@ dependencies = [ [[package]] name = "futures-channel" -version = "0.3.19" +version = "0.3.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ba3dda0b6588335f360afc675d0564c17a77a2bda81ca178a4b6081bd86c7f0b" +checksum = "c3083ce4b914124575708913bca19bfe887522d6e2e6d0952943f5eac4a74010" dependencies = [ "futures-core", "futures-sink", @@ -754,15 +845,15 @@ dependencies = [ [[package]] name = "futures-core" -version = "0.3.19" +version = "0.3.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d0c8ff0461b82559810cdccfde3215c3f373807f5e5232b71479bff7bb2583d7" +checksum = "0c09fd04b7e4073ac7156a9539b57a484a8ea920f79c7c675d05d289ab6110d3" [[package]] 
name = "futures-executor" -version = "0.3.19" +version = "0.3.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "29d6d2ff5bb10fb95c85b8ce46538a2e5f5e7fdc755623a7d4529ab8a4ed9d2a" +checksum = "9420b90cfa29e327d0429f19be13e7ddb68fa1cccb09d65e5706b8c7a749b8a6" dependencies = [ "futures-core", "futures-task", @@ -771,15 +862,15 @@ dependencies = [ [[package]] name = "futures-io" -version = "0.3.19" +version = "0.3.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b1f9d34af5a1aac6fb380f735fe510746c38067c5bf16c7fd250280503c971b2" +checksum = "fc4045962a5a5e935ee2fdedaa4e08284547402885ab326734432bed5d12966b" [[package]] name = "futures-macro" -version = "0.3.19" +version = "0.3.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6dbd947adfffb0efc70599b3ddcf7b5597bb5fa9e245eb99f62b3a5f7bb8bd3c" +checksum = "33c1e13800337f4d4d7a316bf45a567dbcb6ffe087f16424852d97e97a91f512" dependencies = [ "proc-macro2", "quote", @@ -788,15 +879,15 @@ dependencies = [ [[package]] name = "futures-sink" -version = "0.3.19" +version = "0.3.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e3055baccb68d74ff6480350f8d6eb8fcfa3aa11bdc1a1ae3afdd0514617d508" +checksum = "21163e139fa306126e6eedaf49ecdb4588f939600f0b1e770f4205ee4b7fa868" [[package]] name = "futures-task" -version = "0.3.19" +version = "0.3.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ee7c6485c30167ce4dfb83ac568a849fe53274c831081476ee13e0dce1aad72" +checksum = "57c66a976bf5909d801bbef33416c41372779507e7a6b3a5e25e4749c58f776a" [[package]] name = "futures-timer" @@ -806,9 +897,9 @@ checksum = "e64b03909df88034c26dc1547e8970b91f98bdb65165d6a4e9110d94263dbb2c" [[package]] name = "futures-util" -version = "0.3.19" +version = "0.3.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d9b5cf40b47a271f77a8b1bec03ca09044d99d2372c0de244e66430761127164" 
+checksum = "d8b7abd5d659d9b90c8cba917f6ec750a74e2dc23902ef9cd4cc8c8b22e6036a" dependencies = [ "futures-channel", "futures-core", @@ -873,7 +964,17 @@ checksum = "418d37c8b1d42553c93648be529cb70f920d3baf8ef469b74b9638df426e0b4c" dependencies = [ "cfg-if 1.0.0", "libc", - "wasi 0.10.0+wasi-snapshot-preview1", + "wasi 0.10.2+wasi-snapshot-preview1", +] + +[[package]] +name = "ghash" +version = "0.4.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1583cc1656d7839fd3732b80cf4f38850336cdb9b8ded1cd399ca62958de3c99" +dependencies = [ + "opaque-debug 0.3.0", + "polyval", ] [[package]] @@ -890,18 +991,18 @@ checksum = "9b919933a397b79c37e33b77bb2aa3dc8eb6e165ad809e58ff75bc7db2e34574" [[package]] name = "governor" -version = "0.4.1" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7df0ee4b237afb71e99f7e2fbd840ffec2d6c4bb569f69b2af18aa1f63077d38" +checksum = "19775995ee20209163239355bc3ad2f33f83da35d9ef72dea26e5af753552c87" dependencies = [ "dashmap", "futures", "futures-timer", "no-std-compat", "nonzero_ext", - "parking_lot 0.11.2", + "parking_lot 0.12.0", "quanta", - "rand 0.8.4", + "rand 0.8.5", "smallvec 1.8.0", ] @@ -920,7 +1021,7 @@ dependencies = [ "indexmap", "slab", "tokio", - "tokio-util", + "tokio-util 0.6.9", "tracing", ] @@ -969,6 +1070,15 @@ dependencies = [ "libc", ] +[[package]] +name = "hkdf" +version = "0.12.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "791a029f6b9fc27657f6f188ec6e5e43f6911f6f878e0dc5501396e09809d437" +dependencies = [ + "hmac 0.12.1", +] + [[package]] name = "hmac" version = "0.10.1" @@ -989,6 +1099,15 @@ dependencies = [ "digest 0.9.0", ] +[[package]] +name = "hmac" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6c49c37c09c17a53d937dfbb742eb3a961d65a994e6bcdcf37e7399d0cc8ab5e" +dependencies = [ + "digest 0.10.3", +] + [[package]] name = "hostname" version = "0.3.1" @@ 
-1022,7 +1141,7 @@ checksum = "31f4c6746584866f0feabcc69893c5b51beef3831656a968ed7ae254cdc4fd03" dependencies = [ "bytes 1.1.0", "fnv", - "itoa 1.0.1", + "itoa", ] [[package]] @@ -1038,9 +1157,9 @@ dependencies = [ [[package]] name = "httparse" -version = "1.5.1" +version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "acd94fdbe1d4ff688b67b04eee2e17bd50995534a61539e45adfefb45e5e5503" +checksum = "9100414882e15fb7feccb4897e5f0ff0ff1ca7d1a86a23208ada4d7a18e6c6c4" [[package]] name = "httpdate" @@ -1050,9 +1169,9 @@ checksum = "c4a1e36c821dbe04574f602848a19f742f4fb3c98d40449f11bcad18d6b17421" [[package]] name = "hyper" -version = "0.14.16" +version = "0.14.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b7ec3e62bdc98a2f0393a5048e4c30ef659440ea6e0e572965103e72bd836f55" +checksum = "043f0e083e9901b6cc658a77d1eb86f4fc650bbb977a4337dd63192826aa85dd" dependencies = [ "bytes 1.1.0", "futures-channel", @@ -1063,7 +1182,7 @@ dependencies = [ "http-body", "httparse", "httpdate", - "itoa 0.4.8", + "itoa", "pin-project-lite", "socket2 0.4.4", "tokio", @@ -1160,12 +1279,6 @@ version = "2.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "68f2d64f2edebec4ce84ad108148e67e1064789bee435edc5b60ad398714a3a9" -[[package]] -name = "itoa" -version = "0.4.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b71991ff56294aa922b450139ee08b3bfc70982c6b2c7562771375cf73542dd4" - [[package]] name = "itoa" version = "1.0.1" @@ -1250,9 +1363,9 @@ dependencies = [ [[package]] name = "libc" -version = "0.2.116" +version = "0.2.118" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "565dbd88872dbe4cc8a46e527f26483c1d1f7afa6b884a3bd6cd893d4f98da74" +checksum = "06e509672465a0504304aa87f9f176f2b2b716ed8fb105ebe5c02dc6dce96a94" [[package]] name = "libsqlite3-sys" @@ -1466,9 +1579,9 @@ dependencies = [ [[package]] name = "mio" -version = "0.7.14" 
+version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8067b404fe97c70829f082dec8bcf4f71225d7eaea1d8645349cb76fa06205cc" +checksum = "ba272f85fa0b41fc91872be579b3bbe0f56b792aa361a380eb669469f68dafb2" dependencies = [ "libc", "log", @@ -1526,7 +1639,7 @@ dependencies = [ "mime", "spin 0.9.2", "tokio", - "tokio-util", + "tokio-util 0.6.9", "version_check", ] @@ -1622,9 +1735,9 @@ checksum = "38bf9645c8b145698bb0b18a4637dcacbc421ea49bef2317e4fd8065a387cf21" [[package]] name = "ntapi" -version = "0.3.6" +version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f6bb902e437b6d86e03cce10a7e2af662292c5dfef23b65899ea3ac9354ad44" +checksum = "c28774a7fd2fbb4f0babd8237ce554b73af68021b5f695a3cebd6c59bac0980f" dependencies = [ "winapi 0.3.9", ] @@ -1807,6 +1920,16 @@ dependencies = [ "parking_lot_core 0.8.5", ] +[[package]] +name = "parking_lot" +version = "0.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "87f5ec2493a61ac0506c0f4199f99070cbe83857b0337006a30f3e6719b8ef58" +dependencies = [ + "lock_api", + "parking_lot_core 0.9.1", +] + [[package]] name = "parking_lot_core" version = "0.2.14" @@ -1833,6 +1956,19 @@ dependencies = [ "winapi 0.3.9", ] +[[package]] +name = "parking_lot_core" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "28141e0cc4143da2443301914478dc976a61ffdb3f043058310c70df2fed8954" +dependencies = [ + "cfg-if 1.0.0", + "libc", + "redox_syscall", + "smallvec 1.8.0", + "windows-sys", +] + [[package]] name = "parse-zoneinfo" version = "0.3.0" @@ -1992,7 +2128,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5d5285893bb5eb82e6aaf5d59ee909a06a16737a8970984dd7746ba9283498d6" dependencies = [ "phf_shared 0.10.0", - "rand 0.8.4", + "rand 0.8.5", ] [[package]] @@ -2038,6 +2174,18 @@ version = "0.3.24" source = "registry+https://github.com/rust-lang/crates.io-index" 
checksum = "58893f751c9b0412871a09abd62ecd2a00298c6c83befa223ef98c52aef40cbe" +[[package]] +name = "polyval" +version = "0.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8419d2b623c7c0896ff2d5d96e2cb4ede590fed28fcc34934f4c33c036e620a1" +dependencies = [ + "cfg-if 1.0.0", + "cpufeatures", + "opaque-debug 0.3.0", + "universal-hash", +] + [[package]] name = "ppv-lite86" version = "0.2.16" @@ -2116,7 +2264,7 @@ dependencies = [ "mach", "once_cell", "raw-cpuid", - "wasi 0.10.0+wasi-snapshot-preview1", + "wasi 0.10.2+wasi-snapshot-preview1", "web-sys", "winapi 0.3.9", ] @@ -2182,20 +2330,19 @@ dependencies = [ "libc", "rand_chacha 0.2.2", "rand_core 0.5.1", - "rand_hc 0.2.0", + "rand_hc", "rand_pcg", ] [[package]] name = "rand" -version = "0.8.4" +version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2e7573632e6454cf6b99d7aac4ccca54be06da05aca2ef7423d22d27d4d4bcd8" +checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" dependencies = [ "libc", "rand_chacha 0.3.1", "rand_core 0.6.3", - "rand_hc 0.3.1", ] [[package]] @@ -2260,15 +2407,6 @@ dependencies = [ "rand_core 0.5.1", ] -[[package]] -name = "rand_hc" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d51e9f596de227fda2ea6c84607f5558e196eeaf43c986b724ba4fb8fdf497e7" -dependencies = [ - "rand_core 0.6.3", -] - [[package]] name = "rand_pcg" version = "0.2.1" @@ -2394,7 +2532,7 @@ dependencies = [ "tokio", "tokio-native-tls", "tokio-socks", - "tokio-util", + "tokio-util 0.6.9", "trust-dns-resolver", "url 2.2.2", "wasm-bindgen", @@ -2451,7 +2589,7 @@ dependencies = [ [[package]] name = "rocket" version = "0.5.0-rc.1" -source = "git+https://github.com/SergioBenitez/Rocket?rev=8cae077ba1d54b92cdef3e171a730b819d5eeb8e#8cae077ba1d54b92cdef3e171a730b819d5eeb8e" +source = 
"git+https://github.com/SergioBenitez/Rocket?rev=66d18bf66517e2765494d082629e9b9748ff8ad6#66d18bf66517e2765494d082629e9b9748ff8ad6" dependencies = [ "async-stream", "async-trait", @@ -2467,9 +2605,9 @@ dependencies = [ "memchr", "multer", "num_cpus", - "parking_lot 0.11.2", + "parking_lot 0.12.0", "pin-project-lite", - "rand 0.8.4", + "rand 0.8.5", "ref-cast", "rocket_codegen", "rocket_http", @@ -2480,7 +2618,7 @@ dependencies = [ "time 0.3.7", "tokio", "tokio-stream", - "tokio-util", + "tokio-util 0.7.0", "ubyte", "version_check", "yansi", @@ -2489,7 +2627,7 @@ dependencies = [ [[package]] name = "rocket_codegen" version = "0.5.0-rc.1" -source = "git+https://github.com/SergioBenitez/Rocket?rev=8cae077ba1d54b92cdef3e171a730b819d5eeb8e#8cae077ba1d54b92cdef3e171a730b819d5eeb8e" +source = "git+https://github.com/SergioBenitez/Rocket?rev=66d18bf66517e2765494d082629e9b9748ff8ad6#66d18bf66517e2765494d082629e9b9748ff8ad6" dependencies = [ "devise", "glob", @@ -2504,7 +2642,7 @@ dependencies = [ [[package]] name = "rocket_http" version = "0.5.0-rc.1" -source = "git+https://github.com/SergioBenitez/Rocket?rev=8cae077ba1d54b92cdef3e171a730b819d5eeb8e#8cae077ba1d54b92cdef3e171a730b819d5eeb8e" +source = "git+https://github.com/SergioBenitez/Rocket?rev=66d18bf66517e2765494d082629e9b9748ff8ad6#66d18bf66517e2765494d082629e9b9748ff8ad6" dependencies = [ "cookie 0.16.0", "either", @@ -2620,9 +2758,9 @@ dependencies = [ [[package]] name = "security-framework" -version = "2.6.0" +version = "2.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3fed7948b6c68acbb6e20c334f55ad635dc0f75506963de4464289fbd3b051ac" +checksum = "2dc14f172faf8a0194a3aded622712b0de276821addc574fa54fc0a1167e10dc" dependencies = [ "bitflags", "core-foundation", @@ -2633,9 +2771,9 @@ dependencies = [ [[package]] name = "security-framework-sys" -version = "2.6.0" +version = "2.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"a57321bf8bc2362081b2599912d2961fe899c0efadf1b4b2f8d48b3e253bb96c" +checksum = "0160a13a177a45bfb43ce71c01580998474f556ad854dcbca936dd2841a5c556" dependencies = [ "core-foundation-sys", "libc", @@ -2688,11 +2826,11 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.78" +version = "1.0.79" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d23c1ba4cf0efd44be32017709280b32d1cea5c3f1275c3b6d9e8bc54f758085" +checksum = "8e8d9fa5c3b304765ce1fd9c4c8a3de2c8db365a5b91be52f186efc675681d95" dependencies = [ - "itoa 1.0.1", + "itoa", "ryu", "serde", ] @@ -2704,7 +2842,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d3491c14715ca2294c4d6a88f15e84739788c1d030eed8c110436aafdaa2f3fd" dependencies = [ "form_urlencoded", - "itoa 1.0.1", + "itoa", "ryu", "serde", ] @@ -2762,6 +2900,17 @@ dependencies = [ "opaque-debug 0.3.0", ] +[[package]] +name = "sha2" +version = "0.10.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "55deaec60f81eefe3cce0dc50bda92d6d8e88f2a27df7c5033b42afeb1ed2676" +dependencies = [ + "cfg-if 1.0.0", + "cpufeatures", + "digest 0.10.3", +] + [[package]] name = "sharded-slab" version = "0.1.4" @@ -2935,14 +3084,14 @@ checksum = "213701ba3370744dcd1a12960caa4843b3d68b4d1c0a5d575e0d65b2ee9d16c0" [[package]] name = "string_cache" -version = "0.8.2" +version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "923f0f39b6267d37d23ce71ae7235602134b250ace715dd2c90421998ddac0c6" +checksum = "33994d0838dc2d152d17a62adf608a869b5e846b65b389af7f3dbc1de45c5b26" dependencies = [ "lazy_static", "new_debug_unreachable", "parking_lot 0.11.2", - "phf_shared 0.8.0", + "phf_shared 0.10.0", "precomputed-hash", "serde", ] @@ -2985,7 +3134,7 @@ dependencies = [ "error-chain", "libc", "log", - "time 0.1.44", + "time 0.1.43", ] [[package]] @@ -3053,12 +3202,11 @@ dependencies = [ [[package]] name = "time" -version = "0.1.44" +version = "0.1.43" 
source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6db9e6914ab8b1ae1c260a4ae7a49b6c5611b40328a735b21862567685e73255" +checksum = "ca8a50ef2360fbd1eeb0ecd46795a87a19024eb4b53c5dc916ca1fd95fe62438" dependencies = [ "libc", - "wasi 0.10.0+wasi-snapshot-preview1", "winapi 0.3.9", ] @@ -3083,7 +3231,7 @@ version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "004cbc98f30fa233c61a38bc77e96a9106e65c88f2d3bef182ae952027e5753d" dependencies = [ - "itoa 1.0.1", + "itoa", "libc", "num_threads", "time-macros 0.2.3", @@ -3135,19 +3283,20 @@ checksum = "cda74da7e1a664f795bb1f8a87ec406fb89a02522cf6e50620d016add6dbbf5c" [[package]] name = "tokio" -version = "1.16.1" +version = "1.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c27a64b625de6d309e8c57716ba93021dccf1b3b5c97edd6d3dd2d2135afc0a" +checksum = "2af73ac49756f3f7c01172e34a23e5d0216f6c32333757c2c61feb2bbff5a5ee" dependencies = [ "bytes 1.1.0", "libc", "memchr", - "mio 0.7.14", + "mio 0.8.0", "num_cpus", "once_cell", - "parking_lot 0.11.2", + "parking_lot 0.12.0", "pin-project-lite", "signal-hook-registry", + "socket2 0.4.4", "tokio-macros", "winapi 0.3.9", ] @@ -3221,6 +3370,20 @@ dependencies = [ "tokio", ] +[[package]] +name = "tokio-util" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "64910e1b9c1901aaf5375561e35b9c057d95ff41a44ede043a03e09279eabaf1" +dependencies = [ + "bytes 1.1.0", + "futures-core", + "futures-sink", + "log", + "pin-project-lite", + "tokio", +] + [[package]] name = "toml" version = "0.5.8" @@ -3239,7 +3402,7 @@ dependencies = [ "digest 0.9.0", "hmac 0.11.0", "sha-1 0.9.8", - "sha2", + "sha2 0.9.9", ] [[package]] @@ -3250,9 +3413,9 @@ checksum = "360dfd1d6d30e05fda32ace2c8c70e9c0a9da713275777f5a4dbb8a1893930c6" [[package]] name = "tracing" -version = "0.1.29" +version = "0.1.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum 
= "375a639232caf30edfc78e8d89b2d4c375515393e7af7e16f01cd96917fb2105" +checksum = "f6c650a8ef0cd2dd93736f033d21cbd1224c5a967aa0c258d00fcf7dafef9b9f" dependencies = [ "cfg-if 1.0.0", "log", @@ -3263,9 +3426,9 @@ dependencies = [ [[package]] name = "tracing-attributes" -version = "0.1.18" +version = "0.1.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f4f480b8f81512e825f337ad51e94c1eb5d3bbdf2b363dcd01e2b19a9ffe3f8e" +checksum = "8276d9a4a3a558d7b7ad5303ad50b53d58264641b82914b7ada36bd762e7a716" dependencies = [ "proc-macro2", "quote", @@ -3274,11 +3437,12 @@ dependencies = [ [[package]] name = "tracing-core" -version = "0.1.21" +version = "0.1.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1f4ed65637b8390770814083d20756f87bfa2c21bf2f110babdc5438351746e4" +checksum = "03cfcb51380632a72d3111cb8d3447a8d908e577d31beeac006f836383d29a23" dependencies = [ "lazy_static", + "valuable", ] [[package]] @@ -3294,9 +3458,9 @@ dependencies = [ [[package]] name = "tracing-subscriber" -version = "0.3.7" +version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5312f325fe3588e277415f5a6cca1f4ccad0f248c4cd5a4bd33032d7286abc22" +checksum = "9e0ab7bdc962035a87fba73f3acca9b8a8d0034c2e6f60b84aeaaddddc155dce" dependencies = [ "ansi_term", "lazy_static", @@ -3312,9 +3476,9 @@ dependencies = [ [[package]] name = "trust-dns-proto" -version = "0.20.3" +version = "0.20.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ad0d7f5db438199a6e2609debe3f69f808d074e0a2888ee0bccb45fe234d03f4" +checksum = "ca94d4e9feb6a181c690c4040d7a24ef34018d8313ac5044a61d21222ae24e31" dependencies = [ "async-trait", "cfg-if 1.0.0", @@ -3327,7 +3491,7 @@ dependencies = [ "ipnet", "lazy_static", "log", - "rand 0.8.4", + "rand 0.8.5", "smallvec 1.8.0", "thiserror", "tinyvec", @@ -3337,9 +3501,9 @@ dependencies = [ [[package]] name = "trust-dns-resolver" -version = "0.20.3" +version = 
"0.20.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f6ad17b608a64bd0735e67bde16b0636f8aa8591f831a25d18443ed00a699770" +checksum = "ecae383baad9995efaa34ce8e57d12c3f305e545887472a492b838f4b5cfb77a" dependencies = [ "cfg-if 1.0.0", "futures-util", @@ -3381,7 +3545,7 @@ dependencies = [ "serde", "serde_derive", "serde_json", - "time 0.1.44", + "time 0.1.43", ] [[package]] @@ -3426,9 +3590,9 @@ dependencies = [ [[package]] name = "unicode-segmentation" -version = "1.8.0" +version = "1.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8895849a949e7845e06bd6dc1aa51731a103c42707010a5b591c0038fb73385b" +checksum = "7e8820f5d777f6224dc4be3632222971ac30164d4a258d595640799554ebfd99" [[package]] name = "unicode-xid" @@ -3436,6 +3600,16 @@ version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8ccb82d61f80a663efe1f787a51b16b5a51e3314d6ac365b08639f52387b33f3" +[[package]] +name = "universal-hash" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9f214e8f697e925001e66ec2c6e37a4ef93f0f78c2eed7814394e10c62025b05" +dependencies = [ + "generic-array 0.14.5", + "subtle", +] + [[package]] name = "untrusted" version = "0.7.1" @@ -3481,6 +3655,12 @@ dependencies = [ "getrandom 0.2.4", ] +[[package]] +name = "valuable" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "830b7e5d4d90034032940e4ace0d9a9a057e7a45cd94e6c007832e39edb82f6d" + [[package]] name = "vaultwarden" version = "1.0.0" @@ -3518,7 +3698,7 @@ dependencies = [ "paste", "percent-encoding 2.1.0", "pico-args", - "rand 0.8.4", + "rand 0.8.5", "regex", "reqwest", "ring", @@ -3579,9 +3759,9 @@ checksum = "cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519" [[package]] name = "wasi" -version = "0.10.0+wasi-snapshot-preview1" +version = "0.10.2+wasi-snapshot-preview1" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a143597ca7c7793eff794def352d41792a93c481eb1042423ff7ff72ba2c31f" +checksum = "fd6fbd9a79829dd1ad0cc20627bf1ed606756a7f77edff7b66b7064f9cb327c6" [[package]] name = "wasm-bindgen" @@ -3668,7 +3848,7 @@ dependencies = [ "base64 0.13.0", "nom 7.1.0", "openssl", - "rand 0.8.4", + "rand 0.8.5", "serde", "serde_cbor", "serde_derive", @@ -3737,6 +3917,49 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" +[[package]] +name = "windows-sys" +version = "0.32.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3df6e476185f92a12c072be4a189a0210dcdcf512a1891d6dff9edb874deadc6" +dependencies = [ + "windows_aarch64_msvc", + "windows_i686_gnu", + "windows_i686_msvc", + "windows_x86_64_gnu", + "windows_x86_64_msvc", +] + +[[package]] +name = "windows_aarch64_msvc" +version = "0.32.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d8e92753b1c443191654ec532f14c199742964a061be25d77d7a96f09db20bf5" + +[[package]] +name = "windows_i686_gnu" +version = "0.32.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6a711c68811799e017b6038e0922cb27a5e2f43a2ddb609fe0b6f3eeda9de615" + +[[package]] +name = "windows_i686_msvc" +version = "0.32.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "146c11bb1a02615db74680b32a68e2d61f553cc24c4eb5b4ca10311740e44172" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.32.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c912b12f7454c6620635bbff3450962753834be2a594819bd5e945af18ec64bc" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.32.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "504a2476202769977a040c6364301a3f65d0cc9e3fb08600b2bda150a0488316" + [[package]] name = "winreg" 
version = "0.6.2" @@ -3774,7 +3997,7 @@ dependencies = [ "log", "mac", "markup5ever", - "time 0.1.44", + "time 0.1.43", ] [[package]] @@ -3793,7 +4016,7 @@ dependencies = [ "crypto-mac 0.10.1", "futures", "hmac 0.10.1", - "rand 0.8.4", + "rand 0.8.5", "reqwest", "sha-1 0.9.8", "threadpool", diff --git a/Cargo.toml b/Cargo.toml index 3cdd3d2b..d67464f1 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -3,7 +3,7 @@ name = "vaultwarden" version = "1.0.0" authors = ["Daniel García "] edition = "2021" -rust-version = "1.56" +rust-version = "1.58.1" resolver = "2" repository = "https://github.com/dani-garcia/vaultwarden" @@ -27,79 +27,15 @@ vendored_openssl = ["openssl/vendored"] unstable = [] [target."cfg(not(windows))".dependencies] -syslog = "4.0.1" +# Logging +syslog = "4.0.1" # Needs to be v4 until fern is updated [dependencies] -# Web framework -rocket = { version = "0.5.0-rc.1", features = ["tls", "json"], default-features = false } - -# Async futures -futures = "0.3.19" -tokio = { version = "1.16.1", features = ["rt-multi-thread", "fs", "io-util", "parking_lot"] } - - # HTTP client -reqwest = { version = "0.11.9", features = ["stream", "json", "gzip", "brotli", "socks", "cookies", "trust-dns"] } -bytes = "1.1.0" - -# Used for custom short lived cookie jar -cookie = "0.15.1" -cookie_store = "0.15.1" -url = "2.2.2" - -# WebSockets library -ws = { version = "0.11.1", package = "parity-ws" } - -# MessagePack library -rmpv = "1.0.0" - -# Concurrent hashmap implementation -chashmap = "2.2.2" - -# A generic serialization/deserialization framework -serde = { version = "1.0.136", features = ["derive"] } -serde_json = "1.0.78" - # Logging log = "0.4.14" fern = { version = "0.6.0", features = ["syslog-4"] } - -# A safe, extensible ORM and Query builder -diesel = { version = "1.4.8", features = [ "chrono", "r2d2"] } -diesel_migrations = "1.4.0" - -# Bundled SQLite -libsqlite3-sys = { version = "0.22.2", features = ["bundled"], optional = true } - -# Crypto-related libraries -rand 
= "0.8.4" -ring = "0.16.20" - -# UUID generation -uuid = { version = "0.8.2", features = ["v4"] } - -# Date and time libraries -chrono = { version = "0.4.19", features = ["serde"] } -chrono-tz = "0.6.1" -time = "0.2.27" - -# Job scheduler -job_scheduler = "1.2.1" - -# TOTP library -totp-lite = "1.0.3" - -# Data encoding library -data-encoding = "2.3.2" - -# JWT library -jsonwebtoken = "7.2.0" - -# U2F library -u2f = "0.2.0" -webauthn-rs = "0.3.2" - -# Yubico Library -yubico = { version = "0.10.0", features = ["online-tokio"], default-features = false } +tracing = { version = "0.1.31", features = ["log"] } # Needed to have lettre and webauthn-rs trace logging to work +backtrace = "0.3.64" # Logging panics to logfile instead stderr only # A `dotenv` implementation for Rust dotenv = { version = "0.15.0", default-features = false } @@ -111,41 +47,100 @@ once_cell = "1.9.0" num-traits = "0.2.14" num-derive = "0.3.3" +# Web framework +rocket = { version = "0.5.0-rc.1", features = ["tls", "json"], default-features = false } + +# WebSockets libraries +ws = { version = "0.11.1", package = "parity-ws" } +rmpv = "1.0.0" # MessagePack library +chashmap = "2.2.2" # Concurrent hashmap implementation + +# Async futures +futures = "0.3.21" +tokio = { version = "1.17.0", features = ["rt-multi-thread", "fs", "io-util", "parking_lot"] } + +# A generic serialization/deserialization framework +serde = { version = "1.0.136", features = ["derive"] } +serde_json = "1.0.79" + +# A safe, extensible ORM and Query builder +diesel = { version = "1.4.8", features = [ "chrono", "r2d2"] } +diesel_migrations = "1.4.0" + +# Bundled SQLite +libsqlite3-sys = { version = "0.22.2", features = ["bundled"], optional = true } + +# Crypto-related libraries +rand = "0.8.5" +ring = "0.16.20" + +# UUID generation +uuid = { version = "0.8.2", features = ["v4"] } + +# Date and time libraries +chrono = { version = "0.4.19", features = ["clock", "serde"], default-features = false } +chrono-tz = "0.6.1" +time = 
"0.2.27" + +# Job scheduler +job_scheduler = "1.2.1" + +# Data encoding library Hex/Base32/Base64 +data-encoding = "2.3.2" + +# JWT library +jsonwebtoken = "7.2.0" + +# TOTP library +totp-lite = "1.0.3" + +# Yubico Library +yubico = { version = "0.10.0", features = ["online-tokio"], default-features = false } + +# U2F libraries +u2f = "0.2.0" +webauthn-rs = "0.3.2" + +# Handling of URL's for WebAuthn +url = "2.2.2" + # Email libraries -tracing = { version = "0.1.29", features = ["log"] } # Needed to have lettre trace logging used when SMTP_DEBUG is enabled. lettre = { version = "0.10.0-rc.4", features = ["smtp-transport", "builder", "serde", "native-tls", "hostname", "tracing"], default-features = false } +idna = "0.2.3" # Punycode conversion +percent-encoding = "2.1.0" # URL encoding library used for URL's in the emails # Template library handlebars = { version = "4.2.1", features = ["dir_source"] } +# HTTP client +reqwest = { version = "0.11.9", features = ["stream", "json", "gzip", "brotli", "socks", "cookies", "trust-dns"] } + # For favicon extraction from main website html5ever = "0.25.1" markup5ever_rcdom = "0.1.0" regex = { version = "1.5.4", features = ["std", "perf", "unicode-perl"], default-features = false } data-url = "0.1.1" +bytes = "1.1.0" + +# Used for custom short lived cookie jar during favicon extraction +cookie = "0.15.1" +cookie_store = "0.15.1" # Used by U2F, JWT and Postgres openssl = "0.10.38" -# URL encoding library -percent-encoding = "2.1.0" -# Punycode conversion -idna = "0.2.3" - # CLI argument parsing pico-args = "0.4.2" -# Logging panics to logfile instead stderr only -backtrace = "0.3.64" - # Macro ident concatenation paste = "1.0.6" -governor = "0.4.1" +governor = "0.4.2" +# Capture CTRL+C ctrlc = { version = "3.2.1", features = ["termination"] } [patch.crates-io] -rocket = { git = 'https://github.com/SergioBenitez/Rocket', rev = '8cae077ba1d54b92cdef3e171a730b819d5eeb8e' } +rocket = { git = 
'https://github.com/SergioBenitez/Rocket', rev = '66d18bf66517e2765494d082629e9b9748ff8ad6' } # The maintainer of the `job_scheduler` crate doesn't seem to have responded # to any issues or PRs for almost a year (as of April 2021). This hopefully diff --git a/docker/Dockerfile.buildx b/docker/Dockerfile.buildx index ed0d23b3..c250312c 100644 --- a/docker/Dockerfile.buildx +++ b/docker/Dockerfile.buildx @@ -1,3 +1,4 @@ +# syntax=docker/dockerfile:1 # The cross-built images have the build arch (`amd64`) embedded in the image # manifest, rather than the target arch. For example: # diff --git a/docker/Dockerfile.j2 b/docker/Dockerfile.j2 index 2cffc647..196af08d 100644 --- a/docker/Dockerfile.j2 +++ b/docker/Dockerfile.j2 @@ -6,19 +6,19 @@ {% set build_stage_base_image = "rust:1.58-buster" %} {% if "alpine" in target_file %} {% if "amd64" in target_file %} -{% set build_stage_base_image = "blackdex/rust-musl:x86_64-musl-nightly-2022-01-23" %} +{% set build_stage_base_image = "blackdex/rust-musl:x86_64-musl-stable" %} {% set runtime_stage_base_image = "alpine:3.15" %} {% set package_arch_target = "x86_64-unknown-linux-musl" %} {% elif "armv7" in target_file %} -{% set build_stage_base_image = "blackdex/rust-musl:armv7-musleabihf-nightly-2022-01-23" %} +{% set build_stage_base_image = "blackdex/rust-musl:armv7-musleabihf-stable" %} {% set runtime_stage_base_image = "balenalib/armv7hf-alpine:3.15" %} {% set package_arch_target = "armv7-unknown-linux-musleabihf" %} {% elif "armv6" in target_file %} -{% set build_stage_base_image = "blackdex/rust-musl:arm-musleabi-nightly-2022-01-23" %} +{% set build_stage_base_image = "blackdex/rust-musl:arm-musleabi-stable" %} {% set runtime_stage_base_image = "balenalib/rpi-alpine:3.15" %} {% set package_arch_target = "arm-unknown-linux-musleabi" %} {% elif "arm64" in target_file %} -{% set build_stage_base_image = "blackdex/rust-musl:aarch64-musl-nightly-2022-01-23" %} +{% set build_stage_base_image = 
"blackdex/rust-musl:aarch64-musl-stable" %} {% set runtime_stage_base_image = "balenalib/aarch64-alpine:3.15" %} {% set package_arch_target = "aarch64-unknown-linux-musl" %} {% endif %} @@ -194,9 +194,9 @@ RUN musl-strip target/{{ package_arch_target }}/release/vaultwarden # because we already have a binary built FROM {{ runtime_stage_base_image }} -ENV ROCKET_ENV="staging" \ - ROCKET_PORT=80 \ - ROCKET_WORKERS=10 +ENV ROCKET_PROFILE="release" \ + ROCKET_ADDRESS=0.0.0.0 \ + ROCKET_PORT=80 {%- if "alpine" in runtime_stage_base_image %} \ SSL_CERT_DIR=/etc/ssl/certs {% endif %} diff --git a/docker/amd64/Dockerfile b/docker/amd64/Dockerfile index 3af0f411..c588c8e9 100644 --- a/docker/amd64/Dockerfile +++ b/docker/amd64/Dockerfile @@ -89,9 +89,9 @@ RUN cargo build --features ${DB} --release # because we already have a binary built FROM debian:buster-slim -ENV ROCKET_ENV="staging" \ - ROCKET_PORT=80 \ - ROCKET_WORKERS=10 +ENV ROCKET_PROFILE="release" \ + ROCKET_ADDRESS=0.0.0.0 \ + ROCKET_PORT=80 # Create data folder and Install needed libraries diff --git a/docker/amd64/Dockerfile.alpine b/docker/amd64/Dockerfile.alpine index 189f50e6..9266da29 100644 --- a/docker/amd64/Dockerfile.alpine +++ b/docker/amd64/Dockerfile.alpine @@ -27,7 +27,7 @@ FROM vaultwarden/web-vault@sha256:9b82318d553d72f091e8755f5aff80eed495f90bbe5b0703522953480f5c2fba as vault ########################## BUILD IMAGE ########################## -FROM blackdex/rust-musl:x86_64-musl-nightly-2022-01-23 as build +FROM blackdex/rust-musl:x86_64-musl-stable as build @@ -83,9 +83,9 @@ RUN cargo build --features ${DB} --release --target=x86_64-unknown-linux-musl # because we already have a binary built FROM alpine:3.15 -ENV ROCKET_ENV="staging" \ +ENV ROCKET_PROFILE="release" \ + ROCKET_ADDRESS=0.0.0.0 \ ROCKET_PORT=80 \ - ROCKET_WORKERS=10 \ SSL_CERT_DIR=/etc/ssl/certs diff --git a/docker/amd64/Dockerfile.buildx b/docker/amd64/Dockerfile.buildx index 05b6b71d..aa61d037 100644 --- 
a/docker/amd64/Dockerfile.buildx +++ b/docker/amd64/Dockerfile.buildx @@ -89,9 +89,9 @@ RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/. # because we already have a binary built FROM debian:buster-slim -ENV ROCKET_ENV="staging" \ - ROCKET_PORT=80 \ - ROCKET_WORKERS=10 +ENV ROCKET_PROFILE="release" \ + ROCKET_ADDRESS=0.0.0.0 \ + ROCKET_PORT=80 # Create data folder and Install needed libraries diff --git a/docker/amd64/Dockerfile.buildx.alpine b/docker/amd64/Dockerfile.buildx.alpine index 066b8fe1..e0afd4fd 100644 --- a/docker/amd64/Dockerfile.buildx.alpine +++ b/docker/amd64/Dockerfile.buildx.alpine @@ -27,7 +27,7 @@ FROM vaultwarden/web-vault@sha256:9b82318d553d72f091e8755f5aff80eed495f90bbe5b0703522953480f5c2fba as vault ########################## BUILD IMAGE ########################## -FROM blackdex/rust-musl:x86_64-musl-nightly-2022-01-23 as build +FROM blackdex/rust-musl:x86_64-musl-stable as build @@ -83,9 +83,9 @@ RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/. 
# because we already have a binary built FROM alpine:3.15 -ENV ROCKET_ENV="staging" \ +ENV ROCKET_PROFILE="release" \ + ROCKET_ADDRESS=0.0.0.0 \ ROCKET_PORT=80 \ - ROCKET_WORKERS=10 \ SSL_CERT_DIR=/etc/ssl/certs diff --git a/docker/arm64/Dockerfile b/docker/arm64/Dockerfile index d3a32dc4..40bfccc5 100644 --- a/docker/arm64/Dockerfile +++ b/docker/arm64/Dockerfile @@ -109,9 +109,9 @@ RUN cargo build --features ${DB} --release --target=aarch64-unknown-linux-gnu # because we already have a binary built FROM balenalib/aarch64-debian:buster -ENV ROCKET_ENV="staging" \ - ROCKET_PORT=80 \ - ROCKET_WORKERS=10 +ENV ROCKET_PROFILE="release" \ + ROCKET_ADDRESS=0.0.0.0 \ + ROCKET_PORT=80 # hadolint ignore=DL3059 RUN [ "cross-build-start" ] diff --git a/docker/arm64/Dockerfile.alpine b/docker/arm64/Dockerfile.alpine index 6890d7bf..b233ac1b 100644 --- a/docker/arm64/Dockerfile.alpine +++ b/docker/arm64/Dockerfile.alpine @@ -27,7 +27,7 @@ FROM vaultwarden/web-vault@sha256:9b82318d553d72f091e8755f5aff80eed495f90bbe5b0703522953480f5c2fba as vault ########################## BUILD IMAGE ########################## -FROM blackdex/rust-musl:aarch64-musl-nightly-2022-01-23 as build +FROM blackdex/rust-musl:aarch64-musl-stable as build @@ -83,9 +83,9 @@ RUN cargo build --features ${DB} --release --target=aarch64-unknown-linux-musl # because we already have a binary built FROM balenalib/aarch64-alpine:3.15 -ENV ROCKET_ENV="staging" \ +ENV ROCKET_PROFILE="release" \ + ROCKET_ADDRESS=0.0.0.0 \ ROCKET_PORT=80 \ - ROCKET_WORKERS=10 \ SSL_CERT_DIR=/etc/ssl/certs diff --git a/docker/arm64/Dockerfile.buildx b/docker/arm64/Dockerfile.buildx index b93cd90e..27b97333 100644 --- a/docker/arm64/Dockerfile.buildx +++ b/docker/arm64/Dockerfile.buildx @@ -109,9 +109,9 @@ RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/. 
# because we already have a binary built FROM balenalib/aarch64-debian:buster -ENV ROCKET_ENV="staging" \ - ROCKET_PORT=80 \ - ROCKET_WORKERS=10 +ENV ROCKET_PROFILE="release" \ + ROCKET_ADDRESS=0.0.0.0 \ + ROCKET_PORT=80 # hadolint ignore=DL3059 RUN [ "cross-build-start" ] diff --git a/docker/arm64/Dockerfile.buildx.alpine b/docker/arm64/Dockerfile.buildx.alpine index dd4107c6..521fbd8f 100644 --- a/docker/arm64/Dockerfile.buildx.alpine +++ b/docker/arm64/Dockerfile.buildx.alpine @@ -27,7 +27,7 @@ FROM vaultwarden/web-vault@sha256:9b82318d553d72f091e8755f5aff80eed495f90bbe5b0703522953480f5c2fba as vault ########################## BUILD IMAGE ########################## -FROM blackdex/rust-musl:aarch64-musl-nightly-2022-01-23 as build +FROM blackdex/rust-musl:aarch64-musl-stable as build @@ -83,9 +83,9 @@ RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/. # because we already have a binary built FROM balenalib/aarch64-alpine:3.15 -ENV ROCKET_ENV="staging" \ +ENV ROCKET_PROFILE="release" \ + ROCKET_ADDRESS=0.0.0.0 \ ROCKET_PORT=80 \ - ROCKET_WORKERS=10 \ SSL_CERT_DIR=/etc/ssl/certs diff --git a/docker/armv6/Dockerfile b/docker/armv6/Dockerfile index e9e6d4bb..8cf59c4e 100644 --- a/docker/armv6/Dockerfile +++ b/docker/armv6/Dockerfile @@ -109,9 +109,9 @@ RUN cargo build --features ${DB} --release --target=arm-unknown-linux-gnueabi # because we already have a binary built FROM balenalib/rpi-debian:buster -ENV ROCKET_ENV="staging" \ - ROCKET_PORT=80 \ - ROCKET_WORKERS=10 +ENV ROCKET_PROFILE="release" \ + ROCKET_ADDRESS=0.0.0.0 \ + ROCKET_PORT=80 # hadolint ignore=DL3059 RUN [ "cross-build-start" ] diff --git a/docker/armv6/Dockerfile.alpine b/docker/armv6/Dockerfile.alpine index 19f7f936..bdfdc612 100644 --- a/docker/armv6/Dockerfile.alpine +++ b/docker/armv6/Dockerfile.alpine @@ -27,7 +27,7 @@ FROM vaultwarden/web-vault@sha256:9b82318d553d72f091e8755f5aff80eed495f90bbe5b0703522953480f5c2fba as vault ########################## BUILD IMAGE 
########################## -FROM blackdex/rust-musl:arm-musleabi-nightly-2022-01-23 as build +FROM blackdex/rust-musl:arm-musleabi-stable as build @@ -83,9 +83,9 @@ RUN cargo build --features ${DB} --release --target=arm-unknown-linux-musleabi # because we already have a binary built FROM balenalib/rpi-alpine:3.15 -ENV ROCKET_ENV="staging" \ +ENV ROCKET_PROFILE="release" \ + ROCKET_ADDRESS=0.0.0.0 \ ROCKET_PORT=80 \ - ROCKET_WORKERS=10 \ SSL_CERT_DIR=/etc/ssl/certs diff --git a/docker/armv6/Dockerfile.buildx b/docker/armv6/Dockerfile.buildx index 7d6131bf..6c6eb562 100644 --- a/docker/armv6/Dockerfile.buildx +++ b/docker/armv6/Dockerfile.buildx @@ -109,9 +109,9 @@ RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/. # because we already have a binary built FROM balenalib/rpi-debian:buster -ENV ROCKET_ENV="staging" \ - ROCKET_PORT=80 \ - ROCKET_WORKERS=10 +ENV ROCKET_PROFILE="release" \ + ROCKET_ADDRESS=0.0.0.0 \ + ROCKET_PORT=80 # hadolint ignore=DL3059 RUN [ "cross-build-start" ] diff --git a/docker/armv6/Dockerfile.buildx.alpine b/docker/armv6/Dockerfile.buildx.alpine index 5e9d68f9..369dfb4b 100644 --- a/docker/armv6/Dockerfile.buildx.alpine +++ b/docker/armv6/Dockerfile.buildx.alpine @@ -27,7 +27,7 @@ FROM vaultwarden/web-vault@sha256:9b82318d553d72f091e8755f5aff80eed495f90bbe5b0703522953480f5c2fba as vault ########################## BUILD IMAGE ########################## -FROM blackdex/rust-musl:arm-musleabi-nightly-2022-01-23 as build +FROM blackdex/rust-musl:arm-musleabi-stable as build @@ -83,9 +83,9 @@ RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/. 
# because we already have a binary built FROM balenalib/rpi-alpine:3.15 -ENV ROCKET_ENV="staging" \ +ENV ROCKET_PROFILE="release" \ + ROCKET_ADDRESS=0.0.0.0 \ ROCKET_PORT=80 \ - ROCKET_WORKERS=10 \ SSL_CERT_DIR=/etc/ssl/certs diff --git a/docker/armv7/Dockerfile b/docker/armv7/Dockerfile index 3ac3f106..5b26b5e1 100644 --- a/docker/armv7/Dockerfile +++ b/docker/armv7/Dockerfile @@ -109,9 +109,9 @@ RUN cargo build --features ${DB} --release --target=armv7-unknown-linux-gnueabih # because we already have a binary built FROM balenalib/armv7hf-debian:buster -ENV ROCKET_ENV="staging" \ - ROCKET_PORT=80 \ - ROCKET_WORKERS=10 +ENV ROCKET_PROFILE="release" \ + ROCKET_ADDRESS=0.0.0.0 \ + ROCKET_PORT=80 # hadolint ignore=DL3059 RUN [ "cross-build-start" ] diff --git a/docker/armv7/Dockerfile.alpine b/docker/armv7/Dockerfile.alpine index 1ed36519..d00017bd 100644 --- a/docker/armv7/Dockerfile.alpine +++ b/docker/armv7/Dockerfile.alpine @@ -27,7 +27,7 @@ FROM vaultwarden/web-vault@sha256:9b82318d553d72f091e8755f5aff80eed495f90bbe5b0703522953480f5c2fba as vault ########################## BUILD IMAGE ########################## -FROM blackdex/rust-musl:armv7-musleabihf-nightly-2022-01-23 as build +FROM blackdex/rust-musl:armv7-musleabihf-stable as build @@ -86,9 +86,9 @@ RUN musl-strip target/armv7-unknown-linux-musleabihf/release/vaultwarden # because we already have a binary built FROM balenalib/armv7hf-alpine:3.15 -ENV ROCKET_ENV="staging" \ +ENV ROCKET_PROFILE="release" \ + ROCKET_ADDRESS=0.0.0.0 \ ROCKET_PORT=80 \ - ROCKET_WORKERS=10 \ SSL_CERT_DIR=/etc/ssl/certs diff --git a/docker/armv7/Dockerfile.buildx b/docker/armv7/Dockerfile.buildx index 8df0f309..8c36f605 100644 --- a/docker/armv7/Dockerfile.buildx +++ b/docker/armv7/Dockerfile.buildx @@ -109,9 +109,9 @@ RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/. 
# because we already have a binary built FROM balenalib/armv7hf-debian:buster -ENV ROCKET_ENV="staging" \ - ROCKET_PORT=80 \ - ROCKET_WORKERS=10 +ENV ROCKET_PROFILE="release" \ + ROCKET_ADDRESS=0.0.0.0 \ + ROCKET_PORT=80 # hadolint ignore=DL3059 RUN [ "cross-build-start" ] diff --git a/docker/armv7/Dockerfile.buildx.alpine b/docker/armv7/Dockerfile.buildx.alpine index 56d8e7ff..a80405d0 100644 --- a/docker/armv7/Dockerfile.buildx.alpine +++ b/docker/armv7/Dockerfile.buildx.alpine @@ -27,7 +27,7 @@ FROM vaultwarden/web-vault@sha256:9b82318d553d72f091e8755f5aff80eed495f90bbe5b0703522953480f5c2fba as vault ########################## BUILD IMAGE ########################## -FROM blackdex/rust-musl:armv7-musleabihf-nightly-2022-01-23 as build +FROM blackdex/rust-musl:armv7-musleabihf-stable as build @@ -86,9 +86,9 @@ RUN musl-strip target/armv7-unknown-linux-musleabihf/release/vaultwarden # because we already have a binary built FROM balenalib/armv7hf-alpine:3.15 -ENV ROCKET_ENV="staging" \ +ENV ROCKET_PROFILE="release" \ + ROCKET_ADDRESS=0.0.0.0 \ ROCKET_PORT=80 \ - ROCKET_WORKERS=10 \ SSL_CERT_DIR=/etc/ssl/certs diff --git a/src/main.rs b/src/main.rs index cb382723..08ac9d7a 100644 --- a/src/main.rs +++ b/src/main.rs @@ -329,7 +329,6 @@ async fn launch_rocket(pool: db::DbPool, extra_debug: bool) -> Result<(), Error> let basepath = &CONFIG.domain_path(); let mut config = rocket::Config::from(rocket::Config::figment()); - config.address = std::net::IpAddr::V4(std::net::Ipv4Addr::UNSPECIFIED); // TODO: Allow this to be changed, keep ROCKET_ADDRESS for compat config.temp_dir = canonicalize(CONFIG.tmp_folder()).unwrap().into(); config.limits = Limits::new() // .limit("json", 10.megabytes()) From 42136a70973f60086749c62439c6a965d4589c02 Mon Sep 17 00:00:00 2001 From: BlackDex Date: Tue, 22 Feb 2022 20:48:00 +0100 Subject: [PATCH 8/8] Favicon, SMTP and misc updates Favicon: - Replaced HTML tokenizer, much faster now. - Caching the domain blacklist function. 
- Almost all functions are async now. - Fixed bug on minimizing data to parse - Changed maximum icon download size to 5MB to match Bitwarden - Added `apple-touch-icon.png` as a second fallback besides `favicon.ico` SMTP: - Deprecated SMTP_SSL and SMTP_EXPLICIT_TLS, replaced with SMTP_SECURITY Misc: - Fixed issue when `resolv.conf` contains errors and trust-dns panics (Fixes #2283) - Updated Javscript and CSS files for admin interface - Fixed an issue with the /admin interface which did not cleared the login cookie correctly - Prevent websocket notifications during org import, this caused a lot of traffic, and slowed down the import. This is also the same as Bitwarden which does not trigger this refresh via websockets. Rust: - Updated to use v1.59 - Use the new `strip` option and enabled to strip `debuginfo` - Enabled `lto` with `thin` - Removed the strip RUN from the alpine armv7, this is now done automatically --- .env.template | 3 +- .pre-commit-config.yaml | 2 +- Cargo.lock | 356 +- Cargo.toml | 14 +- docker/Dockerfile.j2 | 6 - docker/armv7/Dockerfile.alpine | 2 - docker/armv7/Dockerfile.buildx.alpine | 2 - src/api/admin.rs | 4 +- src/api/core/organizations.rs | 4 +- src/api/icons.rs | 534 ++- src/config.rs | 33 +- src/mail.rs | 4 +- src/static/scripts/bootstrap-native.js | 5491 ++++++++++++++++-------- src/static/scripts/datatables.css | 38 +- src/static/scripts/datatables.js | 69 +- src/util.rs | 8 +- 16 files changed, 4390 insertions(+), 2180 deletions(-) diff --git a/.env.template b/.env.template index 8da88cdc..2d0ea32b 100644 --- a/.env.template +++ b/.env.template @@ -331,9 +331,8 @@ # SMTP_HOST=smtp.domain.tld # SMTP_FROM=vaultwarden@domain.tld # SMTP_FROM_NAME=Vaultwarden +# SMTP_SECURITY=starttls # ("starttls", "force_tls", "off") Enable a secure connection. 
Default is "starttls" (Explicit - ports 587 or 25), "force_tls" (Implicit - port 465) or "off", no encryption (port 25) # SMTP_PORT=587 # Ports 587 (submission) and 25 (smtp) are standard without encryption and with encryption via STARTTLS (Explicit TLS). Port 465 is outdated and used with Implicit TLS. -# SMTP_SSL=true # (Explicit) - This variable by default configures Explicit STARTTLS, it will upgrade an insecure connection to a secure one. Unless SMTP_EXPLICIT_TLS is set to true. Either port 587 or 25 are default. -# SMTP_EXPLICIT_TLS=true # (Implicit) - N.B. This variable configures Implicit TLS. It's currently mislabelled (see bug #851) - SMTP_SSL Needs to be set to true for this option to work. Usually port 465 is used here. # SMTP_USERNAME=username # SMTP_PASSWORD=password # SMTP_TIMEOUT=15 diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index b26d8445..f18ddbf1 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -1,7 +1,7 @@ --- repos: - repo: https://github.com/pre-commit/pre-commit-hooks - rev: v4.0.1 + rev: v4.1.0 hooks: - id: check-yaml - id: check-json diff --git a/Cargo.lock b/Cargo.lock index 9516efe5..9de3455d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -99,6 +99,25 @@ dependencies = [ "tokio", ] +[[package]] +name = "async-mutex" +version = "1.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "479db852db25d9dbf6204e6cb6253698f175c15726470f78af0d918e99d6156e" +dependencies = [ + "event-listener", +] + +[[package]] +name = "async-rwlock" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "261803dcc39ba9e72760ba6e16d0199b1eef9fc44e81bffabbebb9f5aea3906c" +dependencies = [ + "async-mutex", + "event-listener", +] + [[package]] name = "async-stream" version = "0.3.2" @@ -302,6 +321,40 @@ version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"c4872d67bab6358e59559027aa3b9157c53d9358c51423c17554809a8858e0f8" +[[package]] +name = "cached" +version = "0.30.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "af4dfac631a8e77b2f327f7852bb6172771f5279c4512efe79fad6067b37be3d" +dependencies = [ + "async-mutex", + "async-rwlock", + "async-trait", + "cached_proc_macro", + "cached_proc_macro_types", + "futures", + "hashbrown", + "once_cell", +] + +[[package]] +name = "cached_proc_macro" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "725f434d6da2814b989bd905c62ca28a9383feff7440210dc279665fbbbc9511" +dependencies = [ + "cached_proc_macro_types", + "darling", + "quote", + "syn", +] + +[[package]] +name = "cached_proc_macro_types" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3a4f925191b4367301851c6d99b09890311d74b0d43f274c0b34c86d308a3663" + [[package]] name = "cc" version = "1.0.73" @@ -352,7 +405,7 @@ checksum = "58549f1842da3080ce63002102d5bc954c7bc843d4f47818e642abdc36253552" dependencies = [ "chrono", "chrono-tz-build", - "phf 0.10.1", + "phf", ] [[package]] @@ -362,8 +415,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "db058d493fb2f65f41861bfed7e3fe6335264a9f0f92710cab5bdf01fef09069" dependencies = [ "parse-zoneinfo", - "phf 0.10.1", - "phf_codegen 0.10.0", + "phf", + "phf_codegen", ] [[package]] @@ -530,6 +583,41 @@ dependencies = [ "winapi 0.3.9", ] +[[package]] +name = "darling" +version = "0.13.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d0d720b8683f8dd83c65155f0530560cba68cd2bf395f6513a483caee57ff7f4" +dependencies = [ + "darling_core", + "darling_macro", +] + +[[package]] +name = "darling_core" +version = "0.13.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7a340f241d2ceed1deb47ae36c4144b2707ec7dd0b649f894cb39bb595986324" +dependencies = [ + "fnv", + "ident_case", + 
"proc-macro2", + "quote", + "strsim", + "syn", +] + +[[package]] +name = "darling_macro" +version = "0.13.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "72c41b3b7352feb3211a0d743dc5700a4e3b60f51bd2b368892d1e0f9a95f44b" +dependencies = [ + "darling_core", + "quote", + "syn", +] + [[package]] name = "dashmap" version = "5.1.0" @@ -685,9 +773,9 @@ dependencies = [ [[package]] name = "enum-as-inner" -version = "0.3.3" +version = "0.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7c5f0096a91d210159eceb2ff5e1c4da18388a170e1e3ce948aac9c8fdbbf595" +checksum = "570d109b813e904becc80d8d5da38376818a143348413f7149f1340fe04754d4" dependencies = [ "heck", "proc-macro2", @@ -704,6 +792,12 @@ dependencies = [ "backtrace", ] +[[package]] +name = "event-listener" +version = "2.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "77f3309417938f28bf8228fcff79a4a37103981e3e186d2ccd19c74b38f4eb71" + [[package]] name = "fake-simd" version = "0.1.2" @@ -808,16 +902,6 @@ version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3dcaa9ae7725d12cdb85b3ad99a434db70b468c09ded17e012d86b5c1010f7a7" -[[package]] -name = "futf" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "df420e2e84819663797d1ec6544b13c5be84629e7bb00dc960d6917db2987843" -dependencies = [ - "mac", - "new_debug_unreachable", -] - [[package]] name = "futures" version = "0.3.21" @@ -958,9 +1042,9 @@ dependencies = [ [[package]] name = "getrandom" -version = "0.2.4" +version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "418d37c8b1d42553c93648be529cb70f920d3baf8ef469b74b9638df426e0b4c" +checksum = "d39cd93900197114fa1fcb7ae84ca742095eed9442088988ae74fa744e930e77" dependencies = [ "cfg-if 1.0.0", "libc", @@ -1054,12 +1138,9 @@ checksum = "ab5ef0d4909ef3724cc8cce6ccc8572c5c817592e9285f5464f8e86f8bd3726e" 
[[package]] name = "heck" -version = "0.3.3" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6d621efb26863f0e9924c6ac577e8275e5e6b77455db64ffa6c65c904e9e132c" -dependencies = [ - "unicode-segmentation", -] +checksum = "2540771e65fc8cb83cd6e8a237f70c319bd5c29f78ed1084ba5d50eeac86f7f9" [[package]] name = "hermit-abi" @@ -1120,17 +1201,12 @@ dependencies = [ ] [[package]] -name = "html5ever" -version = "0.25.1" +name = "html5gum" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aafcf38a1a36118242d29b92e1b08ef84e67e4a5ed06e0a80be20e6a32bfed6b" +checksum = "2dad48b66db55322add2819ae1d7bda0c32f3415269a08330679dbc8b0afeb30" dependencies = [ - "log", - "mac", - "markup5ever", - "proc-macro2", - "quote", - "syn", + "jetscii", ] [[package]] @@ -1204,6 +1280,12 @@ dependencies = [ "tokio-native-tls", ] +[[package]] +name = "ident_case" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b9e0384b61958566e926dc50660321d12159025e767c18e043daf26b70104c39" + [[package]] name = "idna" version = "0.1.5" @@ -1285,6 +1367,12 @@ version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1aab8fc367588b89dcee83ab0fd66b72b50b72fa1904d7095045ace2b0c81c35" +[[package]] +name = "jetscii" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c9447923c57a8a2d5c1b0875cdf96a6324275df728b498f2ede0e5cbde088a15" + [[package]] name = "job_scheduler" version = "1.2.1" @@ -1363,9 +1451,9 @@ dependencies = [ [[package]] name = "libc" -version = "0.2.118" +version = "0.2.119" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "06e509672465a0504304aa87f9f176f2b2b716ed8fb105ebe5c02dc6dce96a94" +checksum = "1bf2e165bb3457c8e098ea76f3e3bc9db55f87aa90d52d0e6be741470916aaa4" [[package]] name = "libsqlite3-sys" @@ -1426,12 +1514,6 @@ dependencies = [ 
"linked-hash-map", ] -[[package]] -name = "mac" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c41e0c4fef86961ac6d6f8a82609f55f31b05e4fce149ac5710e439df7619ba4" - [[package]] name = "mach" version = "0.3.2" @@ -1447,32 +1529,6 @@ version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3e2e65a1a2e43cfcb47a895c4c8b10d1f4a61097f9f254f183aee60cad9c651d" -[[package]] -name = "markup5ever" -version = "0.10.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a24f40fb03852d1cdd84330cddcaf98e9ec08a7b7768e952fad3b4cf048ec8fd" -dependencies = [ - "log", - "phf 0.8.0", - "phf_codegen 0.8.0", - "string_cache", - "string_cache_codegen", - "tendril", -] - -[[package]] -name = "markup5ever_rcdom" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f015da43bcd8d4f144559a3423f4591d69b8ce0652c905374da7205df336ae2b" -dependencies = [ - "html5ever", - "markup5ever", - "tendril", - "xml5ever", -] - [[package]] name = "match_cfg" version = "0.1.0" @@ -1682,12 +1738,6 @@ dependencies = [ "winapi 0.3.9", ] -[[package]] -name = "new_debug_unreachable" -version = "1.0.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e4a24736216ec316047a1fc4252e27dabb04218aa4a3f37c6e7ddbf1f9782b54" - [[package]] name = "nix" version = "0.23.1" @@ -2073,32 +2123,13 @@ dependencies = [ "sha-1 0.8.2", ] -[[package]] -name = "phf" -version = "0.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3dfb61232e34fcb633f43d12c58f83c1df82962dcdfa565a4e866ffc17dafe12" -dependencies = [ - "phf_shared 0.8.0", -] - [[package]] name = "phf" version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fabbf1ead8a5bcbc20f5f8b939ee3f5b0f6f281b6ad3468b84656b658b455259" dependencies = [ - "phf_shared 0.10.0", -] - -[[package]] -name = "phf_codegen" -version = "0.8.0" 
-source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cbffee61585b0411840d3ece935cce9cb6321f01c45477d30066498cd5e1a815" -dependencies = [ - "phf_generator 0.8.0", - "phf_shared 0.8.0", + "phf_shared", ] [[package]] @@ -2107,18 +2138,8 @@ version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4fb1c3a8bc4dd4e5cfce29b44ffc14bedd2ee294559a294e2a4d4c9e9a6a13cd" dependencies = [ - "phf_generator 0.10.0", - "phf_shared 0.10.0", -] - -[[package]] -name = "phf_generator" -version = "0.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "17367f0cc86f2d25802b2c26ee58a7b23faeccf78a396094c13dced0d0182526" -dependencies = [ - "phf_shared 0.8.0", - "rand 0.7.3", + "phf_generator", + "phf_shared", ] [[package]] @@ -2127,19 +2148,10 @@ version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5d5285893bb5eb82e6aaf5d59ee909a06a16737a8970984dd7746ba9283498d6" dependencies = [ - "phf_shared 0.10.0", + "phf_shared", "rand 0.8.5", ] -[[package]] -name = "phf_shared" -version = "0.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c00cf8b9eafe68dde5e9eaa2cef8ee84a9336a47d566ec55ca16589633b65af7" -dependencies = [ - "siphasher", -] - [[package]] name = "phf_shared" version = "0.10.0" @@ -2201,12 +2213,6 @@ dependencies = [ "vcpkg", ] -[[package]] -name = "precomputed-hash" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "925383efa346730478fb4838dbe9137d2a47675ad789c546d150a6e1dd4ab31c" - [[package]] name = "proc-macro-hack" version = "0.5.19" @@ -2331,7 +2337,6 @@ dependencies = [ "rand_chacha 0.2.2", "rand_core 0.5.1", "rand_hc", - "rand_pcg", ] [[package]] @@ -2395,7 +2400,7 @@ version = "0.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d34f1408f55294453790c48b2f1ebbb1c5b4b7563eb1f418bcfcfdbb06ebb4e7" dependencies = [ - "getrandom 0.2.4", 
+ "getrandom 0.2.5", ] [[package]] @@ -2407,15 +2412,6 @@ dependencies = [ "rand_core 0.5.1", ] -[[package]] -name = "rand_pcg" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "16abd0c1b639e9eb4d7c50c0b8100b0d0f849be2349829c740fe8e6eb4816429" -dependencies = [ - "rand_core 0.5.1", -] - [[package]] name = "raw-cpuid" version = "10.2.0" @@ -2589,7 +2585,7 @@ dependencies = [ [[package]] name = "rocket" version = "0.5.0-rc.1" -source = "git+https://github.com/SergioBenitez/Rocket?rev=66d18bf66517e2765494d082629e9b9748ff8ad6#66d18bf66517e2765494d082629e9b9748ff8ad6" +source = "git+https://github.com/SergioBenitez/Rocket?rev=91e3b4397a1637d0f55f23db712cf7bda0c7f891#91e3b4397a1637d0f55f23db712cf7bda0c7f891" dependencies = [ "async-stream", "async-trait", @@ -2627,7 +2623,7 @@ dependencies = [ [[package]] name = "rocket_codegen" version = "0.5.0-rc.1" -source = "git+https://github.com/SergioBenitez/Rocket?rev=66d18bf66517e2765494d082629e9b9748ff8ad6#66d18bf66517e2765494d082629e9b9748ff8ad6" +source = "git+https://github.com/SergioBenitez/Rocket?rev=91e3b4397a1637d0f55f23db712cf7bda0c7f891#91e3b4397a1637d0f55f23db712cf7bda0c7f891" dependencies = [ "devise", "glob", @@ -2642,7 +2638,7 @@ dependencies = [ [[package]] name = "rocket_http" version = "0.5.0-rc.1" -source = "git+https://github.com/SergioBenitez/Rocket?rev=66d18bf66517e2765494d082629e9b9748ff8ad6#66d18bf66517e2765494d082629e9b9748ff8ad6" +source = "git+https://github.com/SergioBenitez/Rocket?rev=91e3b4397a1637d0f55f23db712cf7bda0c7f891#91e3b4397a1637d0f55f23db712cf7bda0c7f891" dependencies = [ "cookie 0.16.0", "either", @@ -2656,6 +2652,7 @@ dependencies = [ "pin-project-lite", "ref-cast", "rustls", + "rustls-pemfile", "serde", "smallvec 1.8.0", "stable-pattern", @@ -2683,17 +2680,25 @@ dependencies = [ [[package]] name = "rustls" -version = "0.19.1" +version = "0.20.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"35edb675feee39aec9c99fa5ff985081995a06d594114ae14cbe797ad7b7a6d7" +checksum = "4fbfeb8d0ddb84706bc597a5574ab8912817c52a397f819e5b614e2265206921" dependencies = [ - "base64 0.13.0", "log", "ring", "sct", "webpki", ] +[[package]] +name = "rustls-pemfile" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1ee86d63972a7c661d1536fefe8c3c8407321c3df668891286de28abcd087360" +dependencies = [ + "base64 0.13.0", +] + [[package]] name = "rustversion" version = "1.0.6" @@ -2748,9 +2753,9 @@ checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd" [[package]] name = "sct" -version = "0.6.1" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b362b83898e0e69f38515b82ee15aa80636befe47c3b6d3d89a911e78fc228ce" +checksum = "d53dcdb7c9f8158937a7981b48accfd39a43af418591a5d008c7b22b5e1b7ca4" dependencies = [ "ring", "untrusted", @@ -3083,30 +3088,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "213701ba3370744dcd1a12960caa4843b3d68b4d1c0a5d575e0d65b2ee9d16c0" [[package]] -name = "string_cache" -version = "0.8.3" +name = "strsim" +version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "33994d0838dc2d152d17a62adf608a869b5e846b65b389af7f3dbc1de45c5b26" -dependencies = [ - "lazy_static", - "new_debug_unreachable", - "parking_lot 0.11.2", - "phf_shared 0.10.0", - "precomputed-hash", - "serde", -] - -[[package]] -name = "string_cache_codegen" -version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f24c8e5e19d22a726626f1a5e16fe15b132dcf21d10177fa5a45ce7962996b97" -dependencies = [ - "phf_generator 0.8.0", - "phf_shared 0.8.0", - "proc-macro2", - "quote", -] +checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623" [[package]] name = "subtle" @@ -3151,17 +3136,6 @@ dependencies = [ "winapi 0.3.9", ] -[[package]] -name = "tendril" -version = 
"0.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a9ef557cb397a4f0a5a3a628f06515f78563f2209e64d47055d9dc6052bf5e33" -dependencies = [ - "futf", - "mac", - "utf-8", -] - [[package]] name = "thiserror" version = "1.0.30" @@ -3324,9 +3298,9 @@ dependencies = [ [[package]] name = "tokio-rustls" -version = "0.22.0" +version = "0.23.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc6844de72e57df1980054b38be3a9f4702aba4858be64dd700181a8a6d0e1b6" +checksum = "a27d5f2b839802bd8267fa19b0530f5a08b9c08cd417976be2a65d130fe1c11b" dependencies = [ "rustls", "tokio", @@ -3588,12 +3562,6 @@ dependencies = [ "tinyvec", ] -[[package]] -name = "unicode-segmentation" -version = "1.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7e8820f5d777f6224dc4be3632222971ac30164d4a258d595640799554ebfd99" - [[package]] name = "unicode-xid" version = "0.2.2" @@ -3640,19 +3608,13 @@ dependencies = [ "serde", ] -[[package]] -name = "utf-8" -version = "0.7.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09cc8ee72d2a9becf2f2febe0205bbed8fc6615b7cb429ad062dc7b7ddd036a9" - [[package]] name = "uuid" version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bc5cf98d8186244414c848017f0e2676b3fcb46807f6668a97dfe67359a3c4b7" dependencies = [ - "getrandom 0.2.4", + "getrandom 0.2.5", ] [[package]] @@ -3667,6 +3629,7 @@ version = "1.0.0" dependencies = [ "backtrace", "bytes 1.1.0", + "cached", "chashmap", "chrono", "chrono-tz", @@ -3682,14 +3645,13 @@ dependencies = [ "futures", "governor", "handlebars", - "html5ever", + "html5gum", "idna 0.2.3", "job_scheduler", "jsonwebtoken", "lettre", "libsqlite3-sys", "log", - "markup5ever_rcdom", "num-derive", "num-traits", "once_cell", @@ -3860,9 +3822,9 @@ dependencies = [ [[package]] name = "webpki" -version = "0.21.4" +version = "0.22.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "b8e38c0608262c46d4a56202ebabdeb094cef7e560ca7a226c6bf055188aa4ea" +checksum = "f095d78192e208183081cc07bc5515ef55216397af48b873e5edcd72637fa1bd" dependencies = [ "ring", "untrusted", @@ -3988,18 +3950,6 @@ dependencies = [ "winapi-build", ] -[[package]] -name = "xml5ever" -version = "0.16.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9234163818fd8e2418fcde330655e757900d4236acd8cc70fef345ef91f6d865" -dependencies = [ - "log", - "mac", - "markup5ever", - "time 0.1.43", -] - [[package]] name = "yansi" version = "0.5.0" diff --git a/Cargo.toml b/Cargo.toml index d67464f1..1f76c0ed 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -3,7 +3,7 @@ name = "vaultwarden" version = "1.0.0" authors = ["Daniel García "] edition = "2021" -rust-version = "1.58.1" +rust-version = "1.59" resolver = "2" repository = "https://github.com/dani-garcia/vaultwarden" @@ -116,11 +116,11 @@ handlebars = { version = "4.2.1", features = ["dir_source"] } reqwest = { version = "0.11.9", features = ["stream", "json", "gzip", "brotli", "socks", "cookies", "trust-dns"] } # For favicon extraction from main website -html5ever = "0.25.1" -markup5ever_rcdom = "0.1.0" +html5gum = "0.4.0" regex = { version = "1.5.4", features = ["std", "perf", "unicode-perl"], default-features = false } data-url = "0.1.1" bytes = "1.1.0" +cached = "0.30.0" # Used for custom short lived cookie jar during favicon extraction cookie = "0.15.1" @@ -140,7 +140,7 @@ governor = "0.4.2" ctrlc = { version = "3.2.1", features = ["termination"] } [patch.crates-io] -rocket = { git = 'https://github.com/SergioBenitez/Rocket', rev = '66d18bf66517e2765494d082629e9b9748ff8ad6' } +rocket = { git = 'https://github.com/SergioBenitez/Rocket', rev = '91e3b4397a1637d0f55f23db712cf7bda0c7f891' } # The maintainer of the `job_scheduler` crate doesn't seem to have responded # to any issues or PRs for almost a year (as of April 2021). 
This hopefully @@ -148,3 +148,9 @@ rocket = { git = 'https://github.com/SergioBenitez/Rocket', rev = '66d18bf66517e # In particular, `cron` has since implemented parsing of some common syntax # that wasn't previously supported (https://github.com/zslayton/cron/pull/64). job_scheduler = { git = 'https://github.com/jjlin/job_scheduler', rev = 'ee023418dbba2bfe1e30a5fd7d937f9e33739806' } + +# Strip debuginfo from the release builds +# Also enable thin LTO for some optimizations +[profile.release] +strip = "debuginfo" +lto = "thin" diff --git a/docker/Dockerfile.j2 b/docker/Dockerfile.j2 index 196af08d..a5194254 100644 --- a/docker/Dockerfile.j2 +++ b/docker/Dockerfile.j2 @@ -182,12 +182,6 @@ RUN touch src/main.rs # your actual source files being built # hadolint ignore=DL3059 RUN {{ mount_rust_cache -}} cargo build --features ${DB} --release{{ package_arch_target_param }} -{% if "alpine" in target_file %} -{% if "armv7" in target_file %} -# hadolint ignore=DL3059 -RUN musl-strip target/{{ package_arch_target }}/release/vaultwarden -{% endif %} -{% endif %} ######################## RUNTIME IMAGE ######################## # Create a new stage with a minimal image diff --git a/docker/armv7/Dockerfile.alpine b/docker/armv7/Dockerfile.alpine index d00017bd..e05965bc 100644 --- a/docker/armv7/Dockerfile.alpine +++ b/docker/armv7/Dockerfile.alpine @@ -78,8 +78,6 @@ RUN touch src/main.rs # your actual source files being built # hadolint ignore=DL3059 RUN cargo build --features ${DB} --release --target=armv7-unknown-linux-musleabihf -# hadolint ignore=DL3059 -RUN musl-strip target/armv7-unknown-linux-musleabihf/release/vaultwarden ######################## RUNTIME IMAGE ######################## # Create a new stage with a minimal image diff --git a/docker/armv7/Dockerfile.buildx.alpine b/docker/armv7/Dockerfile.buildx.alpine index a80405d0..431e0ff9 100644 --- a/docker/armv7/Dockerfile.buildx.alpine +++ b/docker/armv7/Dockerfile.buildx.alpine @@ -78,8 +78,6 @@ RUN touch 
src/main.rs # your actual source files being built # hadolint ignore=DL3059 RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry cargo build --features ${DB} --release --target=armv7-unknown-linux-musleabihf -# hadolint ignore=DL3059 -RUN musl-strip target/armv7-unknown-linux-musleabihf/release/vaultwarden ######################## RUNTIME IMAGE ######################## # Create a new stage with a minimal image diff --git a/src/api/admin.rs b/src/api/admin.rs index 015ec7c7..6fbf30e9 100644 --- a/src/api/admin.rs +++ b/src/api/admin.rs @@ -301,7 +301,7 @@ fn test_smtp(data: Json, _token: AdminToken) -> EmptyResult { #[get("/logout")] fn logout(cookies: &CookieJar<'_>, referer: Referer) -> Redirect { - cookies.remove(Cookie::named(COOKIE_NAME)); + cookies.remove(Cookie::build(COOKIE_NAME, "").path(admin_path()).finish()); Redirect::to(admin_url(referer)) } @@ -638,7 +638,7 @@ impl<'r> FromRequest<'r> for AdminToken { if decode_admin(access_token).is_err() { // Remove admin cookie - cookies.remove(Cookie::named(COOKIE_NAME)); + cookies.remove(Cookie::build(COOKIE_NAME, "").path(admin_path()).finish()); error!("Invalid or expired admin JWT. 
IP: {}.", ip); return Outcome::Forward(()); } diff --git a/src/api/core/organizations.rs b/src/api/core/organizations.rs index bb6c6634..13012e96 100644 --- a/src/api/core/organizations.rs +++ b/src/api/core/organizations.rs @@ -1182,9 +1182,7 @@ async fn post_org_import( let ciphers = stream::iter(data.Ciphers) .then(|cipher_data| async { let mut cipher = Cipher::new(cipher_data.Type, cipher_data.Name.clone()); - update_cipher_from_data(&mut cipher, cipher_data, &headers, false, &conn, &nt, UpdateType::CipherCreate) - .await - .ok(); + update_cipher_from_data(&mut cipher, cipher_data, &headers, false, &conn, &nt, UpdateType::None).await.ok(); cipher }) .collect::>() diff --git a/src/api/icons.rs b/src/api/icons.rs index 6af10a35..71c4899d 100644 --- a/src/api/icons.rs +++ b/src/api/icons.rs @@ -1,21 +1,28 @@ use std::{ collections::HashMap, - net::{IpAddr, ToSocketAddrs}, - sync::{Arc, RwLock}, + net::IpAddr, + sync::Arc, time::{Duration, SystemTime}, }; -use bytes::{Buf, Bytes, BytesMut}; +use bytes::{Bytes, BytesMut}; use futures::{stream::StreamExt, TryFutureExt}; use once_cell::sync::Lazy; use regex::Regex; -use reqwest::{header, Client, Response}; +use reqwest::{ + header::{self, HeaderMap, HeaderValue}, + Client, Response, +}; use rocket::{http::ContentType, response::Redirect, Route}; use tokio::{ fs::{create_dir_all, remove_file, symlink_metadata, File}, io::{AsyncReadExt, AsyncWriteExt}, + net::lookup_host, + sync::RwLock, }; +use html5gum::{Emitter, EndTag, InfallibleTokenizer, Readable, StartTag, StringReader, Tokenizer}; + use crate::{ error::Error, util::{get_reqwest_client_builder, Cached}, @@ -34,39 +41,50 @@ pub fn routes() -> Vec { static CLIENT: Lazy = Lazy::new(|| { // Generate the default headers - let mut default_headers = header::HeaderMap::new(); - default_headers - .insert(header::USER_AGENT, header::HeaderValue::from_static("Links (2.22; Linux X86_64; GNU C; text)")); - default_headers - .insert(header::ACCEPT, 
header::HeaderValue::from_static("text/html, text/*;q=0.5, image/*, */*;q=0.1")); - default_headers.insert(header::ACCEPT_LANGUAGE, header::HeaderValue::from_static("en,*;q=0.1")); - default_headers.insert(header::CACHE_CONTROL, header::HeaderValue::from_static("no-cache")); - default_headers.insert(header::PRAGMA, header::HeaderValue::from_static("no-cache")); + let mut default_headers = HeaderMap::new(); + default_headers.insert(header::USER_AGENT, HeaderValue::from_static("Links (2.22; Linux X86_64; GNU C; text)")); + default_headers.insert(header::ACCEPT, HeaderValue::from_static("text/html, text/*;q=0.5, image/*, */*;q=0.1")); + default_headers.insert(header::ACCEPT_LANGUAGE, HeaderValue::from_static("en,*;q=0.1")); + default_headers.insert(header::CACHE_CONTROL, HeaderValue::from_static("no-cache")); + default_headers.insert(header::PRAGMA, HeaderValue::from_static("no-cache")); + + // Generate the cookie store + let cookie_store = Arc::new(Jar::default()); // Reuse the client between requests - get_reqwest_client_builder() - .cookie_provider(Arc::new(Jar::default())) + let client = get_reqwest_client_builder() + .cookie_provider(cookie_store.clone()) .timeout(Duration::from_secs(CONFIG.icon_download_timeout())) - .default_headers(default_headers) - .build() - .expect("Failed to build icon client") + .default_headers(default_headers.clone()); + + match client.build() { + Ok(client) => client, + Err(e) => { + error!("Possible trust-dns error, trying with trust-dns disabled: '{e}'"); + get_reqwest_client_builder() + .cookie_provider(cookie_store) + .timeout(Duration::from_secs(CONFIG.icon_download_timeout())) + .default_headers(default_headers) + .trust_dns(false) + .build() + .expect("Failed to build client") + } + } }); // Build Regex only once since this takes a lot of time. 
-static ICON_REL_REGEX: Lazy = Lazy::new(|| Regex::new(r"(?i)icon$|apple.*icon").unwrap()); -static ICON_REL_BLACKLIST: Lazy = Lazy::new(|| Regex::new(r"(?i)mask-icon").unwrap()); static ICON_SIZE_REGEX: Lazy = Lazy::new(|| Regex::new(r"(?x)(\d+)\D*(\d+)").unwrap()); // Special HashMap which holds the user defined Regex to speedup matching the regex. static ICON_BLACKLIST_REGEX: Lazy>> = Lazy::new(|| RwLock::new(HashMap::new())); -fn icon_redirect(domain: &str, template: &str) -> Option { - if !is_valid_domain(domain) { +async fn icon_redirect(domain: &str, template: &str) -> Option { + if !is_valid_domain(domain).await { warn!("Invalid domain: {}", domain); return None; } - if is_domain_blacklisted(domain) { + if is_domain_blacklisted(domain).await { return None; } @@ -84,30 +102,30 @@ fn icon_redirect(domain: &str, template: &str) -> Option { } #[get("//icon.png")] -fn icon_custom(domain: String) -> Option { - icon_redirect(&domain, &CONFIG.icon_service()) +async fn icon_custom(domain: String) -> Option { + icon_redirect(&domain, &CONFIG.icon_service()).await } #[get("//icon.png")] -fn icon_bitwarden(domain: String) -> Option { - icon_redirect(&domain, "https://icons.bitwarden.net/{}/icon.png") +async fn icon_bitwarden(domain: String) -> Option { + icon_redirect(&domain, "https://icons.bitwarden.net/{}/icon.png").await } #[get("//icon.png")] -fn icon_duckduckgo(domain: String) -> Option { - icon_redirect(&domain, "https://icons.duckduckgo.com/ip3/{}.ico") +async fn icon_duckduckgo(domain: String) -> Option { + icon_redirect(&domain, "https://icons.duckduckgo.com/ip3/{}.ico").await } #[get("//icon.png")] -fn icon_google(domain: String) -> Option { - icon_redirect(&domain, "https://www.google.com/s2/favicons?domain={}&sz=32") +async fn icon_google(domain: String) -> Option { + icon_redirect(&domain, "https://www.google.com/s2/favicons?domain={}&sz=32").await } #[get("//icon.png")] async fn icon_internal(domain: String) -> Cached<(ContentType, Vec)> { const 
FALLBACK_ICON: &[u8] = include_bytes!("../static/images/fallback-icon.png"); - if !is_valid_domain(&domain) { + if !is_valid_domain(&domain).await { warn!("Invalid domain: {}", domain); return Cached::ttl( (ContentType::new("image", "png"), FALLBACK_ICON.to_vec()), @@ -128,7 +146,7 @@ async fn icon_internal(domain: String) -> Cached<(ContentType, Vec)> { /// /// This does some manual checks and makes use of Url to do some basic checking. /// domains can't be larger then 63 characters (not counting multiple subdomains) according to the RFC's, but we limit the total size to 255. -fn is_valid_domain(domain: &str) -> bool { +async fn is_valid_domain(domain: &str) -> bool { const ALLOWED_CHARS: &str = "_-."; // If parsing the domain fails using Url, it will not work with reqwest. @@ -260,57 +278,52 @@ mod tests { } } -fn is_domain_blacklisted(domain: &str) -> bool { - let mut is_blacklisted = CONFIG.icon_blacklist_non_global_ips() - && (domain, 0) - .to_socket_addrs() - .map(|x| { - for ip_port in x { - if !is_global(ip_port.ip()) { - warn!("IP {} for domain '{}' is not a global IP!", ip_port.ip(), domain); - return true; - } +use cached::proc_macro::cached; +#[cached(key = "String", convert = r#"{ domain.to_string() }"#, size = 16, time = 60)] +async fn is_domain_blacklisted(domain: &str) -> bool { + if CONFIG.icon_blacklist_non_global_ips() { + if let Ok(s) = lookup_host((domain, 0)).await { + for addr in s { + if !is_global(addr.ip()) { + debug!("IP {} for domain '{}' is not a global IP!", addr.ip(), domain); + return true; } - false - }) - .unwrap_or(false); - - // Skip the regex check if the previous one is true already - if !is_blacklisted { - if let Some(blacklist) = CONFIG.icon_blacklist_regex() { - let mut regex_hashmap = ICON_BLACKLIST_REGEX.read().unwrap(); - - // Use the pre-generate Regex stored in a Lazy HashMap if there's one, else generate it. 
- let regex = if let Some(regex) = regex_hashmap.get(&blacklist) { - regex - } else { - drop(regex_hashmap); - - let mut regex_hashmap_write = ICON_BLACKLIST_REGEX.write().unwrap(); - // Clear the current list if the previous key doesn't exists. - // To prevent growing of the HashMap after someone has changed it via the admin interface. - if regex_hashmap_write.len() >= 1 { - regex_hashmap_write.clear(); - } - - // Generate the regex to store in too the Lazy Static HashMap. - let blacklist_regex = Regex::new(&blacklist).unwrap(); - regex_hashmap_write.insert(blacklist.to_string(), blacklist_regex); - drop(regex_hashmap_write); - - regex_hashmap = ICON_BLACKLIST_REGEX.read().unwrap(); - regex_hashmap.get(&blacklist).unwrap() - }; - - // Use the pre-generate Regex stored in a Lazy HashMap. - if regex.is_match(domain) { - debug!("Blacklisted domain: {} matched ICON_BLACKLIST_REGEX", domain); - is_blacklisted = true; } } } - is_blacklisted + if let Some(blacklist) = CONFIG.icon_blacklist_regex() { + let mut regex_hashmap = ICON_BLACKLIST_REGEX.read().await; + + // Use the pre-generate Regex stored in a Lazy HashMap if there's one, else generate it. + let regex = if let Some(regex) = regex_hashmap.get(&blacklist) { + regex + } else { + drop(regex_hashmap); + + let mut regex_hashmap_write = ICON_BLACKLIST_REGEX.write().await; + // Clear the current list if the previous key doesn't exists. + // To prevent growing of the HashMap after someone has changed it via the admin interface. + if regex_hashmap_write.len() >= 1 { + regex_hashmap_write.clear(); + } + + // Generate the regex to store in too the Lazy Static HashMap. + let blacklist_regex = Regex::new(&blacklist); + regex_hashmap_write.insert(blacklist.to_string(), blacklist_regex.unwrap()); + drop(regex_hashmap_write); + + regex_hashmap = ICON_BLACKLIST_REGEX.read().await; + regex_hashmap.get(&blacklist).unwrap() + }; + + // Use the pre-generate Regex stored in a Lazy HashMap. 
+ if regex.is_match(domain) { + debug!("Blacklisted domain: {} matched ICON_BLACKLIST_REGEX", domain); + return true; + } + } + false } async fn get_icon(domain: &str) -> Option<(Vec, String)> { @@ -322,7 +335,7 @@ async fn get_icon(domain: &str) -> Option<(Vec, String)> { } if let Some(icon) = get_cached_icon(&path).await { - let icon_type = match get_icon_type(&icon) { + let icon_type = match get_icon_type(&icon).await { Some(x) => x, _ => "x-icon", }; @@ -412,91 +425,62 @@ impl Icon { } } -/// Iterates over the HTML document to find -/// When found it will stop the iteration and the found base href will be shared deref via `base_href`. -/// -/// # Arguments -/// * `node` - A Parsed HTML document via html5ever::parse_document() -/// * `base_href` - a mutable url::Url which will be overwritten when a base href tag has been found. -/// -fn get_base_href(node: &std::rc::Rc, base_href: &mut url::Url) -> bool { - if let markup5ever_rcdom::NodeData::Element { - name, - attrs, - .. - } = &node.data - { - if name.local.as_ref() == "base" { - let attrs = attrs.borrow(); - for attr in attrs.iter() { - let attr_name = attr.name.local.as_ref(); - let attr_value = attr.value.as_ref(); +async fn get_favicons_node( + dom: InfallibleTokenizer, FaviconEmitter>, + icons: &mut Vec, + url: &url::Url, +) { + const TAG_LINK: &[u8] = b"link"; + const TAG_BASE: &[u8] = b"base"; + const TAG_HEAD: &[u8] = b"head"; + const ATTR_REL: &[u8] = b"rel"; + const ATTR_HREF: &[u8] = b"href"; + const ATTR_SIZES: &[u8] = b"sizes"; - if attr_name == "href" { - debug!("Found base href: {}", attr_value); - *base_href = match base_href.join(attr_value) { - Ok(href) => href, - _ => base_href.clone(), - }; - return true; - } - } - return true; - } - } - - // TODO: Might want to limit the recursion depth? - for child in node.children.borrow().iter() { - // Check if we got a true back and stop the iter. - // This means we found a tag and can stop processing the html. 
- if get_base_href(child, base_href) { - return true; - } - } - false -} - -fn get_favicons_node(node: &std::rc::Rc, icons: &mut Vec, url: &url::Url) { - if let markup5ever_rcdom::NodeData::Element { - name, - attrs, - .. - } = &node.data - { - if name.local.as_ref() == "link" { - let mut has_rel = false; - let mut href = None; - let mut sizes = None; - - let attrs = attrs.borrow(); - for attr in attrs.iter() { - let attr_name = attr.name.local.as_ref(); - let attr_value = attr.value.as_ref(); - - if attr_name == "rel" && ICON_REL_REGEX.is_match(attr_value) && !ICON_REL_BLACKLIST.is_match(attr_value) + let mut base_url = url.clone(); + let mut icon_tags: Vec = Vec::new(); + for token in dom { + match token { + FaviconToken::StartTag(tag) => { + if tag.name == TAG_LINK + && tag.attributes.contains_key(ATTR_REL) + && tag.attributes.contains_key(ATTR_HREF) { - has_rel = true; - } else if attr_name == "href" { - href = Some(attr_value); - } else if attr_name == "sizes" { - sizes = Some(attr_value); + let rel_value = std::str::from_utf8(tag.attributes.get(ATTR_REL).unwrap()) + .unwrap_or_default() + .to_ascii_lowercase(); + if rel_value.contains("icon") && !rel_value.contains("mask-icon") { + icon_tags.push(tag); + } + } else if tag.name == TAG_BASE && tag.attributes.contains_key(ATTR_HREF) { + let href = std::str::from_utf8(tag.attributes.get(ATTR_HREF).unwrap()).unwrap_or_default(); + debug!("Found base href: {href}"); + base_url = match base_url.join(href) { + Ok(inner_url) => inner_url, + _ => url.clone(), + }; } } - - if has_rel { - if let Some(inner_href) = href { - if let Ok(full_href) = url.join(inner_href).map(String::from) { - let priority = get_icon_priority(&full_href, sizes); - icons.push(Icon::new(priority, full_href)); - } + FaviconToken::EndTag(tag) => { + if tag.name == TAG_HEAD { + break; } } } } - // TODO: Might want to limit the recursion depth? 
- for child in node.children.borrow().iter() { - get_favicons_node(child, icons, url); + for icon_tag in icon_tags { + if let Some(icon_href) = icon_tag.attributes.get(ATTR_HREF) { + if let Ok(full_href) = base_url.join(std::str::from_utf8(icon_href).unwrap_or_default()) { + let sizes = if let Some(v) = icon_tag.attributes.get(ATTR_SIZES) { + std::str::from_utf8(v).unwrap_or_default() + } else { + "" + }; + let priority = get_icon_priority(full_href.as_str(), sizes).await; + icons.push(Icon::new(priority, full_href.to_string())); + } + }; } } @@ -514,13 +498,13 @@ struct IconUrlResult { /// /// # Example /// ``` -/// let icon_result = get_icon_url("github.com")?; -/// let icon_result = get_icon_url("vaultwarden.discourse.group")?; +/// let icon_result = get_icon_url("github.com").await?; +/// let icon_result = get_icon_url("vaultwarden.discourse.group").await?; /// ``` async fn get_icon_url(domain: &str) -> Result { // Default URL with secure and insecure schemes - let ssldomain = format!("https://{}", domain); - let httpdomain = format!("http://{}", domain); + let ssldomain = format!("https://{domain}"); + let httpdomain = format!("http://{domain}"); // First check the domain as given during the request for both HTTPS and HTTP. 
let resp = match get_page(&ssldomain).or_else(|_| get_page(&httpdomain)).await { @@ -537,26 +521,25 @@ async fn get_icon_url(domain: &str) -> Result { tld = domain_parts.next_back().unwrap(), base = domain_parts.next_back().unwrap() ); - if is_valid_domain(&base_domain) { - let sslbase = format!("https://{}", base_domain); - let httpbase = format!("http://{}", base_domain); - debug!("[get_icon_url]: Trying without subdomains '{}'", base_domain); + if is_valid_domain(&base_domain).await { + let sslbase = format!("https://{base_domain}"); + let httpbase = format!("http://{base_domain}"); + debug!("[get_icon_url]: Trying without subdomains '{base_domain}'"); sub_resp = get_page(&sslbase).or_else(|_| get_page(&httpbase)).await; } // When the domain is not an IP, and has less then 2 dots, try to add www. infront of it. } else if is_ip.is_err() && domain.matches('.').count() < 2 { - let www_domain = format!("www.{}", domain); - if is_valid_domain(&www_domain) { - let sslwww = format!("https://{}", www_domain); - let httpwww = format!("http://{}", www_domain); - debug!("[get_icon_url]: Trying with www. prefix '{}'", www_domain); + let www_domain = format!("www.{domain}"); + if is_valid_domain(&www_domain).await { + let sslwww = format!("https://{www_domain}"); + let httpwww = format!("http://{www_domain}"); + debug!("[get_icon_url]: Trying with www. prefix '{www_domain}'"); sub_resp = get_page(&sslwww).or_else(|_| get_page(&httpwww)).await; } } - sub_resp } }; @@ -571,26 +554,23 @@ async fn get_icon_url(domain: &str) -> Result { // Set the referer to be used on the final request, some sites check this. // Mostly used to prevent direct linking and other security resons. - referer = url.as_str().to_string(); + referer = url.to_string(); - // Add the default favicon.ico to the list with the domain the content responded from. + // Add the fallback favicon.ico and apple-touch-icon.png to the list with the domain the content responded from. 
iconlist.push(Icon::new(35, String::from(url.join("/favicon.ico").unwrap()))); + iconlist.push(Icon::new(40, String::from(url.join("/apple-touch-icon.png").unwrap()))); // 384KB should be more than enough for the HTML, though as we only really need the HTML header. - let mut limited_reader = stream_to_bytes_limit(content, 384 * 1024).await?.reader(); + let limited_reader = stream_to_bytes_limit(content, 384 * 1024).await?.to_vec(); - use html5ever::tendril::TendrilSink; - let dom = html5ever::parse_document(markup5ever_rcdom::RcDom::default(), Default::default()) - .from_utf8() - .read_from(&mut limited_reader)?; - - let mut base_url: url::Url = url; - get_base_href(&dom.document, &mut base_url); - get_favicons_node(&dom.document, &mut iconlist, &base_url); + let dom = Tokenizer::new_with_emitter(limited_reader.to_reader(), FaviconEmitter::default()).infallible(); + get_favicons_node(dom, &mut iconlist, &url).await; } else { // Add the default favicon.ico to the list with just the given domain - iconlist.push(Icon::new(35, format!("{}/favicon.ico", ssldomain))); - iconlist.push(Icon::new(35, format!("{}/favicon.ico", httpdomain))); + iconlist.push(Icon::new(35, format!("{ssldomain}/favicon.ico"))); + iconlist.push(Icon::new(40, format!("{ssldomain}/apple-touch-icon.png"))); + iconlist.push(Icon::new(35, format!("{httpdomain}/favicon.ico"))); + iconlist.push(Icon::new(40, format!("{httpdomain}/apple-touch-icon.png"))); } // Sort the iconlist by priority @@ -608,7 +588,7 @@ async fn get_page(url: &str) -> Result { } async fn get_page_with_referer(url: &str, referer: &str) -> Result { - if is_domain_blacklisted(url::Url::parse(url).unwrap().host_str().unwrap_or_default()) { + if is_domain_blacklisted(url::Url::parse(url).unwrap().host_str().unwrap_or_default()).await { warn!("Favicon '{}' resolves to a blacklisted domain or IP!", url); } @@ -632,12 +612,12 @@ async fn get_page_with_referer(url: &str, referer: &str) -> Result) -> u8 { +async fn get_icon_priority(href: 
&str, sizes: &str) -> u8 { // Check if there is a dimension set - let (width, height) = parse_sizes(sizes); + let (width, height) = parse_sizes(sizes).await; // Check if there is a size given if width != 0 && height != 0 { @@ -679,15 +659,15 @@ fn get_icon_priority(href: &str, sizes: Option<&str>) -> u8 { /// /// # Example /// ``` -/// let (width, height) = parse_sizes("64x64"); // (64, 64) -/// let (width, height) = parse_sizes("x128x128"); // (128, 128) -/// let (width, height) = parse_sizes("32"); // (0, 0) +/// let (width, height) = parse_sizes("64x64").await; // (64, 64) +/// let (width, height) = parse_sizes("x128x128").await; // (128, 128) +/// let (width, height) = parse_sizes("32").await; // (0, 0) /// ``` -fn parse_sizes(sizes: Option<&str>) -> (u16, u16) { +async fn parse_sizes(sizes: &str) -> (u16, u16) { let mut width: u16 = 0; let mut height: u16 = 0; - if let Some(sizes) = sizes { + if !sizes.is_empty() { match ICON_SIZE_REGEX.captures(sizes.trim()) { None => {} Some(dimensions) => { @@ -703,7 +683,7 @@ fn parse_sizes(sizes: Option<&str>) -> (u16, u16) { } async fn download_icon(domain: &str) -> Result<(Bytes, Option<&str>), Error> { - if is_domain_blacklisted(domain) { + if is_domain_blacklisted(domain).await { err_silent!("Domain is blacklisted", domain) } @@ -727,7 +707,7 @@ async fn download_icon(domain: &str) -> Result<(Bytes, Option<&str>), Error> { // Also check if the size is atleast 67 bytes, which seems to be the smallest png i could create if body.len() >= 67 { // Check if the icon type is allowed, else try an icon from the list. 
- icon_type = get_icon_type(&body); + icon_type = get_icon_type(&body).await; if icon_type.is_none() { debug!("Icon from {} data:image uri, is not a valid image type", domain); continue; @@ -742,10 +722,10 @@ async fn download_icon(domain: &str) -> Result<(Bytes, Option<&str>), Error> { } else { match get_page_with_referer(&icon.href, &icon_result.referer).await { Ok(res) => { - buffer = stream_to_bytes_limit(res, 512 * 1024).await?; // 512 KB for each icon max - // Check if the icon type is allowed, else try an icon from the list. - icon_type = get_icon_type(&buffer); + buffer = stream_to_bytes_limit(res, 5120 * 1024).await?; // 5120KB/5MB for each icon max (Same as icons.bitwarden.net) + // Check if the icon type is allowed, else try an icon from the list. + icon_type = get_icon_type(&buffer).await; if icon_type.is_none() { buffer.clear(); debug!("Icon from {}, is not a valid image type", icon.href); @@ -780,7 +760,7 @@ async fn save_icon(path: &str, icon: &[u8]) { } } -fn get_icon_type(bytes: &[u8]) -> Option<&'static str> { +async fn get_icon_type(bytes: &[u8]) -> Option<&'static str> { match bytes { [137, 80, 78, 71, ..] => Some("png"), [0, 0, 1, 0, ..] => Some("x-icon"), @@ -792,13 +772,30 @@ fn get_icon_type(bytes: &[u8]) -> Option<&'static str> { } } +/// Minimize the amount of bytes to be parsed from a reqwest result. +/// This prevents very long parsing and memory usage. +async fn stream_to_bytes_limit(res: Response, max_size: usize) -> Result { + let mut stream = res.bytes_stream().take(max_size); + let mut buf = BytesMut::new(); + let mut size = 0; + while let Some(chunk) = stream.next().await { + let chunk = &chunk?; + size += chunk.len(); + buf.extend(chunk); + if size >= max_size { + break; + } + } + Ok(buf.freeze()) +} + /// This is an implementation of the default Cookie Jar from Reqwest and reqwest_cookie_store build by pfernie. 
/// The default cookie jar used by Reqwest keeps all the cookies based upon the Max-Age or Expires which could be a long time. /// That could be used for tracking, to prevent this we force the lifespan of the cookies to always be max two minutes. /// A Cookie Jar is needed because some sites force a redirect with cookies to verify if a request uses cookies or not. use cookie_store::CookieStore; #[derive(Default)] -pub struct Jar(RwLock); +pub struct Jar(std::sync::RwLock); impl reqwest::cookie::CookieStore for Jar { fn set_cookies(&self, cookie_headers: &mut dyn Iterator, url: &url::Url) { @@ -836,11 +833,136 @@ impl reqwest::cookie::CookieStore for Jar { } } -async fn stream_to_bytes_limit(res: Response, max_size: usize) -> Result { - let mut stream = res.bytes_stream().take(max_size); - let mut buf = BytesMut::new(); - while let Some(chunk) = stream.next().await { - buf.extend(chunk?); - } - Ok(buf.freeze()) +/// Custom FaviconEmitter for the html5gum parser. +/// The FaviconEmitter is using an almost 1:1 copy of the DefaultEmitter with some small changes. +/// This prevents emitting tags like comments, doctype and also strings between the tags. +/// Therefor parsing the HTML content is faster. 
+use std::collections::{BTreeSet, VecDeque}; + +enum FaviconToken { + StartTag(StartTag), + EndTag(EndTag), +} + +#[derive(Default)] +struct FaviconEmitter { + current_token: Option, + last_start_tag: Vec, + current_attribute: Option<(Vec, Vec)>, + seen_attributes: BTreeSet>, + emitted_tokens: VecDeque, +} + +impl FaviconEmitter { + fn emit_token(&mut self, token: FaviconToken) { + self.emitted_tokens.push_front(token); + } + + fn flush_current_attribute(&mut self) { + if let Some((k, v)) = self.current_attribute.take() { + match self.current_token { + Some(FaviconToken::StartTag(ref mut tag)) => { + tag.attributes.entry(k).and_modify(|_| {}).or_insert(v); + } + Some(FaviconToken::EndTag(_)) => { + self.seen_attributes.insert(k); + } + _ => { + debug_assert!(false); + } + } + } + } +} + +impl Emitter for FaviconEmitter { + type Token = FaviconToken; + + fn set_last_start_tag(&mut self, last_start_tag: Option<&[u8]>) { + self.last_start_tag.clear(); + self.last_start_tag.extend(last_start_tag.unwrap_or_default()); + } + + fn pop_token(&mut self) -> Option { + self.emitted_tokens.pop_back() + } + + fn init_start_tag(&mut self) { + self.current_token = Some(FaviconToken::StartTag(StartTag::default())); + } + + fn init_end_tag(&mut self) { + self.current_token = Some(FaviconToken::EndTag(EndTag::default())); + self.seen_attributes.clear(); + } + + fn emit_current_tag(&mut self) { + self.flush_current_attribute(); + let mut token = self.current_token.take().unwrap(); + match token { + FaviconToken::EndTag(_) => { + self.seen_attributes.clear(); + } + FaviconToken::StartTag(ref mut tag) => { + self.set_last_start_tag(Some(&tag.name)); + } + } + self.emit_token(token); + } + + fn push_tag_name(&mut self, s: &[u8]) { + match self.current_token { + Some( + FaviconToken::StartTag(StartTag { + ref mut name, + .. + }) + | FaviconToken::EndTag(EndTag { + ref mut name, + .. 
+ }), + ) => { + name.extend(s); + } + _ => debug_assert!(false), + } + } + + fn init_attribute(&mut self) { + self.flush_current_attribute(); + self.current_attribute = Some((Vec::new(), Vec::new())); + } + + fn push_attribute_name(&mut self, s: &[u8]) { + self.current_attribute.as_mut().unwrap().0.extend(s); + } + + fn push_attribute_value(&mut self, s: &[u8]) { + self.current_attribute.as_mut().unwrap().1.extend(s); + } + + fn current_is_appropriate_end_tag_token(&mut self) -> bool { + match self.current_token { + Some(FaviconToken::EndTag(ref tag)) => !self.last_start_tag.is_empty() && self.last_start_tag == tag.name, + _ => false, + } + } + + // We do not want and need these parts of the HTML document + // These will be skipped and ignored during the tokenization and iteration. + fn emit_current_comment(&mut self) {} + fn emit_current_doctype(&mut self) {} + fn emit_eof(&mut self) {} + fn emit_error(&mut self, _: html5gum::Error) {} + fn emit_string(&mut self, _: &[u8]) {} + fn init_comment(&mut self) {} + fn init_doctype(&mut self) {} + fn push_comment(&mut self, _: &[u8]) {} + fn push_doctype_name(&mut self, _: &[u8]) {} + fn push_doctype_public_identifier(&mut self, _: &[u8]) {} + fn push_doctype_system_identifier(&mut self, _: &[u8]) {} + fn set_doctype_public_identifier(&mut self, _: &[u8]) {} + fn set_doctype_system_identifier(&mut self, _: &[u8]) {} + fn set_force_quirks(&mut self) {} + fn set_self_closing(&mut self) {} } diff --git a/src/config.rs b/src/config.rs index d2a52ef9..f00ea50d 100644 --- a/src/config.rs +++ b/src/config.rs @@ -569,12 +569,14 @@ make_config! 
{ _enable_smtp: bool, true, def, true; /// Host smtp_host: String, true, option; - /// Enable Secure SMTP |> (Explicit) - Enabling this by default would use STARTTLS (Standard ports 587 or 25) - smtp_ssl: bool, true, def, true; - /// Force TLS |> (Implicit) - Enabling this would force the use of an SSL/TLS connection, instead of upgrading an insecure one with STARTTLS (Standard port 465) - smtp_explicit_tls: bool, true, def, false; + /// DEPRECATED smtp_ssl |> DEPRECATED - Please use SMTP_SECURITY + smtp_ssl: bool, false, option; + /// DEPRECATED smtp_explicit_tls |> DEPRECATED - Please use SMTP_SECURITY + smtp_explicit_tls: bool, false, option; + /// Secure SMTP |> ("starttls", "force_tls", "off") Enable a secure connection. Default is "starttls" (Explicit - ports 587 or 25), "force_tls" (Implicit - port 465) or "off", no encryption + smtp_security: String, true, auto, |c| smtp_convert_deprecated_ssl_options(c.smtp_ssl, c.smtp_explicit_tls); // TODO: After deprecation make it `def, "starttls".to_string()` /// Port - smtp_port: u16, true, auto, |c| if c.smtp_explicit_tls {465} else if c.smtp_ssl {587} else {25}; + smtp_port: u16, true, auto, |c| if c.smtp_security == *"force_tls" {465} else if c.smtp_security == *"starttls" {587} else {25}; /// From Address smtp_from: String, true, def, String::new(); /// From Name @@ -657,6 +659,13 @@ fn validate_config(cfg: &ConfigItems) -> Result<(), Error> { } if cfg._enable_smtp { + match cfg.smtp_security.as_str() { + "off" | "starttls" | "force_tls" => (), + _ => err!( + "`SMTP_SECURITY` is invalid. 
It needs to be one of the following options: starttls, force_tls or off" + ), + } + if cfg.smtp_host.is_some() == cfg.smtp_from.is_empty() { err!("Both `SMTP_HOST` and `SMTP_FROM` need to be set for email support") } @@ -735,6 +744,20 @@ fn extract_url_path(url: &str) -> String { } } +/// Convert the old SMTP_SSL and SMTP_EXPLICIT_TLS options +fn smtp_convert_deprecated_ssl_options(smtp_ssl: Option, smtp_explicit_tls: Option) -> String { + if smtp_explicit_tls.is_some() || smtp_ssl.is_some() { + println!("[DEPRECATED]: `SMTP_SSL` or `SMTP_EXPLICIT_TLS` is set. Please use `SMTP_SECURITY` instead."); + } + if smtp_explicit_tls.is_some() && smtp_explicit_tls.unwrap() { + return "force_tls".to_string(); + } else if smtp_ssl.is_some() && !smtp_ssl.unwrap() { + return "off".to_string(); + } + // Return the default `starttls` in all other cases + "starttls".to_string() +} + impl Config { pub fn load() -> Result { // Loading from env and file diff --git a/src/mail.rs b/src/mail.rs index df9919d2..362d4aa3 100644 --- a/src/mail.rs +++ b/src/mail.rs @@ -30,7 +30,7 @@ fn mailer() -> SmtpTransport { .timeout(Some(Duration::from_secs(CONFIG.smtp_timeout()))); // Determine security - let smtp_client = if CONFIG.smtp_ssl() || CONFIG.smtp_explicit_tls() { + let smtp_client = if CONFIG.smtp_security() != *"off" { let mut tls_parameters = TlsParameters::builder(host); if CONFIG.smtp_accept_invalid_hostnames() { tls_parameters = tls_parameters.dangerous_accept_invalid_hostnames(true); @@ -40,7 +40,7 @@ fn mailer() -> SmtpTransport { } let tls_parameters = tls_parameters.build().unwrap(); - if CONFIG.smtp_explicit_tls() { + if CONFIG.smtp_security() == *"force_tls" { smtp_client.tls(Tls::Wrapper(tls_parameters)) } else { smtp_client.tls(Tls::Required(tls_parameters)) diff --git a/src/static/scripts/bootstrap-native.js b/src/static/scripts/bootstrap-native.js index 3827dfa6..c00b4e87 100644 --- a/src/static/scripts/bootstrap-native.js +++ b/src/static/scripts/bootstrap-native.js @@ 
-1,6 +1,6 @@ /*! - * Native JavaScript for Bootstrap v4.0.8 (https://thednp.github.io/bootstrap.native/) - * Copyright 2015-2021 © dnp_theme + * Native JavaScript for Bootstrap v4.1.0 (https://thednp.github.io/bootstrap.native/) + * Copyright 2015-2022 © dnp_theme * Licensed under MIT (https://github.com/thednp/bootstrap.native/blob/master/LICENSE) */ (function (global, factory) { @@ -9,157 +9,599 @@ (global = typeof globalThis !== 'undefined' ? globalThis : global || self, global.BSN = factory()); })(this, (function () { 'use strict'; - const transitionEndEvent = 'webkitTransition' in document.head.style ? 'webkitTransitionEnd' : 'transitionend'; + /** @type {Record} */ + const EventRegistry = {}; - const supportTransition = 'webkitTransition' in document.head.style || 'transition' in document.head.style; + /** + * The global event listener. + * + * @this {Element | HTMLElement | Window | Document} + * @param {Event} e + * @returns {void} + */ + function globalListener(e) { + const that = this; + const { type } = e; + const oneEvMap = EventRegistry[type] ? [...EventRegistry[type]] : []; - const transitionDuration = 'webkitTransition' in document.head.style ? 'webkitTransitionDuration' : 'transitionDuration'; + oneEvMap.forEach((elementsMap) => { + const [element, listenersMap] = elementsMap; + [...listenersMap].forEach((listenerMap) => { + if (element === that) { + const [listener, options] = listenerMap; + listener.apply(element, [e]); - const transitionProperty = 'webkitTransition' in document.head.style ? 'webkitTransitionProperty' : 'transitionProperty'; + if (options && options.once) { + removeListener(element, type, listener, options); + } + } + }); + }); + } - function getElementTransitionDuration(element) { + /** + * Register a new listener with its options and attach the `globalListener` + * to the target if this is the first listener. 
+ * + * @param {Element | HTMLElement | Window | Document} element + * @param {string} eventType + * @param {EventListenerObject['handleEvent']} listener + * @param {AddEventListenerOptions=} options + */ + const addListener = (element, eventType, listener, options) => { + // get element listeners first + if (!EventRegistry[eventType]) { + EventRegistry[eventType] = new Map(); + } + const oneEventMap = EventRegistry[eventType]; + + if (!oneEventMap.has(element)) { + oneEventMap.set(element, new Map()); + } + const oneElementMap = oneEventMap.get(element); + + // get listeners size + const { size } = oneElementMap; + + // register listener with its options + if (oneElementMap) { + oneElementMap.set(listener, options); + } + + // add listener last + if (!size) { + element.addEventListener(eventType, globalListener, options); + } + }; + + /** + * Remove a listener from registry and detach the `globalListener` + * if no listeners are found in the registry. + * + * @param {Element | HTMLElement | Window | Document} element + * @param {string} eventType + * @param {EventListenerObject['handleEvent']} listener + * @param {AddEventListenerOptions=} options + */ + const removeListener = (element, eventType, listener, options) => { + // get listener first + const oneEventMap = EventRegistry[eventType]; + const oneElementMap = oneEventMap && oneEventMap.get(element); + const savedOptions = oneElementMap && oneElementMap.get(listener); + + // also recover initial options + const { options: eventOptions } = savedOptions !== undefined + ? 
savedOptions + : { options }; + + // unsubscribe second, remove from registry + if (oneElementMap && oneElementMap.has(listener)) oneElementMap.delete(listener); + if (oneEventMap && (!oneElementMap || !oneElementMap.size)) oneEventMap.delete(element); + if (!oneEventMap || !oneEventMap.size) delete EventRegistry[eventType]; + + // remove listener last + if (!oneElementMap || !oneElementMap.size) { + element.removeEventListener(eventType, globalListener, eventOptions); + } + }; + + /** + * Advanced event listener based on subscribe / publish pattern. + * @see https://www.patterns.dev/posts/classic-design-patterns/#observerpatternjavascript + * @see https://gist.github.com/shystruk/d16c0ee7ac7d194da9644e5d740c8338#file-subpub-js + * @see https://hackernoon.com/do-you-still-register-window-event-listeners-in-each-component-react-in-example-31a4b1f6f1c8 + */ + const EventListener = { + on: addListener, + off: removeListener, + globalListener, + registry: EventRegistry, + }; + + /** + * A global namespace for `click` event. + * @type {string} + */ + const mouseclickEvent = 'click'; + + /** + * A global namespace for 'transitionend' string. + * @type {string} + */ + const transitionEndEvent = 'transitionend'; + + /** + * A global namespace for 'transitionDelay' string. + * @type {string} + */ + const transitionDelay = 'transitionDelay'; + + /** + * A global namespace for `transitionProperty` string for modern browsers. + * + * @type {string} + */ + const transitionProperty = 'transitionProperty'; + + /** + * Shortcut for `window.getComputedStyle(element).propertyName` + * static method. + * + * * If `element` parameter is not an `HTMLElement`, `getComputedStyle` + * throws a `ReferenceError`. 
+ * + * @param {HTMLElement | Element} element target + * @param {string} property the css property + * @return {string} the css property value + */ + function getElementStyle(element, property) { const computedStyle = getComputedStyle(element); - const propertyValue = computedStyle[transitionProperty]; - const durationValue = computedStyle[transitionDuration]; + + // @ts-ignore -- must use camelcase strings, + // or non-camelcase strings with `getPropertyValue` + return property in computedStyle ? computedStyle[property] : ''; + } + + /** + * Utility to get the computed `transitionDelay` + * from Element in miliseconds. + * + * @param {HTMLElement | Element} element target + * @return {number} the value in miliseconds + */ + function getElementTransitionDelay(element) { + const propertyValue = getElementStyle(element, transitionProperty); + const delayValue = getElementStyle(element, transitionDelay); + + const delayScale = delayValue.includes('ms') ? 1 : 1000; + const duration = propertyValue && propertyValue !== 'none' + ? parseFloat(delayValue) * delayScale : 0; + + return !Number.isNaN(duration) ? duration : 0; + } + + /** + * A global namespace for 'transitionDuration' string. + * @type {string} + */ + const transitionDuration = 'transitionDuration'; + + /** + * Utility to get the computed `transitionDuration` + * from Element in miliseconds. + * + * @param {HTMLElement | Element} element target + * @return {number} the value in miliseconds + */ + function getElementTransitionDuration(element) { + const propertyValue = getElementStyle(element, transitionProperty); + const durationValue = getElementStyle(element, transitionDuration); const durationScale = durationValue.includes('ms') ? 1 : 1000; - const duration = supportTransition && propertyValue && propertyValue !== 'none' + const duration = propertyValue && propertyValue !== 'none' ? parseFloat(durationValue) * durationScale : 0; return !Number.isNaN(duration) ? 
duration : 0; } + /** + * Utility to make sure callbacks are consistently + * called when transition ends. + * + * @param {HTMLElement | Element} element target + * @param {EventListener} handler `transitionend` callback + */ function emulateTransitionEnd(element, handler) { let called = 0; const endEvent = new Event(transitionEndEvent); const duration = getElementTransitionDuration(element); + const delay = getElementTransitionDelay(element); if (duration) { - element.addEventListener(transitionEndEvent, function transitionEndWrapper(e) { + /** + * Wrap the handler in on -> off callback + * @type {EventListener} e Event object + */ + const transitionEndWrapper = (e) => { if (e.target === element) { handler.apply(element, [e]); element.removeEventListener(transitionEndEvent, transitionEndWrapper); called = 1; } - }); + }; + element.addEventListener(transitionEndEvent, transitionEndWrapper); setTimeout(() => { if (!called) element.dispatchEvent(endEvent); - }, duration + 17); + }, duration + delay + 17); } else { handler.apply(element, [endEvent]); } } - function queryElement(selector, parent) { - const lookUp = parent && parent instanceof Element ? parent : document; - return selector instanceof Element ? selector : lookUp.querySelector(selector); + /** + * Returns the `document` or the `#document` element. + * @see https://github.com/floating-ui/floating-ui + * @param {(Node | HTMLElement | Element | globalThis)=} node + * @returns {Document} + */ + function getDocument(node) { + if (node instanceof HTMLElement) return node.ownerDocument; + if (node instanceof Window) return node.document; + return window.document; } + /** + * A global array of possible `ParentNode`. + */ + const parentNodes = [Document, Element, HTMLElement]; + + /** + * A global array with `Element` | `HTMLElement`. + */ + const elementNodes = [Element, HTMLElement]; + + /** + * Utility to check if target is typeof `HTMLElement`, `Element`, `Node` + * or find one that matches a selector. 
+ * + * @param {HTMLElement | Element | string} selector the input selector or target element + * @param {(HTMLElement | Element | Document)=} parent optional node to look into + * @return {(HTMLElement | Element)?} the `HTMLElement` or `querySelector` result + */ + function querySelector(selector, parent) { + const lookUp = parentNodes.some((x) => parent instanceof x) + ? parent : getDocument(); + + // @ts-ignore + return elementNodes.some((x) => selector instanceof x) + // @ts-ignore + ? selector : lookUp.querySelector(selector); + } + + /** + * Shortcut for `HTMLElement.closest` method which also works + * with children of `ShadowRoot`. The order of the parameters + * is intentional since they're both required. + * + * @see https://stackoverflow.com/q/54520554/803358 + * + * @param {HTMLElement | Element} element Element to look into + * @param {string} selector the selector name + * @return {(HTMLElement | Element)?} the query result + */ + function closest(element, selector) { + return element ? (element.closest(selector) + // @ts-ignore -- break out of `ShadowRoot` + || closest(element.getRootNode().host, selector)) : null; + } + + /** + * Shortcut for `Object.assign()` static method. + * @param {Record} obj a target object + * @param {Record} source a source object + */ + const ObjectAssign = (obj, source) => Object.assign(obj, source); + + /** + * Check class in `HTMLElement.classList`. + * + * @param {HTMLElement | Element} element target + * @param {string} classNAME to check + * @returns {boolean} + */ function hasClass(element, classNAME) { return element.classList.contains(classNAME); } + /** + * Remove class from `HTMLElement.classList`. + * + * @param {HTMLElement | Element} element target + * @param {string} classNAME to remove + * @returns {void} + */ function removeClass(element, classNAME) { element.classList.remove(classNAME); } - const addEventListener = 'addEventListener'; + /** + * Shortcut for the `Element.dispatchEvent(Event)` method. 
+ * + * @param {HTMLElement | Element} element is the target + * @param {Event} event is the `Event` object + */ + const dispatchEvent = (element, event) => element.dispatchEvent(event); - const removeEventListener = 'removeEventListener'; + /** @type {Map>>} */ + const componentData = new Map(); + /** + * An interface for web components background data. + * @see https://github.com/thednp/bootstrap.native/blob/master/src/components/base-component.js + */ + const Data = { + /** + * Sets web components data. + * @param {HTMLElement | Element | string} target target element + * @param {string} component the component's name or a unique key + * @param {Record} instance the component instance + */ + set: (target, component, instance) => { + const element = querySelector(target); + if (!element) return; - const fadeClass = 'fade'; + if (!componentData.has(component)) { + componentData.set(component, new Map()); + } - const showClass = 'show'; + const instanceMap = componentData.get(component); + // @ts-ignore - not undefined, but defined right above + instanceMap.set(element, instance); + }, - const dataBsDismiss = 'data-bs-dismiss'; + /** + * Returns all instances for specified component. + * @param {string} component the component's name or a unique key + * @returns {Map>?} all the component instances + */ + getAllFor: (component) => { + const instanceMap = componentData.get(component); - function bootstrapCustomEvent(namespacedEventType, eventProperties) { - const OriginalCustomEvent = new CustomEvent(namespacedEventType, { cancelable: true }); + return instanceMap || null; + }, - if (eventProperties instanceof Object) { - Object.keys(eventProperties).forEach((key) => { - Object.defineProperty(OriginalCustomEvent, key, { - value: eventProperties[key], - }); - }); + /** + * Returns the instance associated with the target. 
+ * @param {HTMLElement | Element | string} target target element + * @param {string} component the component's name or a unique key + * @returns {Record?} the instance + */ + get: (target, component) => { + const element = querySelector(target); + const allForC = Data.getAllFor(component); + const instance = element && allForC && allForC.get(element); + + return instance || null; + }, + + /** + * Removes web components data. + * @param {HTMLElement | Element | string} target target element + * @param {string} component the component's name or a unique key + */ + remove: (target, component) => { + const element = querySelector(target); + const instanceMap = componentData.get(component); + if (!instanceMap || !element) return; + + instanceMap.delete(element); + + if (instanceMap.size === 0) { + componentData.delete(component); + } + }, + }; + + /** + * An alias for `Data.get()`. + * @type {SHORTER.getInstance} + */ + const getInstance = (target, component) => Data.get(target, component); + + /** + * Returns a namespaced `CustomEvent` specific to each component. + * @param {string} EventType Event.type + * @param {Record=} config Event.options | Event.properties + * @returns {SHORTER.OriginalEvent} a new namespaced event + */ + function OriginalEvent(EventType, config) { + const OriginalCustomEvent = new CustomEvent(EventType, { + cancelable: true, bubbles: true, + }); + + if (config instanceof Object) { + ObjectAssign(OriginalCustomEvent, config); } return OriginalCustomEvent; } + /** + * Global namespace for most components `fade` class. + */ + const fadeClass = 'fade'; + + /** + * Global namespace for most components `show` class. + */ + const showClass = 'show'; + + /** + * Global namespace for most components `dismiss` option. + */ + const dataBsDismiss = 'data-bs-dismiss'; + + /** @type {string} */ + const alertString = 'alert'; + + /** @type {string} */ + const alertComponent = 'Alert'; + + /** + * Shortcut for `HTMLElement.getAttribute()` method. 
+ * @param {HTMLElement | Element} element target element + * @param {string} attribute attribute name + * @returns {string?} attribute value + */ + const getAttribute = (element, attribute) => element.getAttribute(attribute); + + /** + * The raw value or a given component option. + * + * @typedef {string | HTMLElement | Function | number | boolean | null} niceValue + */ + + /** + * Utility to normalize component options + * + * @param {any} value the input value + * @return {niceValue} the normalized value + */ function normalizeValue(value) { - if (value === 'true') { + if (value === 'true') { // boolean return true; } - if (value === 'false') { + if (value === 'false') { // boolean return false; } - if (!Number.isNaN(+value)) { + if (!Number.isNaN(+value)) { // number return +value; } - if (value === '' || value === 'null') { + if (value === '' || value === 'null') { // null return null; } - // string / function / Element / Object + // string / function / HTMLElement / object return value; } + /** + * Shortcut for `Object.keys()` static method. + * @param {Record} obj a target object + * @returns {string[]} + */ + const ObjectKeys = (obj) => Object.keys(obj); + + /** + * Shortcut for `String.toLowerCase()`. + * + * @param {string} source input string + * @returns {string} lowercase output string + */ + const toLowerCase = (source) => source.toLowerCase(); + + /** + * Utility to normalize component options. 
+ * + * @param {HTMLElement | Element} element target + * @param {Record} defaultOps component default options + * @param {Record} inputOps component instance options + * @param {string=} ns component namespace + * @return {Record} normalized component options object + */ function normalizeOptions(element, defaultOps, inputOps, ns) { - const normalOps = {}; - const dataOps = {}; + // @ts-ignore -- our targets are always `HTMLElement` const data = { ...element.dataset }; + /** @type {Record} */ + const normalOps = {}; + /** @type {Record} */ + const dataOps = {}; + const title = 'title'; - Object.keys(data) - .forEach((k) => { - const key = k.includes(ns) - ? k.replace(ns, '').replace(/[A-Z]/, (match) => match.toLowerCase()) - : k; + ObjectKeys(data).forEach((k) => { + const key = ns && k.includes(ns) + ? k.replace(ns, '').replace(/[A-Z]/, (match) => toLowerCase(match)) + : k; - dataOps[key] = normalizeValue(data[k]); - }); + dataOps[key] = normalizeValue(data[k]); + }); - Object.keys(inputOps) - .forEach((k) => { - inputOps[k] = normalizeValue(inputOps[k]); - }); + ObjectKeys(inputOps).forEach((k) => { + inputOps[k] = normalizeValue(inputOps[k]); + }); - Object.keys(defaultOps) - .forEach((k) => { - if (k in inputOps) { - normalOps[k] = inputOps[k]; - } else if (k in dataOps) { - normalOps[k] = dataOps[k]; - } else { - normalOps[k] = defaultOps[k]; - } - }); + ObjectKeys(defaultOps).forEach((k) => { + if (k in inputOps) { + normalOps[k] = inputOps[k]; + } else if (k in dataOps) { + normalOps[k] = dataOps[k]; + } else { + normalOps[k] = k === title + ? getAttribute(element, title) + : defaultOps[k]; + } + }); return normalOps; } + var version = "4.1.0"; + + const Version = version; + /* Native JavaScript for Bootstrap 5 | Base Component ----------------------------------------------------- */ + /** Returns a new `BaseComponent` instance. 
*/ class BaseComponent { - constructor(name, target, defaults, config) { + /** + * @param {HTMLElement | Element | string} target `Element` or selector string + * @param {BSN.ComponentOptions=} config component instance options + */ + constructor(target, config) { const self = this; - const element = queryElement(target); + const element = querySelector(target); - if (element[name]) element[name].dispose(); + if (!element) { + throw Error(`${self.name} Error: "${target}" is not a valid selector.`); + } + + /** @static @type {BSN.ComponentOptions} */ + self.options = {}; + + const prevInstance = Data.get(element, self.name); + if (prevInstance) prevInstance.dispose(); + + /** @type {HTMLElement | Element} */ self.element = element; - if (defaults && Object.keys(defaults).length) { - self.options = normalizeOptions(element, defaults, (config || {}), 'bs'); + if (self.defaults && Object.keys(self.defaults).length) { + self.options = normalizeOptions(element, self.defaults, (config || {}), 'bs'); } - element[name] = self; + + Data.set(element, self.name, self); } - dispose(name) { + /* eslint-disable */ + /** @static */ + get version() { return Version; } + /* eslint-enable */ + + /** @static */ + get name() { return this.constructor.name; } + + /** @static */ + // @ts-ignore + get defaults() { return this.constructor.defaults; } + + /** + * Removes component from target element; + */ + dispose() { const self = this; - self.element[name] = null; - Object.keys(self).forEach((prop) => { self[prop] = null; }); + Data.remove(self.element, self.name); + // @ts-ignore + ObjectKeys(self).forEach((prop) => { self[prop] = null; }); } } @@ -168,24 +610,39 @@ // ALERT PRIVATE GC // ================ - const alertString = 'alert'; - const alertComponent = 'Alert'; const alertSelector = `.${alertString}`; const alertDismissSelector = `[${dataBsDismiss}="${alertString}"]`; + /** + * Static method which returns an existing `Alert` instance associated + * to a target `Element`. 
+ * + * @type {BSN.GetInstance} + */ + const getAlertInstance = (element) => getInstance(element, alertComponent); + + /** + * An `Alert` initialization callback. + * @type {BSN.InitCallback} + */ + const alertInitCallback = (element) => new Alert(element); + // ALERT CUSTOM EVENTS // =================== - const closeAlertEvent = bootstrapCustomEvent(`close.bs.${alertString}`); - const closedAlertEvent = bootstrapCustomEvent(`closed.bs.${alertString}`); + const closeAlertEvent = OriginalEvent(`close.bs.${alertString}`); + const closedAlertEvent = OriginalEvent(`closed.bs.${alertString}`); - // ALERT EVENT HANDLERS - // ==================== + // ALERT EVENT HANDLER + // =================== + /** + * Alert `transitionend` callback. + * @param {Alert} self target Alert instance + */ function alertTransitionEnd(self) { - const { element, relatedTarget } = self; + const { element } = self; toggleAlertHandler(self); - if (relatedTarget) closedAlertEvent.relatedTarget = relatedTarget; - element.dispatchEvent(closedAlertEvent); + dispatchEvent(element, closedAlertEvent); self.dispose(); element.remove(); @@ -193,16 +650,24 @@ // ALERT PRIVATE METHOD // ==================== + /** + * Toggle on / off the `click` event listener. + * @param {Alert} self the target alert instance + * @param {boolean=} add when `true`, event listener is added + */ function toggleAlertHandler(self, add) { - const action = add ? addEventListener : removeEventListener; - if (self.dismiss) self.dismiss[action]('click', self.close); + const action = add ? addListener : removeListener; + const { dismiss } = self; + if (dismiss) action(dismiss, mouseclickEvent, self.close); } // ALERT DEFINITION // ================ + /** Creates a new Alert instance. 
*/ class Alert extends BaseComponent { + /** @param {HTMLElement | Element | string} target element or selector */ constructor(target) { - super(alertComponent, target); + super(target); // bind const self = this; @@ -210,28 +675,39 @@ const { element } = self; // the dismiss button - self.dismiss = queryElement(alertDismissSelector, element); - self.relatedTarget = null; + /** @static @type {(HTMLElement | Element)?} */ + self.dismiss = querySelector(alertDismissSelector, element); // add event listener - toggleAlertHandler(self, 1); + toggleAlertHandler(self, true); } + /* eslint-disable */ + /** + * Returns component name string. + * @readonly @static + */ + get name() { return alertComponent; } + /* eslint-enable */ + // ALERT PUBLIC METHODS // ==================== + /** + * Public method that hides the `.alert` element from the user, + * disposes the instance once animation is complete, then + * removes the element from the DOM. + * + * @param {Event=} e most likely the `click` event + * @this {Alert} the `Alert` instance or `EventTarget` + */ close(e) { - const target = e ? e.target : null; - const self = e - ? e.target.closest(alertSelector)[alertComponent] - : this; + // @ts-ignore + const self = e ? getAlertInstance(closest(this, alertSelector)) : this; + if (!self) return; const { element } = self; - if (self && element && hasClass(element, showClass)) { - if (target) { - closeAlertEvent.relatedTarget = target; - self.relatedTarget = target; - } - element.dispatchEvent(closeAlertEvent); + if (hasClass(element, showClass)) { + dispatchEvent(element, closeAlertEvent); if (closeAlertEvent.defaultPrevented) return; removeClass(element, showClass); @@ -242,123 +718,452 @@ } } + /** Remove the component from target element. 
*/ dispose() { toggleAlertHandler(this); - super.dispose(alertComponent); + super.dispose(); } } - Alert.init = { - component: alertComponent, + ObjectAssign(Alert, { selector: alertSelector, - constructor: Alert, - }; + init: alertInitCallback, + getInstance: getAlertInstance, + }); + /** + * A global namespace for aria-pressed. + * @type {string} + */ + const ariaPressed = 'aria-pressed'; + + /** + * Shortcut for `HTMLElement.setAttribute()` method. + * @param {HTMLElement | Element} element target element + * @param {string} attribute attribute name + * @param {string} value attribute value + * @returns {void} + */ + const setAttribute = (element, attribute, value) => element.setAttribute(attribute, value); + + /** + * Add class to `HTMLElement.classList`. + * + * @param {HTMLElement | Element} element target + * @param {string} classNAME to add + * @returns {void} + */ function addClass(element, classNAME) { element.classList.add(classNAME); } + /** + * Global namespace for most components active class. + */ const activeClass = 'active'; + /** + * Global namespace for most components `toggle` option. + */ const dataBsToggle = 'data-bs-toggle'; + /** @type {string} */ + const buttonString = 'button'; + + /** @type {string} */ + const buttonComponent = 'Button'; + /* Native JavaScript for Bootstrap 5 | Button ---------------------------------------------*/ // BUTTON PRIVATE GC // ================= - const buttonString = 'button'; - const buttonComponent = 'Button'; const buttonSelector = `[${dataBsToggle}="${buttonString}"]`; - const ariaPressed = 'aria-pressed'; + + /** + * Static method which returns an existing `Button` instance associated + * to a target `Element`. + * + * @type {BSN.GetInstance