#![forbid(unsafe_code)]
#![warn(rust_2018_idioms)]
#![warn(rust_2021_compatibility)]
#![cfg_attr(feature = "unstable", feature(ip))]
// The recursion_limit is mainly triggered by the json!() macro.
// The more key/value pairs there are, the more recursion occurs.
// We want to keep this as low as possible, but not higher than 128.
// Going above 128 will cause rust-analyzer to fail.
#![recursion_limit = "87"]

// When the `enable_mimalloc` feature is enabled, use MiMalloc as the global allocator instead of the default malloc.
#[cfg(feature = "enable_mimalloc")]
use mimalloc::MiMalloc;
#[cfg(feature = "enable_mimalloc")]
#[cfg_attr(feature = "enable_mimalloc", global_allocator)]
static GLOBAL: MiMalloc = MiMalloc;
#[macro_use]
extern crate rocket;
#[macro_use]
extern crate serde;
#[macro_use]
extern crate serde_json;
#[macro_use]
extern crate log;
#[macro_use]
extern crate diesel;
#[macro_use]
extern crate diesel_migrations;

use std::{
    fs::{canonicalize, create_dir_all},
    panic,
    path::Path,
    process::exit,
    str::FromStr,
    thread,
};

#[macro_use]
mod error;
mod api;
mod auth;
mod config;
mod crypto;
#[macro_use]
mod db;
mod mail;
mod ratelimit;
mod util;

pub use config::CONFIG;
pub use error::{Error, MapResult};
use rocket::data::{Limits, ToByteUnit};
pub use util::is_running_in_docker;

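// Entry point: parse CLI arguments, print the startup banner, initialize logging,
// run the startup checks, create the required directories and the database pool,
// start the job scheduler, and finally launch the Rocket server.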
#[rocket::main]
async fn main() -> Result<(), Error> {
    parse_args();
    launch_info();

    use log::LevelFilter as LF;
    let level = LF::from_str(&CONFIG.log_level()).expect("Valid log level");
    init_logging(level).ok();

    let extra_debug = matches!(level, LF::Trace | LF::Debug);

    check_data_folder();
    check_rsa_keys().unwrap_or_else(|_| {
        error!("Error creating keys, exiting...");
        exit(1);
    });
    check_web_vault();

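    // Make sure the directories Vaultwarden writes to exist before accepting requests.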
    create_dir(&CONFIG.icon_cache_folder(), "icon cache");
    create_dir(&CONFIG.tmp_folder(), "tmp folder");
    create_dir(&CONFIG.sends_folder(), "sends folder");
    create_dir(&CONFIG.attachments_folder(), "attachments folder");

    let pool = create_db_pool().await;
    schedule_jobs(pool.clone()).await;
    crate::db::models::TwoFactor::migrate_u2f_to_webauthn(&pool.get().await.unwrap()).await.unwrap();

    launch_rocket(pool, extra_debug).await // Blocks until program termination.
}

const HELP: &str = "\
Alternative implementation of the Bitwarden server API written in Rust

USAGE:
    vaultwarden

FLAGS:
    -h, --help       Prints help information
    -v, --version    Prints the app version
";

pub const VERSION: Option<&str> = option_env!("VW_VERSION");

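// Handle `-h`/`--help` and `-v`/`--version`; both print to stdout and exit immediately.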
fn parse_args() {
    let mut pargs = pico_args::Arguments::from_env();
    let version = VERSION.unwrap_or("(Version info from Git not present)");

    if pargs.contains(["-h", "--help"]) {
        println!("vaultwarden {}", version);
        print!("{}", HELP);
        exit(0);
    } else if pargs.contains(["-v", "--version"]) {
        println!("vaultwarden {}", version);
        exit(0);
    }
}

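// Print the startup banner, including the version (when known) and the support/issue links.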
fn launch_info() {
    println!("/--------------------------------------------------------------------\\");
    println!("|                        Starting Vaultwarden                        |");

    if let Some(version) = VERSION {
        println!("|{:^68}|", format!("Version {}", version));
    }

    println!("|--------------------------------------------------------------------|");
    println!("| This is an *unofficial* Bitwarden implementation, DO NOT use the   |");
    println!("| official channels to report bugs/features, regardless of client.   |");
    println!("| Send usage/configuration questions or feature requests to:         |");
    println!("|   https://vaultwarden.discourse.group/                             |");
    println!("| Report suspected bugs/issues in the software itself at:            |");
    println!("|   https://github.com/dani-garcia/vaultwarden/issues/new            |");
    println!("\\--------------------------------------------------------------------/\n");
}

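// Build and install the fern logging dispatcher: apply the configured level, tune or
// silence noisy dependency targets, optionally chain a log file and/or syslog, and
// install a panic hook that routes panics through the logger instead of stderr.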
fn init_logging(level: log::LevelFilter) -> Result<(), fern::InitError> {
    // Enable trust-dns logging only when the main log level is Debug or Trace; otherwise
    // its warn-level timeout messages would clutter the logs.
    let trust_dns_level = if level >= log::LevelFilter::Debug {
        level
    } else {
        log::LevelFilter::Off
    };

    let mut logger = fern::Dispatch::new()
        .level(level)
        // Hide unknown certificate errors if using self-signed
        .level_for("rustls::session", log::LevelFilter::Off)
        // Hide failed to close stream messages
        .level_for("hyper::server", log::LevelFilter::Warn)
        // Silence rocket logs
        .level_for("_", log::LevelFilter::Off)
        .level_for("rocket::launch", log::LevelFilter::Error)
        .level_for("rocket::launch_", log::LevelFilter::Error)
        .level_for("rocket::rocket", log::LevelFilter::Warn)
        .level_for("rocket::server", log::LevelFilter::Warn)
        .level_for("rocket::fairing::fairings", log::LevelFilter::Warn)
        .level_for("rocket::shield::shield", log::LevelFilter::Warn)
        // Never show html5ever and hyper::proto logs, too noisy
        .level_for("html5ever", log::LevelFilter::Off)
        .level_for("hyper::proto", log::LevelFilter::Off)
        .level_for("hyper::client", log::LevelFilter::Off)
        // Prevent cookie_store logs
        .level_for("cookie_store", log::LevelFilter::Off)
        // Variable level for trust-dns used by reqwest
        .level_for("trust_dns_proto", trust_dns_level)
        .chain(std::io::stdout());

    // Enable SMTP debug logging only when it is specifically needed.
    // It can contain sensitive information that we do not want in the default debug/trace logging.
    if CONFIG.smtp_debug() {
        println!(
            "[WARNING] SMTP Debugging is enabled (SMTP_DEBUG=true). Sensitive information could be disclosed via logs!"
        );
        println!("[WARNING] Only enable SMTP_DEBUG during troubleshooting!\n");
        logger = logger.level_for("lettre::transport::smtp", log::LevelFilter::Debug)
    } else {
        logger = logger.level_for("lettre::transport::smtp", log::LevelFilter::Off)
    }

    if CONFIG.extended_logging() {
        logger = logger.format(|out, message, record| {
            out.finish(format_args!(
                "[{}][{}][{}] {}",
                chrono::Local::now().format(&CONFIG.log_timestamp_format()),
                record.target(),
                record.level(),
                message
            ))
        });
    } else {
        logger = logger.format(|out, message, _| out.finish(format_args!("{}", message)));
    }

    if let Some(log_file) = CONFIG.log_file() {
        logger = logger.chain(fern::log_file(log_file)?);
    }

    #[cfg(not(windows))]
    {
        if cfg!(feature = "enable_syslog") || CONFIG.use_syslog() {
            logger = chain_syslog(logger);
        }
    }

    logger.apply()?;

    // Catch panics and log them instead of printing the default output to stderr.
    panic::set_hook(Box::new(|info| {
        let thread = thread::current();
        let thread = thread.name().unwrap_or("unnamed");

        let msg = match info.payload().downcast_ref::<&'static str>() {
            Some(s) => *s,
            None => match info.payload().downcast_ref::<String>() {
                Some(s) => &**s,
                None => "Box<Any>",
            },
        };

        let backtrace = backtrace::Backtrace::new();

        match info.location() {
            Some(location) => {
                error!(
                    target: "panic", "thread '{}' panicked at '{}': {}:{}\n{:?}",
                    thread,
                    msg,
                    location.file(),
                    location.line(),
                    backtrace
                );
            }
            None => error!(
                target: "panic",
                "thread '{}' panicked at '{}'\n{:?}",
                thread,
                msg,
                backtrace
            ),
        }
    }));

    Ok(())
}

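// Chain a local syslog output onto the logger (non-Windows only). If the syslog
// socket can't be reached, the error is reported and the dispatcher is returned unchanged.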
#[cfg(not(windows))]
fn chain_syslog(logger: fern::Dispatch) -> fern::Dispatch {
    let syslog_fmt = syslog::Formatter3164 {
        facility: syslog::Facility::LOG_USER,
        hostname: None,
        process: "vaultwarden".into(),
        pid: 0,
    };

    match syslog::unix(syslog_fmt) {
        Ok(sl) => logger.chain(sl),
        Err(e) => {
            error!("Unable to connect to syslog: {:?}", e);
            logger
        }
    }
}

fn create_dir(path: &str, description: &str) {
    // Try to create the specified dir, if it doesn't already exist.
    let err_msg = format!("Error creating {} directory '{}'", description, path);
    create_dir_all(path).expect(&err_msg);
}

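// Verify that the configured data folder exists, hinting at a missing Docker volume
// mount where applicable, and exit if it doesn't.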
fn check_data_folder() {
    let data_folder = &CONFIG.data_folder();
    let path = Path::new(data_folder);
    if !path.exists() {
        error!("Data folder '{}' doesn't exist.", data_folder);
        if is_running_in_docker() {
            error!("Verify that your data volume is mounted at the correct location.");
        } else {
            error!("Create the data folder and try again.");
        }
        exit(1);
    }
}

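// Ensure the RSA key pair used by `auth` for token signing exists, generating a
// 2048-bit key on first run, then load it via auth::load_keys().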
fn check_rsa_keys() -> Result<(), crate::error::Error> {
    // If the RSA keys don't exist, try to create them
    let priv_path = CONFIG.private_rsa_key();
    let pub_path = CONFIG.public_rsa_key();

    if !util::file_exists(&priv_path) {
        let rsa_key = openssl::rsa::Rsa::generate(2048)?;

        let priv_key = rsa_key.private_key_to_pem()?;
        crate::util::write_file(&priv_path, &priv_key)?;
        info!("Private key created correctly.");
    }

    if !util::file_exists(&pub_path) {
        let rsa_key = openssl::rsa::Rsa::private_key_from_pem(&util::read_file(&priv_path)?)?;

        let pub_key = rsa_key.public_key_to_pem()?;
        crate::util::write_file(&pub_path, &pub_key)?;
        info!("Public key created correctly.");
    }

    auth::load_keys();
    Ok(())
}

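// When the web vault is enabled, verify that its static files are present; otherwise
// print installation instructions and exit.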
fn check_web_vault() {
    if !CONFIG.web_vault_enabled() {
        return;
    }

    let index_path = Path::new(&CONFIG.web_vault_folder()).join("index.html");

    if !index_path.exists() {
        error!(
            "Web vault is not found at '{}'. To install it, please follow the steps in: ",
            CONFIG.web_vault_folder()
        );
        error!("https://github.com/dani-garcia/vaultwarden/wiki/Building-binary#install-the-web-vault");
        error!("You can also set the environment variable 'WEB_VAULT_ENABLED=false' to disable it");
        exit(1);
    }
}

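// Create the database connection pool, retrying per CONFIG.db_connection_retries()
// and exiting if no connection can be established.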
async fn create_db_pool() -> db::DbPool {
    match util::retry_db(db::DbPool::from_config, CONFIG.db_connection_retries()).await {
        Ok(p) => p,
        Err(e) => {
            error!("Error creating database pool: {:?}", e);
            exit(1);
        }
    }
}

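// Configure and launch Rocket: set the temp dir and payload limits, mount all route
// groups under the configured domain path, attach the custom fairings, register a
// Ctrl-C handler for graceful shutdown, and block until the server exits.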
async fn launch_rocket(pool: db::DbPool, extra_debug: bool) -> Result<(), Error> {
    let basepath = &CONFIG.domain_path();

    let mut config = rocket::Config::from(rocket::Config::figment());
    config.temp_dir = canonicalize(CONFIG.tmp_folder()).unwrap().into();
    config.limits = Limits::new() //
        .limit("json", 10.megabytes())
        .limit("data-form", 150.megabytes())
        .limit("file", 150.megabytes());

    // If adding more paths here, consider also adding them to
    // crate::utils::LOGGED_ROUTES to make sure they appear in the log
    let instance = rocket::custom(config)
        .mount([basepath, "/"].concat(), api::web_routes())
        .mount([basepath, "/api"].concat(), api::core_routes())
        .mount([basepath, "/admin"].concat(), api::admin_routes())
        .mount([basepath, "/identity"].concat(), api::identity_routes())
        .mount([basepath, "/icons"].concat(), api::icons_routes())
        .mount([basepath, "/notifications"].concat(), api::notifications_routes())
        .manage(pool)
        .manage(api::start_notification_server())
        .attach(util::AppHeaders())
        .attach(util::Cors())
        .attach(util::BetterLogging(extra_debug))
        .ignite()
        .await?;

    CONFIG.set_rocket_shutdown_handle(instance.shutdown());
    ctrlc::set_handler(move || {
        info!("Exiting vaultwarden!");
        CONFIG.shutdown();
    })
    .expect("Error setting Ctrl-C handler");

    instance.launch().await?;

    info!("Vaultwarden process exited!");
    Ok(())
}

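// Run the job scheduler on a dedicated thread. Each job is registered only when its
// schedule is configured: a cron-style expression as parsed by the `job_scheduler`
// crate (e.g. "0 5 * * * *", assuming the seconds-first format, runs at five minutes
// past every hour). The async job bodies are spawned onto the shared Tokio runtime.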
async fn schedule_jobs(pool: db::DbPool) {
    if CONFIG.job_poll_interval_ms() == 0 {
        info!("Job scheduler disabled.");
        return;
    }

    let runtime = tokio::runtime::Runtime::new().unwrap();

    thread::Builder::new()
        .name("job-scheduler".to_string())
        .spawn(move || {
            use job_scheduler::{Job, JobScheduler};
            let _runtime_guard = runtime.enter();

            let mut sched = JobScheduler::new();

            // Purge sends that are past their deletion date.
            if !CONFIG.send_purge_schedule().is_empty() {
                sched.add(Job::new(CONFIG.send_purge_schedule().parse().unwrap(), || {
                    runtime.spawn(api::purge_sends(pool.clone()));
                }));
            }

            // Purge trashed items that are old enough to be auto-deleted.
            if !CONFIG.trash_purge_schedule().is_empty() {
                sched.add(Job::new(CONFIG.trash_purge_schedule().parse().unwrap(), || {
                    runtime.spawn(api::purge_trashed_ciphers(pool.clone()));
                }));
            }

            // Send email notifications about incomplete 2FA logins, which potentially
            // indicates that a user's master password has been compromised.
            if !CONFIG.incomplete_2fa_schedule().is_empty() {
                sched.add(Job::new(CONFIG.incomplete_2fa_schedule().parse().unwrap(), || {
                    runtime.spawn(api::send_incomplete_2fa_notifications(pool.clone()));
                }));
            }

            // Grant emergency access requests that have met the required wait time.
            // This job should run before the emergency access reminders job to avoid
            // sending reminders for requests that are about to be granted anyway.
            if !CONFIG.emergency_request_timeout_schedule().is_empty() {
                sched.add(Job::new(CONFIG.emergency_request_timeout_schedule().parse().unwrap(), || {
                    runtime.spawn(api::emergency_request_timeout_job(pool.clone()));
                }));
            }

            // Send reminders to emergency access grantors that there are pending
            // emergency access requests.
            if !CONFIG.emergency_notification_reminder_schedule().is_empty() {
                sched.add(Job::new(CONFIG.emergency_notification_reminder_schedule().parse().unwrap(), || {
                    runtime.spawn(api::emergency_notification_reminder_job(pool.clone()));
                }));
            }

            // Periodically check for jobs to run. We probably won't need any
            // jobs that run more often than once a minute, so a default poll
            // interval of 30 seconds should be sufficient. Users who want to
            // schedule jobs to run more frequently for some reason can reduce
            // the poll interval accordingly.
            //
            // Note that the scheduler checks jobs in the order in which they
            // were added, so if two jobs are both eligible to run at a given
            // tick, the one that was added earlier will run first.
            loop {
                sched.tick();
                runtime.block_on(async move {
                    tokio::time::sleep(tokio::time::Duration::from_millis(CONFIG.job_poll_interval_ms())).await
                });
            }
        })
        .expect("Error spawning job scheduler thread");
}