Improve sync speed and update dependency versions

Improved the sync speed by resolving the N+1 query issues.
Resolves #1402 and #1453.

With this change only a handful of queries are needed to retrieve all the
important data, and the matching is done in code, in memory.

On a very large database the sync time dropped by roughly a factor of three.
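
As a rough illustration of the batching pattern (hypothetical, simplified types, not the actual Vaultwarden code): instead of one attachment query per cipher, all attachments are fetched in a single bulk query and the results are grouped in memory.

use std::collections::HashMap;

struct Attachment {
    cipher_uuid: String,
}

// Before (N+1): one Attachment::find_by_cipher() query per cipher.
// After: one bulk query for all ciphers, then O(1) in-memory lookups.
fn group_by_cipher(attachments: Vec<Attachment>) -> HashMap<String, Vec<Attachment>> {
    let mut grouped: HashMap<String, Vec<Attachment>> = HashMap::new();
    for a in attachments {
        grouped.entry(a.cipher_uuid.clone()).or_default().push(a);
    }
    grouped
}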

Also updated miscellaneous crates and GitHub Actions versions.
Author: BlackDex
Date: 2022-05-04 21:13:05 +02:00
Parent: 3abf173d89
Commit: 3ca85028ea
38 changed files with 6084 additions and 5835 deletions


@@ -46,7 +46,7 @@ jobs:
steps:
# Checkout the repo
- name: Checkout
uses: actions/checkout@ec3a7ce113134d7a93b817d10a8272cb61118579 # v2.4.0
uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b # v3.0.2
# End Checkout the repo
@@ -140,7 +140,7 @@ jobs:
# Upload artifact to Github Actions
- name: Upload artifact
uses: actions/upload-artifact@82c141cc518b40d92cc801eee768e7aafc9c2fa2 # v2.3.1
uses: actions/upload-artifact@6673cd052c4cd6fcf4b4e6e60ea986c889389535 # v3.0.0
with:
name: vaultwarden-${{ matrix.target-triple }}${{ matrix.ext }}
path: target/${{ matrix.target-triple }}/release/vaultwarden${{ matrix.ext }}


@@ -16,18 +16,18 @@ jobs:
steps:
# Checkout the repo
- name: Checkout
uses: actions/checkout@ec3a7ce113134d7a93b817d10a8272cb61118579 # v2.4.0
uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b # v3.0.2
# End Checkout the repo
# Download hadolint
# Download hadolint - https://github.com/hadolint/hadolint/releases
- name: Download hadolint
shell: bash
run: |
sudo curl -L https://github.com/hadolint/hadolint/releases/download/v${HADOLINT_VERSION}/hadolint-$(uname -s)-$(uname -m) -o /usr/local/bin/hadolint && \
sudo chmod +x /usr/local/bin/hadolint
env:
HADOLINT_VERSION: 2.8.0
HADOLINT_VERSION: 2.10.0
# End Download hadolint
# Test Dockerfiles


@@ -31,7 +31,7 @@ jobs:
steps:
- name: Skip Duplicates Actions
id: skip_check
uses: fkirc/skip-duplicate-actions@f75dd6564bb646f95277dc8c3b80612e46a4a1ea # v3.4.1
uses: fkirc/skip-duplicate-actions@9d116fa7e55f295019cfab7e3ab72b478bcf7fdd # v4.0.0
with:
cancel_others: 'true'
# Only run this when not creating a tag
@@ -60,13 +60,13 @@ jobs:
steps:
# Checkout the repo
- name: Checkout
uses: actions/checkout@ec3a7ce113134d7a93b817d10a8272cb61118579 # v2.4.0
uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b # v3.0.2
with:
fetch-depth: 0
# Login to Docker Hub
- name: Login to Docker Hub
uses: docker/login-action@42d299face0c5c43a0487c477f595ac9cf22f1a7 # v1.12.0
uses: docker/login-action@49ed152c8eca782a232dede0303416e8f356c37b # v2.0.0
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}


@@ -1,7 +1,7 @@
---
repos:
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v4.1.0
rev: v4.2.0
hooks:
- id: check-yaml
- id: check-json

Cargo.lock (generated, 231 changed lines): file diff suppressed because it is too large.


@@ -3,7 +3,7 @@ name = "vaultwarden"
version = "1.0.0"
authors = ["Daniel García <dani-garcia@users.noreply.github.com>"]
edition = "2021"
rust-version = "1.59"
rust-version = "1.60"
resolver = "2"
repository = "https://github.com/dani-garcia/vaultwarden"
@@ -35,9 +35,10 @@ syslog = "6.0.1" # Needs to be v4 until fern is updated
[dependencies]
# Logging
log = "0.4.16"
log = "0.4.17"
fern = { version = "0.6.1", features = ["syslog-6"] }
tracing = { version = "0.1.34", features = ["log"] } # Needed to have lettre and webauthn-rs trace logging to work
backtrace = "0.3.65" # Logging panics to logfile instead stderr only
# A `dotenv` implementation for Rust
@@ -47,7 +48,7 @@ dotenvy = { version = "0.15.1", default-features = false }
once_cell = "1.10.0"
# Numerical libraries
num-traits = "0.2.14"
num-traits = "0.2.15"
num-derive = "0.3.3"
# Web framework
@@ -60,14 +61,14 @@ chashmap = "2.2.2" # Concurrent hashmap implementation
# Async futures
futures = "0.3.21"
tokio = { version = "1.17.0", features = ["rt-multi-thread", "fs", "io-util", "parking_lot"] }
tokio = { version = "1.18.1", features = ["rt-multi-thread", "fs", "io-util", "parking_lot", "time"] }
# A generic serialization/deserialization framework
serde = { version = "1.0.136", features = ["derive"] }
serde_json = "1.0.79"
serde = { version = "1.0.137", features = ["derive"] }
serde_json = "1.0.81"
# A safe, extensible ORM and Query builder
diesel = { version = "1.4.8", features = [ "chrono", "r2d2"] }
diesel = { version = "1.4.8", features = ["chrono", "r2d2"] }
diesel_migrations = "1.4.0"
# Bundled SQLite
@@ -78,7 +79,7 @@ rand = "0.8.5"
ring = "0.16.20"
# UUID generation
uuid = { version = "0.8.2", features = ["v4"] }
uuid = { version = "1.0.0", features = ["v4"] }
# Date and time libraries
chrono = { version = "0.4.19", features = ["clock", "serde"], default-features = false }
@@ -107,8 +108,8 @@ webauthn-rs = "0.3.2"
url = "2.2.2"
# Email libraries
lettre = { version = "0.10.0-rc.5", features = ["smtp-transport", "builder", "serde", "native-tls", "hostname", "tracing"], default-features = false }
idna = "0.2.3" # Punycode conversion
lettre = { version = "0.10.0-rc.6", features = ["smtp-transport", "builder", "serde", "native-tls", "hostname", "tracing"], default-features = false }
percent-encoding = "2.1.0" # URL encoding library used for URL's in the emails
# Template library
@@ -129,7 +130,7 @@ cookie = "0.16.0"
cookie_store = "0.16.0"
# Used by U2F, JWT and Postgres
openssl = "0.10.38"
openssl = "0.10.40"
# CLI argument parsing
pico-args = "0.4.2"
@@ -139,21 +140,22 @@ paste = "1.0.7"
governor = "0.4.2"
# Capture CTRL+C
ctrlc = { version = "3.2.1", features = ["termination"] }
ctrlc = { version = "3.2.2", features = ["termination"] }
# Allow overriding the default memory allocator
# Mainly used for the musl builds, since the default musl malloc is very slow
mimalloc = { version = "0.1.28", features = ["secure"], default-features = false, optional = true }
mimalloc = { version = "0.1.29", features = ["secure"], default-features = false, optional = true }
[patch.crates-io]
rocket = { git = 'https://github.com/SergioBenitez/Rocket', rev = '6bdd2f818642683b3aadbda51d7573abefe045ab' }
rocket = { git = 'https://github.com/SergioBenitez/Rocket', rev = '761ffb009ea9d35c32d3c8eecd948ec4434cd0a3' }
# The maintainer of the `job_scheduler` crate doesn't seem to have responded
# to any issues or PRs for almost a year (as of April 2021). This hopefully
# temporary fork updates Cargo.toml to use more up-to-date dependencies.
# In particular, `cron` has since implemented parsing of some common syntax
# that wasn't previously supported (https://github.com/zslayton/cron/pull/64).
job_scheduler = { git = 'https://github.com/jjlin/job_scheduler', rev = 'ee023418dbba2bfe1e30a5fd7d937f9e33739806' }
# 2022-05-04: Forked/updated job_scheduler again to use the latest dependencies and some fixes.
job_scheduler = { git = 'https://github.com/BlackDex/job_scheduler', rev = '9100fc596a083fd9c0b560f8f11f108e0a19d07e' }
# Strip debuginfo from the release builds
# Also enable thin LTO for some optimizations


@@ -3,7 +3,7 @@
# This file was generated using a Jinja2 template.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.
{% set build_stage_base_image = "rust:1.59-bullseye" %}
{% set build_stage_base_image = "rust:1.60-bullseye" %}
{% if "alpine" in target_file %}
{% if "amd64" in target_file %}
{% set build_stage_base_image = "blackdex/rust-musl:x86_64-musl-stable" %}


@@ -27,7 +27,7 @@
FROM vaultwarden/web-vault@sha256:ad3b47c152206f25f2d2b70a93e68650a90d5c8105b894814f9bc7599517a603 as vault
########################## BUILD IMAGE ##########################
FROM rust:1.59-bullseye as build
FROM rust:1.60-bullseye as build

The identical rust:1.59-bullseye to rust:1.60-bullseye build-image bump is repeated in the seven other generated Dockerfiles in this diff.


@@ -1,7 +1,7 @@
#version = "One"
# version = "Two"
edition = "2021"
max_width = 120
newline_style = "Unix"
use_small_heuristics = "Off"
#struct_lit_single_line = false
#overflow_delimited_expr = true
# struct_lit_single_line = false
# overflow_delimited_expr = true


@@ -101,30 +101,36 @@ struct SyncData {
async fn sync(data: SyncData, headers: Headers, conn: DbConn) -> Json<Value> {
let user_json = headers.user.to_json(&conn).await;
let folders = Folder::find_by_user(&headers.user.uuid, &conn).await;
let folders_json: Vec<Value> = folders.iter().map(Folder::to_json).collect();
// Get all ciphers which are visible by the user
let ciphers = Cipher::find_by_user_visible(&headers.user.uuid, &conn).await;
let collections_json = stream::iter(Collection::find_by_user_uuid(&headers.user.uuid, &conn).await)
let cipher_sync_data = CipherSyncData::new(&headers.user.uuid, &ciphers, &conn).await;
// Lets generate the ciphers_json using all the gathered info
let ciphers_json: Vec<Value> = stream::iter(ciphers)
.then(|c| async {
let c = c; // Move out this single variable
c.to_json_details(&headers.user.uuid, &conn).await
c.to_json(&headers.host, &headers.user.uuid, Some(&cipher_sync_data), &conn).await
})
.collect::<Vec<Value>>()
.collect()
.await;
let policies = OrgPolicy::find_confirmed_by_user(&headers.user.uuid, &conn);
let policies_json: Vec<Value> = policies.await.iter().map(OrgPolicy::to_json).collect();
let ciphers_json = stream::iter(Cipher::find_by_user_visible(&headers.user.uuid, &conn).await)
let collections_json: Vec<Value> = stream::iter(Collection::find_by_user_uuid(&headers.user.uuid, &conn).await)
.then(|c| async {
let c = c; // Move out this single variable
c.to_json(&headers.host, &headers.user.uuid, &conn).await
c.to_json_details(&headers.user.uuid, Some(&cipher_sync_data), &conn).await
})
.collect::<Vec<Value>>()
.collect()
.await;
let sends = Send::find_by_user(&headers.user.uuid, &conn);
let sends_json: Vec<Value> = sends.await.iter().map(|s| s.to_json()).collect();
let folders_json: Vec<Value> =
Folder::find_by_user(&headers.user.uuid, &conn).await.iter().map(Folder::to_json).collect();
let sends_json: Vec<Value> =
Send::find_by_user(&headers.user.uuid, &conn).await.iter().map(Send::to_json).collect();
let policies_json: Vec<Value> =
OrgPolicy::find_confirmed_by_user(&headers.user.uuid, &conn).await.iter().map(OrgPolicy::to_json).collect();
let domains_json = if data.exclude_domains {
Value::Null
@@ -147,10 +153,13 @@ async fn sync(data: SyncData, headers: Headers, conn: DbConn) -> Json<Value> {
#[get("/ciphers")]
async fn get_ciphers(headers: Headers, conn: DbConn) -> Json<Value> {
let ciphers_json = stream::iter(Cipher::find_by_user_visible(&headers.user.uuid, &conn).await)
let ciphers = Cipher::find_by_user_visible(&headers.user.uuid, &conn).await;
let cipher_sync_data = CipherSyncData::new(&headers.user.uuid, &ciphers, &conn).await;
let ciphers_json = stream::iter(ciphers)
.then(|c| async {
let c = c; // Move out this single variable
c.to_json(&headers.host, &headers.user.uuid, &conn).await
c.to_json(&headers.host, &headers.user.uuid, Some(&cipher_sync_data), &conn).await
})
.collect::<Vec<Value>>()
.await;
@@ -173,7 +182,7 @@ async fn get_cipher(uuid: String, headers: Headers, conn: DbConn) -> JsonResult
err!("Cipher is not owned by user")
}
Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, &conn).await))
Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, None, &conn).await))
}
#[get("/ciphers/<uuid>/admin")]
@@ -303,7 +312,7 @@ async fn post_ciphers(data: JsonUpcase<CipherData>, headers: Headers, conn: DbCo
let mut cipher = Cipher::new(data.Type, data.Name.clone());
update_cipher_from_data(&mut cipher, data, &headers, false, &conn, &nt, UpdateType::CipherCreate).await?;
Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, &conn).await))
Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, None, &conn).await))
}
/// Enforces the personal ownership policy on user-owned ciphers, if applicable.
@@ -582,7 +591,7 @@ async fn put_cipher(
update_cipher_from_data(&mut cipher, data, &headers, false, &conn, &nt, UpdateType::CipherUpdate).await?;
Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, &conn).await))
Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, None, &conn).await))
}
#[derive(Deserialize)]
@@ -797,7 +806,7 @@ async fn share_cipher_by_uuid(
)
.await?;
Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, conn).await))
Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, None, conn).await))
}
/// v2 API for downloading an attachment. This just redirects the client to
@@ -866,7 +875,7 @@ async fn post_attachment_v2(
"AttachmentId": attachment_id,
"Url": url,
"FileUploadType": FileUploadType::Direct as i32,
response_key: cipher.to_json(&headers.host, &headers.user.uuid, &conn).await,
response_key: cipher.to_json(&headers.host, &headers.user.uuid, None, &conn).await,
})))
}
@@ -1035,7 +1044,7 @@ async fn post_attachment(
let (cipher, conn) = save_attachment(attachment, uuid, data, &headers, conn, nt).await?;
Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, &conn).await))
Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, None, &conn).await))
}
#[post("/ciphers/<uuid>/attachment-admin", format = "multipart/form-data", data = "<data>")]
@@ -1399,7 +1408,7 @@ async fn _restore_cipher_by_uuid(uuid: &str, headers: &Headers, conn: &DbConn, n
cipher.save(conn).await?;
nt.send_cipher_update(UpdateType::CipherUpdate, &cipher, &cipher.update_users_revision(conn).await);
Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, conn).await))
Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, None, conn).await))
}
async fn _restore_multiple_ciphers(
@@ -1463,3 +1472,66 @@ async fn _delete_cipher_attachment_by_id(
nt.send_cipher_update(UpdateType::CipherUpdate, &cipher, &cipher.update_users_revision(conn).await);
Ok(())
}
/// This will hold all the necessary data to improve a full sync of all the ciphers.
/// It can be used during the `Cipher::to_json()` call.
/// It prevents the so-called N+1 SQL issue by running just a few queries which hold all the data needed.
/// This will not speed up a single cipher.to_json() call much, so it is better not to use it for those calls.
pub struct CipherSyncData {
pub cipher_attachments: HashMap<String, Vec<Attachment>>,
pub cipher_folders: HashMap<String, String>,
pub cipher_favorites: HashSet<String>,
pub cipher_collections: HashMap<String, Vec<String>>,
pub user_organizations: HashMap<String, UserOrganization>,
pub user_collections: HashMap<String, CollectionUser>,
}
impl CipherSyncData {
pub async fn new(user_uuid: &str, ciphers: &Vec<Cipher>, conn: &DbConn) -> Self {
// Generate a list of Cipher UUID's to be used during a query filter with an eq_any.
let cipher_uuids = stream::iter(ciphers).map(|c| c.uuid.to_string()).collect::<Vec<String>>().await;
// Generate a list of Cipher UUID's containing a Vec with one or more Attachment records
let mut cipher_attachments: HashMap<String, Vec<Attachment>> = HashMap::new();
for attachment in Attachment::find_all_by_ciphers(&cipher_uuids, conn).await {
cipher_attachments.entry(attachment.cipher_uuid.to_string()).or_default().push(attachment);
}
// Generate a HashMap with the Cipher UUID as key and the Folder UUID as value
let cipher_folders: HashMap<String, String> =
stream::iter(FolderCipher::find_by_user(user_uuid, conn).await).collect().await;
// Generate a HashSet of all the Cipher UUID's which are marked as favorite
let cipher_favorites: HashSet<String> =
stream::iter(Favorite::get_all_cipher_uuid_by_user(user_uuid, conn).await).collect().await;
// Generate a HashMap with the Cipher UUID as key and one or more Collection UUID's
let mut cipher_collections: HashMap<String, Vec<String>> = HashMap::new();
for (cipher, collection) in Cipher::get_collections_with_cipher_by_user(user_uuid, conn).await {
cipher_collections.entry(cipher).or_default().push(collection);
}
// Generate a HashMap with the Organization UUID as key and the UserOrganization record
let user_organizations: HashMap<String, UserOrganization> =
stream::iter(UserOrganization::find_by_user(user_uuid, conn).await)
.map(|uo| (uo.org_uuid.to_string(), uo))
.collect()
.await;
// Generate a HashMap with the User_Collections UUID as key and the CollectionUser record
let user_collections: HashMap<String, CollectionUser> =
stream::iter(CollectionUser::find_by_user(user_uuid, conn).await)
.map(|uc| (uc.collection_uuid.to_string(), uc))
.collect()
.await;
Self {
cipher_attachments,
cipher_folders,
cipher_favorites,
cipher_collections,
user_organizations,
user_collections,
}
}
}
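
A condensed sketch of how Cipher::to_json() consumes this cache (illustrative stand-in types and signatures; the real code falls back to a per-cipher database query when no CipherSyncData is passed):

use std::collections::{HashMap, HashSet};

struct SyncCache {
    cipher_folders: HashMap<String, String>,
    cipher_favorites: HashSet<String>,
}

// Full sync: O(1) lookup in the prefetched map instead of a query per cipher.
fn folder_id(cipher_uuid: &str, cache: Option<&SyncCache>) -> Option<String> {
    match cache {
        Some(c) => c.cipher_folders.get(cipher_uuid).cloned(),
        None => None, // single-cipher calls pass None and query the DB instead
    }
}

fn is_favorite(cipher_uuid: &str, cache: Option<&SyncCache>) -> bool {
    // (The real fallback for None queries the DB; omitted here.)
    cache.map_or(false, |c| c.cipher_favorites.contains(cipher_uuid))
}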


@@ -5,7 +5,7 @@ use serde_json::Value;
use std::borrow::Borrow;
use crate::{
api::{EmptyResult, JsonResult, JsonUpcase, NumberOrString},
api::{core::CipherSyncData, EmptyResult, JsonResult, JsonUpcase, NumberOrString},
auth::{decode_emergency_access_invite, Headers},
db::{models::*, DbConn, DbPool},
mail, CONFIG,
@@ -595,10 +595,13 @@ async fn view_emergency_access(emer_id: String, headers: Headers, conn: DbConn)
err!("Emergency access not valid.")
}
let ciphers_json = stream::iter(Cipher::find_owned_by_user(&emergency_access.grantor_uuid, &conn).await)
let ciphers = Cipher::find_owned_by_user(&emergency_access.grantor_uuid, &conn).await;
let cipher_sync_data = CipherSyncData::new(&emergency_access.grantor_uuid, &ciphers, &conn).await;
let ciphers_json = stream::iter(ciphers)
.then(|c| async {
let c = c; // Move out this single variable
c.to_json(&host, &emergency_access.grantor_uuid, &conn).await
c.to_json(&host, &emergency_access.grantor_uuid, Some(&cipher_sync_data), &conn).await
})
.collect::<Vec<Value>>()
.await;


@@ -7,6 +7,7 @@ mod sends;
pub mod two_factor;
pub use ciphers::purge_trashed_ciphers;
pub use ciphers::CipherSyncData;
pub use emergency_access::{emergency_notification_reminder_job, emergency_request_timeout_job};
pub use sends::purge_sends;
pub use two_factor::send_incomplete_2fa_notifications;


@@ -4,7 +4,10 @@ use rocket::Route;
use serde_json::Value;
use crate::{
api::{EmptyResult, JsonResult, JsonUpcase, JsonUpcaseVec, Notify, NumberOrString, PasswordData, UpdateType},
api::{
core::CipherSyncData, EmptyResult, JsonResult, JsonUpcase, JsonUpcaseVec, Notify, NumberOrString, PasswordData,
UpdateType,
},
auth::{decode_invite, AdminHeaders, Headers, ManagerHeaders, ManagerHeadersLoose, OwnerHeaders},
db::{models::*, DbConn},
mail, CONFIG,
@@ -483,10 +486,13 @@ struct OrgIdData {
#[get("/ciphers/organization-details?<data..>")]
async fn get_org_details(data: OrgIdData, headers: Headers, conn: DbConn) -> Json<Value> {
let ciphers_json = stream::iter(Cipher::find_by_org(&data.organization_id, &conn).await)
let ciphers = Cipher::find_by_org(&data.organization_id, &conn).await;
let cipher_sync_data = CipherSyncData::new(&headers.user.uuid, &ciphers, &conn).await;
let ciphers_json = stream::iter(ciphers)
.then(|c| async {
let c = c; // Move out this single variable
c.to_json(&headers.host, &headers.user.uuid, &conn).await
c.to_json(&headers.host, &headers.user.uuid, Some(&cipher_sync_data), &conn).await
})
.collect::<Vec<Value>>()
.await;


@@ -206,16 +206,16 @@ macro_rules! db_run {
// Different code for each db
( $conn:ident: $( $($db:ident),+ $body:block )+ ) => {{
#[allow(unused)] use diesel::prelude::*;
#[allow(unused)] use crate::db::FromDb;
#[allow(unused)] use $crate::db::FromDb;
let conn = $conn.conn.clone();
let mut conn = conn.lock_owned().await;
match conn.as_mut().expect("internal invariant broken: self.connection is Some") {
$($(
#[cfg($db)]
crate::db::DbConnInner::$db($conn) => {
$crate::db::DbConnInner::$db($conn) => {
paste::paste! {
#[allow(unused)] use crate::db::[<__ $db _schema>]::{self as schema, *};
#[allow(unused)] use $crate::db::[<__ $db _schema>]::{self as schema, *};
#[allow(unused)] use [<__ $db _model>]::*;
}
@@ -227,16 +227,16 @@ macro_rules! db_run {
( @raw $conn:ident: $( $($db:ident),+ $body:block )+ ) => {{
#[allow(unused)] use diesel::prelude::*;
#[allow(unused)] use crate::db::FromDb;
#[allow(unused)] use $crate::db::FromDb;
let conn = $conn.conn.clone();
let mut conn = conn.lock_owned().await;
match conn.as_mut().expect("internal invariant broken: self.connection is Some") {
$($(
#[cfg($db)]
crate::db::DbConnInner::$db($conn) => {
$crate::db::DbConnInner::$db($conn) => {
paste::paste! {
#[allow(unused)] use crate::db::[<__ $db _schema>]::{self as schema, *};
#[allow(unused)] use $crate::db::[<__ $db _schema>]::{self as schema, *};
// @ RAW: #[allow(unused)] use [<__ $db _model>]::*;
}
@@ -297,7 +297,7 @@ macro_rules! db_object {
paste::paste! {
#[allow(unused)] use super::*;
#[allow(unused)] use diesel::prelude::*;
#[allow(unused)] use crate::db::[<__ $db _schema>]::*;
#[allow(unused)] use $crate::db::[<__ $db _schema>]::*;
$( #[$attr] )*
pub struct [<$name Db>] { $(
@@ -309,7 +309,7 @@ macro_rules! db_object {
#[inline(always)] pub fn to_db(x: &super::$name) -> Self { Self { $( $field: x.$field.clone(), )+ } }
}
impl crate::db::FromDb for [<$name Db>] {
impl $crate::db::FromDb for [<$name Db>] {
type Output = super::$name;
#[allow(clippy::wrong_self_convention)]
#[inline(always)] fn from_db(self) -> Self::Output { super::$name { $( $field: self.$field, )+ } }
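
Aside: the crate:: to $crate:: substitutions above are the standard macro-hygiene idiom. $crate always resolves to the crate that defines the macro, so db_run!/db_object! expand correctly no matter where they are invoked from. A minimal sketch (illustrative names only):

#[macro_export]
macro_rules! make_thing {
    // $crate resolves to the defining crate even when this macro is
    // expanded from another crate or module context.
    () => {
        $crate::Thing::new()
    };
}

pub struct Thing;

impl Thing {
    pub fn new() -> Self {
        Thing
    }
}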


@@ -2,14 +2,12 @@ use std::io::ErrorKind;
use serde_json::Value;
use super::Cipher;
use crate::CONFIG;
db_object! {
#[derive(Identifiable, Queryable, Insertable, Associations, AsChangeset)]
#[derive(Identifiable, Queryable, Insertable, AsChangeset)]
#[table_name = "attachments"]
#[changeset_options(treat_none_as_null="true")]
#[belongs_to(super::Cipher, foreign_key = "cipher_uuid")]
#[primary_key(id)]
pub struct Attachment {
pub id: String,
@@ -188,4 +186,15 @@ impl Attachment {
.unwrap_or(0)
}}
}
pub async fn find_all_by_ciphers(cipher_uuids: &Vec<String>, conn: &DbConn) -> Vec<Self> {
db_run! { conn: {
attachments::table
.filter(attachments::cipher_uuid.eq_any(cipher_uuids))
.select(attachments::all_columns)
.load::<AttachmentDb>(conn)
.expect("Error loading attachments")
.from_db()
}}
}
}


@@ -1,19 +1,17 @@
use crate::CONFIG;
use chrono::{Duration, NaiveDateTime, Utc};
use serde_json::Value;
use crate::CONFIG;
use super::{Attachment, CollectionCipher, Favorite, FolderCipher, User, UserOrgStatus, UserOrgType, UserOrganization};
use super::{
Attachment, CollectionCipher, Favorite, FolderCipher, Organization, User, UserOrgStatus, UserOrgType,
UserOrganization,
};
use crate::api::core::CipherSyncData;
use std::borrow::Cow;
db_object! {
#[derive(Identifiable, Queryable, Insertable, Associations, AsChangeset)]
#[derive(Identifiable, Queryable, Insertable, AsChangeset)]
#[table_name = "ciphers"]
#[changeset_options(treat_none_as_null="true")]
#[belongs_to(User, foreign_key = "user_uuid")]
#[belongs_to(Organization, foreign_key = "organization_uuid")]
#[primary_key(uuid)]
pub struct Cipher {
pub uuid: String,
@@ -82,22 +80,32 @@ use crate::error::MapResult;
/// Database methods
impl Cipher {
pub async fn to_json(&self, host: &str, user_uuid: &str, conn: &DbConn) -> Value {
pub async fn to_json(
&self,
host: &str,
user_uuid: &str,
cipher_sync_data: Option<&CipherSyncData>,
conn: &DbConn,
) -> Value {
use crate::util::format_date;
let attachments = Attachment::find_by_cipher(&self.uuid, conn).await;
// When there are no attachments use null instead of an empty array
let attachments_json = if attachments.is_empty() {
Value::Null
let mut attachments_json: Value = Value::Null;
if let Some(cipher_sync_data) = cipher_sync_data {
if let Some(attachments) = cipher_sync_data.cipher_attachments.get(&self.uuid) {
attachments_json = attachments.iter().map(|c| c.to_json(host)).collect();
}
} else {
attachments.iter().map(|c| c.to_json(host)).collect()
};
let attachments = Attachment::find_by_cipher(&self.uuid, conn).await;
if !attachments.is_empty() {
attachments_json = attachments.iter().map(|c| c.to_json(host)).collect()
}
}
let fields_json = self.fields.as_ref().and_then(|s| serde_json::from_str(s).ok()).unwrap_or(Value::Null);
let password_history_json =
self.password_history.as_ref().and_then(|s| serde_json::from_str(s).ok()).unwrap_or(Value::Null);
let (read_only, hide_passwords) = match self.get_access_restrictions(user_uuid, conn).await {
let (read_only, hide_passwords) = match self.get_access_restrictions(user_uuid, cipher_sync_data, conn).await {
Some((ro, hp)) => (ro, hp),
None => {
error!("Cipher ownership assertion failure");
@@ -109,7 +117,7 @@ impl Cipher {
// If not passing an empty object, mobile clients will crash.
let mut type_data_json: Value = serde_json::from_str(&self.data).unwrap_or_else(|_| json!({}));
// NOTE: This was marked as *Backwards Compatibilty Code*, but as of January 2021 this is still being used by upstream
// NOTE: This was marked as *Backwards Compatibility Code*, but as of January 2021 this is still being used by upstream
// Set the first element of the Uris array as Uri, this is needed several (mobile) clients.
if self.atype == 1 {
if type_data_json["Uris"].is_array() {
@@ -124,13 +132,23 @@ impl Cipher {
// Clone the type_data and add some default value.
let mut data_json = type_data_json.clone();
// NOTE: This was marked as *Backwards Compatibilty Code*, but as of January 2021 this is still being used by upstream
// NOTE: This was marked as *Backwards Compatibility Code*, but as of January 2021 this is still being used by upstream
// data_json should always contain the following keys with every atype
data_json["Fields"] = json!(fields_json);
data_json["Name"] = json!(self.name);
data_json["Notes"] = json!(self.notes);
data_json["PasswordHistory"] = json!(password_history_json);
let collection_ids = if let Some(cipher_sync_data) = cipher_sync_data {
if let Some(cipher_collections) = cipher_sync_data.cipher_collections.get(&self.uuid) {
Cow::from(cipher_collections)
} else {
Cow::from(Vec::with_capacity(0))
}
} else {
Cow::from(self.get_collections(user_uuid, conn).await)
};
// There are three types of cipher response models in upstream
// Bitwarden: "cipherMini", "cipher", and "cipherDetails" (in order
// of increasing level of detail). vaultwarden currently only
@@ -144,8 +162,8 @@ impl Cipher {
"Type": self.atype,
"RevisionDate": format_date(&self.updated_at),
"DeletedDate": self.deleted_at.map_or(Value::Null, |d| Value::String(format_date(&d))),
"FolderId": self.get_folder_uuid(user_uuid, conn).await,
"Favorite": self.is_favorite(user_uuid, conn).await,
"FolderId": if let Some(cipher_sync_data) = cipher_sync_data { cipher_sync_data.cipher_folders.get(&self.uuid).map(|c| c.to_string() ) } else { self.get_folder_uuid(user_uuid, conn).await },
"Favorite": if let Some(cipher_sync_data) = cipher_sync_data { cipher_sync_data.cipher_favorites.contains(&self.uuid) } else { self.is_favorite(user_uuid, conn).await },
"Reprompt": self.reprompt.unwrap_or(RepromptType::None as i32),
"OrganizationId": self.organization_uuid,
"Attachments": attachments_json,
@@ -154,7 +172,7 @@ impl Cipher {
"OrganizationUseTotp": true,
// This field is specific to the cipherDetails type.
"CollectionIds": self.get_collections(user_uuid, conn).await,
"CollectionIds": collection_ids,
"Name": self.name,
"Notes": self.notes,
@@ -318,13 +336,21 @@ impl Cipher {
}
/// Returns whether this cipher is owned by an org in which the user has full access.
pub async fn is_in_full_access_org(&self, user_uuid: &str, conn: &DbConn) -> bool {
pub async fn is_in_full_access_org(
&self,
user_uuid: &str,
cipher_sync_data: Option<&CipherSyncData>,
conn: &DbConn,
) -> bool {
if let Some(ref org_uuid) = self.organization_uuid {
if let Some(user_org) = UserOrganization::find_by_user_and_org(user_uuid, org_uuid, conn).await {
if let Some(cipher_sync_data) = cipher_sync_data {
if let Some(cached_user_org) = cipher_sync_data.user_organizations.get(org_uuid) {
return cached_user_org.has_full_access();
}
} else if let Some(user_org) = UserOrganization::find_by_user_and_org(user_uuid, org_uuid, conn).await {
return user_org.has_full_access();
}
}
false
}
@@ -333,18 +359,62 @@ impl Cipher {
/// not in any collection the user has access to. Otherwise, the user has
/// access to this cipher, and Some(read_only, hide_passwords) represents
/// the access restrictions.
pub async fn get_access_restrictions(&self, user_uuid: &str, conn: &DbConn) -> Option<(bool, bool)> {
pub async fn get_access_restrictions(
&self,
user_uuid: &str,
cipher_sync_data: Option<&CipherSyncData>,
conn: &DbConn,
) -> Option<(bool, bool)> {
// Check whether this cipher is directly owned by the user, or is in
// a collection that the user has full access to. If so, there are no
// access restrictions.
if self.is_owned_by_user(user_uuid) || self.is_in_full_access_org(user_uuid, conn).await {
if self.is_owned_by_user(user_uuid) || self.is_in_full_access_org(user_uuid, cipher_sync_data, conn).await {
return Some((false, false));
}
let rows = if let Some(cipher_sync_data) = cipher_sync_data {
let mut rows: Vec<(bool, bool)> = Vec::new();
if let Some(collections) = cipher_sync_data.cipher_collections.get(&self.uuid) {
for collection in collections {
if let Some(uc) = cipher_sync_data.user_collections.get(collection) {
rows.push((uc.read_only, uc.hide_passwords));
}
}
}
rows
} else {
self.get_collections_access_flags(user_uuid, conn).await
};
if rows.is_empty() {
// This cipher isn't in any collections accessible to the user.
return None;
}
// A cipher can be in multiple collections with inconsistent access flags.
// For example, a cipher could be in one collection where the user has
// read-only access, but also in another collection where the user has
// read/write access. For a flag to be in effect for a cipher, upstream
// requires all collections the cipher is in to have that flag set.
// Therefore, we do a boolean AND of all values in each of the `read_only`
// and `hide_passwords` columns. This could ideally be done as part of the
// query, but Diesel doesn't support a min() or bool_and() function on
// booleans and this behavior isn't portable anyway.
let mut read_only = true;
let mut hide_passwords = true;
for (ro, hp) in rows.iter() {
read_only &= ro;
hide_passwords &= hp;
}
Some((read_only, hide_passwords))
}
pub async fn get_collections_access_flags(&self, user_uuid: &str, conn: &DbConn) -> Vec<(bool, bool)> {
db_run! {conn: {
// Check whether this cipher is in any collections accessible to the
// user. If so, retrieve the access flags for each collection.
let rows = ciphers::table
ciphers::table
.filter(ciphers::uuid.eq(&self.uuid))
.inner_join(ciphers_collections::table.on(
ciphers::uuid.eq(ciphers_collections::cipher_uuid)))
@@ -353,42 +423,19 @@ impl Cipher {
.and(users_collections::user_uuid.eq(user_uuid))))
.select((users_collections::read_only, users_collections::hide_passwords))
.load::<(bool, bool)>(conn)
.expect("Error getting access restrictions");
if rows.is_empty() {
// This cipher isn't in any collections accessible to the user.
return None;
}
// A cipher can be in multiple collections with inconsistent access flags.
// For example, a cipher could be in one collection where the user has
// read-only access, but also in another collection where the user has
// read/write access. For a flag to be in effect for a cipher, upstream
// requires all collections the cipher is in to have that flag set.
// Therefore, we do a boolean AND of all values in each of the `read_only`
// and `hide_passwords` columns. This could ideally be done as part of the
// query, but Diesel doesn't support a min() or bool_and() function on
// booleans and this behavior isn't portable anyway.
let mut read_only = true;
let mut hide_passwords = true;
for (ro, hp) in rows.iter() {
read_only &= ro;
hide_passwords &= hp;
}
Some((read_only, hide_passwords))
.expect("Error getting access restrictions")
}}
}
pub async fn is_write_accessible_to_user(&self, user_uuid: &str, conn: &DbConn) -> bool {
match self.get_access_restrictions(user_uuid, conn).await {
match self.get_access_restrictions(user_uuid, None, conn).await {
Some((read_only, _hide_passwords)) => !read_only,
None => false,
}
}
pub async fn is_accessible_to_user(&self, user_uuid: &str, conn: &DbConn) -> bool {
self.get_access_restrictions(user_uuid, conn).await.is_some()
self.get_access_restrictions(user_uuid, None, conn).await.is_some()
}
// Returns whether this cipher is a favorite of the specified user.
@@ -563,4 +610,32 @@ impl Cipher {
.load::<String>(conn).unwrap_or_default()
}}
}
/// Return a Vec with (cipher_uuid, collection_uuid)
/// This is used during a full sync so we only need one query for all collections accessible.
pub async fn get_collections_with_cipher_by_user(user_id: &str, conn: &DbConn) -> Vec<(String, String)> {
db_run! {conn: {
ciphers_collections::table
.inner_join(collections::table.on(
collections::uuid.eq(ciphers_collections::collection_uuid)
))
.inner_join(users_organizations::table.on(
users_organizations::org_uuid.eq(collections::org_uuid).and(
users_organizations::user_uuid.eq(user_id)
)
))
.left_join(users_collections::table.on(
users_collections::collection_uuid.eq(ciphers_collections::collection_uuid).and(
users_collections::user_uuid.eq(user_id)
)
))
.filter(users_collections::user_uuid.eq(user_id).or( // User has access to collection
users_organizations::access_all.eq(true).or( // User has access all
users_organizations::atype.le(UserOrgType::Admin as i32) // User is admin or owner
)
))
.select(ciphers_collections::all_columns)
.load::<(String, String)>(conn).unwrap_or_default()
}}
}
}
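
A worked example of the access-flag folding performed by get_access_restrictions() above; a restriction only takes effect when every collection containing the cipher imposes it:

// Boolean AND across all (read_only, hide_passwords) rows.
// (The real code returns None when `rows` is empty.)
fn fold_access_flags(rows: &[(bool, bool)]) -> (bool, bool) {
    rows.iter().fold((true, true), |(ro, hp), &(r, h)| (ro & r, hp & h))
}

fn main() {
    // One read/write collection makes the cipher writable overall, but
    // passwords stay hidden because both collections hide them.
    assert_eq!(fold_access_flags(&[(true, true), (false, true)]), (false, true));
}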


@@ -1,11 +1,10 @@
use serde_json::Value;
use super::{Cipher, Organization, User, UserOrgStatus, UserOrgType, UserOrganization};
use super::{User, UserOrgStatus, UserOrgType, UserOrganization};
db_object! {
#[derive(Identifiable, Queryable, Insertable, Associations, AsChangeset)]
#[derive(Identifiable, Queryable, Insertable, AsChangeset)]
#[table_name = "collections"]
#[belongs_to(Organization, foreign_key = "org_uuid")]
#[primary_key(uuid)]
pub struct Collection {
pub uuid: String,
@@ -13,10 +12,8 @@ db_object! {
pub name: String,
}
#[derive(Identifiable, Queryable, Insertable, Associations)]
#[derive(Identifiable, Queryable, Insertable)]
#[table_name = "users_collections"]
#[belongs_to(User, foreign_key = "user_uuid")]
#[belongs_to(Collection, foreign_key = "collection_uuid")]
#[primary_key(user_uuid, collection_uuid)]
pub struct CollectionUser {
pub user_uuid: String,
@@ -25,10 +22,8 @@ db_object! {
pub hide_passwords: bool,
}
#[derive(Identifiable, Queryable, Insertable, Associations)]
#[derive(Identifiable, Queryable, Insertable)]
#[table_name = "ciphers_collections"]
#[belongs_to(Cipher, foreign_key = "cipher_uuid")]
#[belongs_to(Collection, foreign_key = "collection_uuid")]
#[primary_key(cipher_uuid, collection_uuid)]
pub struct CollectionCipher {
pub cipher_uuid: String,
@@ -57,11 +52,32 @@ impl Collection {
})
}
pub async fn to_json_details(&self, user_uuid: &str, conn: &DbConn) -> Value {
pub async fn to_json_details(
&self,
user_uuid: &str,
cipher_sync_data: Option<&crate::api::core::CipherSyncData>,
conn: &DbConn,
) -> Value {
let (read_only, hide_passwords) = if let Some(cipher_sync_data) = cipher_sync_data {
match cipher_sync_data.user_organizations.get(&self.org_uuid) {
Some(uo) if uo.has_full_access() => (false, false),
Some(_) => {
if let Some(uc) = cipher_sync_data.user_collections.get(&self.uuid) {
(uc.read_only, uc.hide_passwords)
} else {
(false, false)
}
}
_ => (true, true),
}
} else {
(!self.is_writable_by_user(user_uuid, conn).await, self.hide_passwords_for_user(user_uuid, conn).await)
};
let mut json_object = self.to_json();
json_object["Object"] = json!("collectionDetails");
json_object["ReadOnly"] = json!(!self.is_writable_by_user(user_uuid, conn).await);
json_object["HidePasswords"] = json!(self.hide_passwords_for_user(user_uuid, conn).await);
json_object["ReadOnly"] = json!(read_only);
json_object["HidePasswords"] = json!(hide_passwords);
json_object
}
}
@@ -374,6 +390,17 @@ impl CollectionUser {
}}
}
pub async fn find_by_user(user_uuid: &str, conn: &DbConn) -> Vec<Self> {
db_run! { conn: {
users_collections::table
.filter(users_collections::user_uuid.eq(user_uuid))
.select(users_collections::all_columns)
.load::<CollectionUserDb>(conn)
.expect("Error loading users_collections")
.from_db()
}}
}
pub async fn delete_all_by_collection(collection_uuid: &str, conn: &DbConn) -> EmptyResult {
for collection in CollectionUser::find_by_collection(collection_uuid, conn).await.iter() {
User::update_uuid_revision(&collection.user_uuid, conn).await;
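
A condensed restatement of the cached branch in to_json_details() above (illustrative stand-in types):

struct UserOrg {
    full_access: bool,
}

struct UserCollection {
    read_only: bool,
    hide_passwords: bool,
}

// Mirrors the match in to_json_details().
fn access_flags(org: Option<&UserOrg>, col: Option<&UserCollection>) -> (bool, bool) {
    match org {
        // Full-access org members have no restrictions.
        Some(uo) if uo.full_access => (false, false),
        // Otherwise use the per-collection flags, defaulting to no restrictions.
        Some(_) => col.map_or((false, false), |uc| (uc.read_only, uc.hide_passwords)),
        // No membership record at all: fully restricted.
        None => (true, true),
    }
}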


@@ -1,13 +1,11 @@
use chrono::{NaiveDateTime, Utc};
use super::User;
use crate::CONFIG;
db_object! {
#[derive(Identifiable, Queryable, Insertable, Associations, AsChangeset)]
#[derive(Identifiable, Queryable, Insertable, AsChangeset)]
#[table_name = "devices"]
#[changeset_options(treat_none_as_null="true")]
#[belongs_to(User, foreign_key = "user_uuid")]
#[primary_key(uuid, user_uuid)]
pub struct Device {
pub uuid: String,


@@ -4,10 +4,9 @@ use serde_json::Value;
use super::User;
db_object! {
#[derive(Debug, Identifiable, Queryable, Insertable, Associations, AsChangeset)]
#[derive(Debug, Identifiable, Queryable, Insertable, AsChangeset)]
#[table_name = "emergency_access"]
#[changeset_options(treat_none_as_null="true")]
#[belongs_to(User, foreign_key = "grantor_uuid")]
#[primary_key(uuid)]
pub struct EmergencyAccess {
pub uuid: String,


@@ -1,10 +1,8 @@
use super::{Cipher, User};
use super::User;
db_object! {
#[derive(Identifiable, Queryable, Insertable, Associations)]
#[derive(Identifiable, Queryable, Insertable)]
#[table_name = "favorites"]
#[belongs_to(User, foreign_key = "user_uuid")]
#[belongs_to(Cipher, foreign_key = "cipher_uuid")]
#[primary_key(user_uuid, cipher_uuid)]
pub struct Favorite {
pub user_uuid: String,
@@ -80,4 +78,16 @@ impl Favorite {
.map_res("Error removing favorites by user")
}}
}
/// Return a Vec of cipher_uuids; it will only contain ciphers flagged as favorite.
/// This is used during a full sync so we only need one query for all favorite cipher matches.
pub async fn get_all_cipher_uuid_by_user(user_uuid: &str, conn: &DbConn) -> Vec<String> {
db_run! { conn: {
favorites::table
.filter(favorites::user_uuid.eq(user_uuid))
.select(favorites::cipher_uuid)
.load::<String>(conn)
.unwrap_or_default()
}}
}
}


@@ -1,12 +1,11 @@
use chrono::{NaiveDateTime, Utc};
use serde_json::Value;
use super::{Cipher, User};
use super::User;
db_object! {
#[derive(Identifiable, Queryable, Insertable, Associations, AsChangeset)]
#[derive(Identifiable, Queryable, Insertable, AsChangeset)]
#[table_name = "folders"]
#[belongs_to(User, foreign_key = "user_uuid")]
#[primary_key(uuid)]
pub struct Folder {
pub uuid: String,
@@ -16,10 +15,8 @@ db_object! {
pub name: String,
}
#[derive(Identifiable, Queryable, Insertable, Associations)]
#[derive(Identifiable, Queryable, Insertable)]
#[table_name = "folders_ciphers"]
#[belongs_to(Cipher, foreign_key = "cipher_uuid")]
#[belongs_to(Folder, foreign_key = "folder_uuid")]
#[primary_key(cipher_uuid, folder_uuid)]
pub struct FolderCipher {
pub cipher_uuid: String,
@@ -215,4 +212,17 @@ impl FolderCipher {
.from_db()
}}
}
/// Return a vec with (cipher_uuid, folder_uuid)
/// This is used during a full sync so we only need one query for all folder matches.
pub async fn find_by_user(user_uuid: &str, conn: &DbConn) -> Vec<(String, String)> {
db_run! { conn: {
folders_ciphers::table
.inner_join(folders::table)
.filter(folders::user_uuid.eq(user_uuid))
.select(folders_ciphers::all_columns)
.load::<(String, String)>(conn)
.unwrap_or_default()
}}
}
}


@@ -6,12 +6,11 @@ use crate::db::DbConn;
use crate::error::MapResult;
use crate::util::UpCase;
use super::{Organization, UserOrgStatus, UserOrgType, UserOrganization};
use super::{UserOrgStatus, UserOrgType, UserOrganization};
db_object! {
#[derive(Identifiable, Queryable, Insertable, Associations, AsChangeset)]
#[derive(Identifiable, Queryable, Insertable, AsChangeset)]
#[table_name = "org_policies"]
#[belongs_to(Organization, foreign_key = "org_uuid")]
#[primary_key(uuid)]
pub struct OrgPolicy {
pub uuid: String,


@@ -547,6 +547,15 @@ impl UserOrganization {
}}
}
pub async fn find_by_user(user_uuid: &str, conn: &DbConn) -> Vec<Self> {
db_run! { conn: {
users_organizations::table
.filter(users_organizations::user_uuid.eq(user_uuid))
.load::<UserOrganizationDb>(conn)
.expect("Error loading user organizations").from_db()
}}
}
pub async fn find_by_user_and_policy(user_uuid: &str, policy_type: OrgPolicyType, conn: &DbConn) -> Vec<Self> {
db_run! { conn: {
users_organizations::table

Some files were not shown because too many files have changed in this diff.