Compare commits


2 Commits

| Author | SHA1 | Message | Date |
| --- | --- | --- | --- |
| Miroslav Prasil | ebb30229f1 | Add Image information | 2018-06-01 15:17:09 +01:00 |
| Miroslav Prasil | 936af5431a | Update readme for docker hub | 2018-06-01 15:17:09 +01:00 |
36 changed files with 987 additions and 2456 deletions
+5 -35
@@ -1,43 +1,13 @@
## Bitwarden_RS Configuration File
## Uncomment any of the following lines to change the defaults
## Main data folder
# DATA_FOLDER=data
## Individual folders, these override %DATA_FOLDER%
# DATABASE_URL=data/db.sqlite3
# RSA_KEY_FILENAME=data/rsa_key
# PRIVATE_RSA_KEY=data/private_rsa_key.der
# PUBLIC_RSA_KEY=data/public_rsa_key.der
# ICON_CACHE_FOLDER=data/icon_cache
# ATTACHMENTS_FOLDER=data/attachments
## Web vault settings
# WEB_VAULT_FOLDER=web-vault/
# WEB_VAULT_ENABLED=true
# true for yes, anything else for no
SIGNUPS_ALLOWED=true
## Controls if new users can register
# SIGNUPS_ALLOWED=true
## Use a local favicon extractor
## Set to false to use bitwarden's official icon servers
## Set to true to use the local version, which is not as smart,
## but it doesn't send the cipher domains to bitwarden's servers
# LOCAL_ICON_EXTRACTOR=false
## Controls the PBKDF2 password iterations to apply on the server
## The change only applies when the password is changed
# PASSWORD_ITERATIONS=100000
## Whether the password hint should be sent in the error response when the client requests it
# SHOW_PASSWORD_HINT=true
## Domain settings
## The domain must match the address from where you access the server
## Unless you are using U2F, or having problems with attachments not downloading, there is no need to change this
## For U2F to work, the server must use HTTPS, you can use Let's Encrypt for free certs
# DOMAIN=https://bw.domain.tld:8443
## Rocket specific settings, check Rocket documentation to learn more
# ROCKET_ENV=staging
# ROCKET_ENV=production
# ROCKET_ADDRESS=0.0.0.0 # Enable this to test mobile app
# ROCKET_PORT=8000
# ROCKET_TLS={certs="/path/to/certs.pem",key="/path/to/key.pem"}
+26 -26
@@ -1,23 +1,25 @@
# Build instructions
## How to compile bitwarden_rs
Install `rust nightly`; on Windows the recommended way is through `rustup`.
## Dependencies
- `Rust nightly` (strongly recommended to use [rustup](https://rustup.rs/))
- `OpenSSL` (should be available in path, install through your system's package manager or use the [prebuilt binaries](https://wiki.openssl.org/index.php/Binaries))
- `NodeJS` (required to build the web-vault; install through your system's package manager or use the [prebuilt binaries](https://nodejs.org/en/download/))
Install the `openssl` library; on Windows the best option is Microsoft's `vcpkg`,
on other systems use their respective package managers.
## Run/Compile
Then run:
```sh
# Compile and run
cargo run
# or just compile (binary located in target/release/bitwarden_rs)
cargo build --release
# or
cargo build
```
When run, the server is accessible at [http://localhost:80](http://localhost:80).
## How to install the web-vault locally
If you're using the docker image, you can just update the `VAULT_VERSION` variable in the Dockerfile and rebuild the image.
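For example (a minimal sketch; the image tag is only illustrative):
```sh
# After editing VAULT_VERSION in the Dockerfile, rebuild from the repository root
docker build -t bitwarden_rs .
```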
### Install the web-vault
Download the latest official release from the [releases page](https://github.com/bitwarden/web/releases) and extract it.
Install `node.js` and either `yarn` or `npm` (usually included with node)
Clone the web-vault outside the project:
```
git clone https://github.com/bitwarden/web.git web-vault
```
Modify `web-vault/settings.Production.json` to look like this:
```json
@@ -32,23 +34,23 @@ Modify `web-vault/settings.Production.json` to look like this:
}
```
Then, run the following from the `web-vault` directory:
Then, run the following from the `web-vault` dir:
```sh
# With yarn (recommended)
yarn
yarn gulp dist:selfHosted
# With npm
npm install
npx gulp dist:selfHosted
```
Finally copy the contents of the `web-vault/dist` folder into the `bitwarden_rs/web-vault` folder.
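For example (a sketch assuming the two checkouts sit next to each other):
```sh
cp -r web-vault/dist/* bitwarden_rs/web-vault/
```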
# Configuration
The available configuration options are documented in the default `.env` file, and they can be modified by uncommenting the desired options in that file or by setting their respective environment variables. Look at the README file for the main configuration options available.
Note: the environment variables override the values set in the `.env` file.
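For instance, a variable set for a single run takes precedence over whatever is in `.env` (a minimal sketch):
```sh
# Overrides any SIGNUPS_ALLOWED value from .env for this invocation
SIGNUPS_ALLOWED=false cargo run
```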
## How to recreate database schemas (for developers)
## How to recreate database schemas
Install diesel-cli with cargo:
```sh
cargo install diesel_cli --no-default-features --features sqlite-bundled
cargo install diesel_cli --no-default-features --features sqlite-bundled # Or use only sqlite to use the system version
```
Make sure that the correct path to the database is in the `.env` file.
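For example (the path shown is the default from the `.env` template; adjust it to your setup):
```sh
# .env
DATABASE_URL=data/db.sqlite3
```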
@@ -61,9 +63,7 @@ diesel migration generate <name>
Modify the *.sql files, making sure that any changes are reverted in the down.sql file.
Apply the migrations and save the generated schemas as follows:
```sh
diesel migration redo
# This step should be done automatically when using diesel-cli > 1.3.0
# diesel print-schema > src/db/schema.rs
```
diesel migration redo
diesel print-schema > src/db/schema.rs
```
+386 -419
File diff suppressed because it is too large
+13 -24
@@ -1,28 +1,28 @@
[package]
name = "bitwarden_rs"
version = "1.0.0"
version = "0.9.0"
authors = ["Daniel García <dani-garcia@users.noreply.github.com>"]
[dependencies]
# Web framework for nightly with a focus on ease-of-use, expressibility, and speed.
rocket = { version = "0.3.15", features = ["tls"] }
rocket_codegen = "0.3.15"
rocket_contrib = "0.3.15"
rocket = { version = "0.3.12", features = ["tls"] }
rocket_codegen = "0.3.12"
rocket_contrib = "0.3.12"
# HTTP client
reqwest = "0.8.6"
# multipart/form-data support
multipart = "0.15.0"
multipart = "0.14.2"
# A generic serialization/deserialization framework
serde = "1.0.70"
serde_derive = "1.0.70"
serde_json = "1.0.24"
serde = "1.0.64"
serde_derive = "1.0.64"
serde_json = "1.0.19"
# A safe, extensible ORM and Query builder
diesel = { version = "1.3.2", features = ["sqlite", "chrono", "r2d2"] }
diesel_migrations = { version = "1.3.0", features = ["sqlite"] }
diesel = { version = "~1.2.2", features = ["sqlite", "chrono", "r2d2"] }
diesel_migrations = { version = "~1.2.0", features = ["sqlite"] }
# Bundled SQLite
libsqlite3-sys = { version = "0.9.1", features = ["bundled"] }
@@ -34,7 +34,7 @@ ring = { version = "= 0.11.0", features = ["rsa_signing"] }
uuid = { version = "0.6.5", features = ["v4"] }
# Date and time library for Rust
chrono = "0.4.4"
chrono = "0.4.2"
# TOTP library
oath = "0.10.2"
@@ -45,22 +45,11 @@ data-encoding = "2.1.1"
# JWT library
jsonwebtoken = "= 4.0.1"
# U2F library
u2f = "0.1.2"
# A `dotenv` implementation for Rust
dotenv = { version = "0.13.0", default-features = false }
# Lazy static macro
lazy_static = "1.0.2"
# Numerical libraries
num-traits = "0.2.5"
num-derive = "0.2.2"
lazy_static = "1.0.1"
[patch.crates-io]
# Make jwt use ring 0.11, to match rocket
jsonwebtoken = { path = "libs/jsonwebtoken" }
# Version 0.1.2 from crates.io lacks a commit that fixes a certificate error
u2f = { git = 'https://github.com/wisespace-io/u2f-rs', rev = '193de35093a44' }
jsonwebtoken = { path = "libs/jsonwebtoken" } # Make jwt use ring 0.11, to match rocket
+20 -20
@@ -2,32 +2,36 @@
# https://docs.docker.com/develop/develop-images/multistage-build/
# https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
FROM node:8-alpine as vault
FROM node:9-alpine as vault
ENV VAULT_VERSION "v2.1.1"
ENV URL "https://github.com/bitwarden/web.git"
ENV VAULT_VERSION "1.26.0"
ENV URL "https://github.com/bitwarden/web/archive/v${VAULT_VERSION}.tar.gz"
RUN apk add --update-cache --upgrade \
curl \
git \
tar
tar \
&& npm install -g \
gulp-cli \
gulp
RUN mkdir /web-build \
&& cd /web-build \
&& curl -L "${URL}" | tar -xvz --strip-components=1
RUN git clone -b $VAULT_VERSION --depth 1 $URL web-build
WORKDIR /web-build
COPY /docker/set-vault-baseurl.patch /web-build/
RUN git apply set-vault-baseurl.patch
COPY /docker/settings.Production.json /web-build/
RUN npm run sub:init && npm install
RUN npm run dist \
&& mv build /web-vault
RUN git config --global url."https://github.com/".insteadOf ssh://git@github.com/ \
&& npm install \
&& gulp dist:selfHosted \
&& mv dist /web-vault
########################## BUILD IMAGE ##########################
# We need to use the Rust build image, because
# we need the Rust compiler and Cargo tooling
FROM rust as build
FROM rustlang/rust:nightly as build
# Using bundled SQLite, no need to install it
# RUN apt-get update && apt-get install -y\
@@ -42,7 +46,6 @@ WORKDIR /app
# Copies over *only* your manifests and vendored dependencies
COPY ./Cargo.* ./
COPY ./libs ./libs
COPY ./rust-toolchain ./rust-toolchain
# Builds your dependencies and removes the
# dummy project, except the target folder
@@ -63,13 +66,9 @@ RUN cargo build --release
# because we already have a binary built
FROM debian:stretch-slim
ENV ROCKET_ENV "staging"
ENV ROCKET_WORKERS=10
# Install needed libraries
RUN apt-get update && apt-get install -y\
openssl\
ca-certificates\
--no-install-recommends\
&& rm -rf /var/lib/apt/lists/*
@@ -80,9 +79,10 @@ EXPOSE 80
# Copies the files from the context (env file and web-vault)
# and the binary from the "build" stage to the current stage
COPY .env .
COPY Rocket.toml .
COPY --from=vault /web-vault ./web-vault
COPY --from=build app/target/release/bitwarden_rs .
# Configures the startup!
CMD ./bitwarden_rs
# Use production to disable Rocket logging
#CMD ROCKET_ENV=production ./bitwarden_rs
CMD ROCKET_ENV=staging ./bitwarden_rs
+2 -217
@@ -4,42 +4,6 @@ Image is based on [Rust implementation of Bitwarden API](https://github.com/dani
_*Note that this project is not associated with the [Bitwarden](https://bitwarden.com/) project nor 8bit Solutions LLC._
**Table of contents**
- [Features](#features)
- [Missing features](#missing-features)
- [Docker image usage](#docker-image-usage)
- [Starting a container](#starting-a-container)
- [Updating the bitwarden image](#updating-the-bitwarden-image)
- [Configuring bitwarden service](#configuring-bitwarden-service)
- [Disable registration of new users](#disable-registration-of-new-users)
- [Enabling HTTPS](#enabling-https)
- [Enabling U2F authentication](#enabling-u2f-authentication)
- [Changing persistent data location](#changing-persistent-data-location)
- [/data prefix:](#data-prefix)
- [database name and location](#database-name-and-location)
- [attachments location](#attachments-location)
- [icons cache](#icons-cache)
- [Changing the API request size limit](#changing-the-api-request-size-limit)
- [Changing the number of workers](#changing-the-number-of-workers)
- [Disabling or overriding the Vault interface hosting](#disabling-or-overriding-the-vault-interface-hosting)
- [Other configuration](#other-configuration)
- [Building your own image](#building-your-own-image)
- [Building binary](#building-binary)
- [Available packages](#available-packages)
- [Arch Linux](#arch-linux)
- [Backing up your vault](#backing-up-your-vault)
- [1. the sqlite3 database](#1-the-sqlite3-database)
- [2. the attachments folder](#2-the-attachments-folder)
- [3. the key files](#3-the-key-files)
- [4. Icon Cache](#4-icon-cache)
- [Running the server with non-root user](#running-the-server-with-non-root-user)
- [Differences from upstream API implementation](#differences-from-upstream-api-implementation)
- [Changing user email](#changing-user-email)
- [Creating organization](#creating-organization)
- [Inviting users into organization](#inviting-users-into-organization)
- [Get in touch](#get-in-touch)
## Features
Basically a full implementation of the Bitwarden API is provided, including:
@@ -50,14 +14,6 @@ Basically full implementation of Bitwarden API is provided including:
* Vault API support
* Serving the static files for Vault interface
* Website icons API
* Authenticator and U2F support
## Missing features
* Email confirmation
* Other two-factor systems:
* YubiKey OTP (if your key supports U2F, you can use that)
* Duo
* Email codes
## Docker image usage
@@ -88,7 +44,6 @@ docker rm bitwarden
# Start new container with the data mounted
docker run -d --name bitwarden -v /bw-data/:/data/ -p 80:80 mprasil/bitwarden:latest
```
Then visit [http://localhost:80](http://localhost:80)
In case you didn't bind mount the volume for persistent data, you need an intermediate step where you preserve the data with an intermediate container:
@@ -114,55 +69,6 @@ docker rm bitwarden_data
## Configuring bitwarden service
### Disable registration of new users
By default new users can register; if you want to disable that, set the `SIGNUPS_ALLOWED` env variable to `false`:
```sh
docker run -d --name bitwarden \
-e SIGNUPS_ALLOWED=false \
-v /bw-data/:/data/ \
-p 80:80 \
mprasil/bitwarden:latest
```
### Enabling HTTPS
To enable HTTPS, you need to configure the `ROCKET_TLS` variable.
The value of the option must follow the format:
```
ROCKET_TLS={certs="/path/to/certs.pem",key="/path/to/key.pem"}
```
Where:
- certs: a path to a certificate chain in PEM format
- key: a path to a private key file in PEM format for the certificate in certs
```sh
docker run -d --name bitwarden \
-e ROCKET_TLS='{certs="/ssl/certs.pem",key="/ssl/key.pem"}' \
-v /ssl/keys/:/ssl/ \
-v /bw-data/:/data/ \
-p 443:80 \
mprasil/bitwarden:latest
```
Note that you need to mount the ssl files and forward the appropriate port.
### Enabling U2F authentication
To enable U2F authentication, you must be serving bitwarden_rs from an HTTPS domain with a valid certificate (either using the included
HTTPS options or with a reverse proxy). We recommend using a free certificate from Let's Encrypt.
After that, you need to set the `DOMAIN` environment variable to the same address from where bitwarden_rs is being served:
```sh
docker run -d --name bitwarden \
-e DOMAIN=https://bw.domain.tld \
-v /bw-data/:/data/ \
-p 80:80 \
mprasil/bitwarden:latest
```
Note that the value has to include the `https://` and it may include a port at the end (in the format of `https://bw.domain.tld:port`) when not using `443`.
### Changing persistent data location
#### /data prefix:
@@ -222,59 +128,7 @@ docker run -d --name bitwarden \
mprasil/bitwarden:latest
```
Note that in the above example we don't mount the volume locally, which means it won't be persisted during the upgrade unless you use an intermediate data container using `--volumes-from`. This will impact performance as bitwarden will have to re-download the icons on restart, but might save you from having stale icons in cache as they are not automatically cleaned.
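A minimal sketch of that intermediate data container approach (container names are only illustrative):
```sh
# Keep the data (including the icon cache) in a dedicated data container
docker create --name bitwarden_data -v /data/ mprasil/bitwarden:latest
# Run the service without a local mount, reusing the data container's volume
docker run -d --name bitwarden --volumes-from bitwarden_data -p 80:80 mprasil/bitwarden:latest
```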
### Changing the API request size limit
By default the API calls are limited to 10MB. This should be sufficient for most cases, however if you want to support large imports, this might be limiting you. On the other hand you might want to limit the request size to something smaller than that to prevent API abuse and possible DoS attacks, especially if running with limited resources.
To set the limit, you can use the `ROCKET_LIMITS` variable. The example here shows a 10MB limit for posted JSON in the body (this is the default):
```sh
docker run -d --name bitwarden \
-e ROCKET_LIMITS={json=10485760} \
-v /bw-data/:/data/ \
-p 80:80 \
mprasil/bitwarden:latest
```
### Changing the number of workers
When you run bitwarden_rs, it spawns `2 * <number of cpu cores>` workers to handle requests. On some systems this might lead to a low number of workers and hence slow performance, so the default in the docker image is changed to spawn 10 threads. You can override this setting to increase or decrease the number of workers by setting the `ROCKET_WORKERS` variable.
In the example below, we're starting with 20 workers:
```sh
docker run -d --name bitwarden \
-e ROCKET_WORKERS=20 \
-v /bw-data/:/data/ \
-p 80:80 \
mprasil/bitwarden:latest
```
### Disabling or overriding the Vault interface hosting
As a convenience, the bitwarden_rs image will also host the static files for the Vault web interface. You can disable this static file hosting completely by setting the `WEB_VAULT_ENABLED` variable to `false`.
```sh
docker run -d --name bitwarden \
-e WEB_VAULT_ENABLED=false \
-v /bw-data/:/data/ \
-p 80:80 \
mprasil/bitwarden:latest
```
Alternatively, you can override the Vault files and provide your own static files to host. You can do that by mounting a path with your files over the `/web-vault` directory in the container. Just make sure the directory contains at least an `index.html` file.
```sh
docker run -d --name bitwarden \
-v /path/to/static/files_directory:/web-vault \
-v /bw-data/:/data/ \
-p 80:80 \
mprasil/bitwarden:latest
```
Note that you can also change the path where bitwarden_rs looks for static files by providing the `WEB_VAULT_FOLDER` environment variable with the path.
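For example (a sketch; the in-container path is only illustrative):
```sh
docker run -d --name bitwarden \
  -e WEB_VAULT_FOLDER=/custom-vault/ \
  -v /path/to/static/files_directory:/custom-vault/ \
  -v /bw-data/:/data/ \
  -p 80:80 \
  mprasil/bitwarden:latest
```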
Note, that in the above example we don't mount the volume locally, which means it won't be persisted during the upgrade unless you use intermediate data container using `--volumes-from`. This will impact performance as bitwarden will have to re-dowload the icons on restart, but might save you from having stale icons in cache as they are not automatically cleaned.
### Other configuration
@@ -291,73 +145,4 @@ docker build -t bitwarden_rs .
## Building binary
For building binary outside the Docker environment and running it locally without docker, please see [build instructions](BUILD.md).
## Available packages
### Arch Linux
Bitwarden_rs is already packaged for Arch Linux thanks to @mqus. There is an [AUR package](https://aur.archlinux.org/packages/bitwarden_rs) (optionally with the [vault web interface](https://aur.archlinux.org/packages/bitwarden_rs-vault/)) available.
## Backing up your vault
### 1. the sqlite3 database
The sqlite3 database should be backed up using the proper sqlite3 backup command. This will ensure the database does not become corrupted if the backup happens during a database write.
```
sqlite3 /$DATA_FOLDER/db.sqlite3 ".backup '/$DATA_FOLDER/db-backup/backup.sq3'"
```
This command can be run via a CRON job every day; however, note that it will overwrite the same backup.sq3 file each time. This backup file should therefore be saved via incremental backup, either using a CRON job command that appends a timestamp or from another backup app such as Duplicati.
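A sketch of a timestamped variant (schedule and paths are only illustrative; `$DATA_FOLDER` stands for your data directory):
```sh
# crontab entry: back up the database every day at 03:00, keeping one file per day
0 3 * * * sqlite3 /$DATA_FOLDER/db.sqlite3 ".backup '/$DATA_FOLDER/db-backup/backup-$(date +\%Y\%m\%d).sq3'"
```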
### 2. the attachments folder
By default, this is located in `$DATA_FOLDER/attachments`
### 3. the key files
This is optional; these are only used to store tokens of users currently logged in, and deleting them would simply log each user out, forcing them to log in again. By default, these are located in the `$DATA_FOLDER` (by default /data in the docker container). There are 3 files: rsa_key.der, rsa_key.pem, rsa_key.pub.der.
### 4. Icon Cache
This is optional; the icon cache can re-download itself, however if you have a large cache it may take a long time. By default it is located in `$DATA_FOLDER/icon_cache`.
## Running the server with non-root user
The root user inside the container is already pretty limited in what it can do, so the default setup should be secure enough. However, if you wish to go the extra mile and avoid using root even in the container, here's how you can do that:
1. Create a data folder that's owned by the non-root user, so you can use that user to write persistent data. Get the user `id`. In Linux you can run `stat <folder_name>` to get/verify the owner ID.
2. When you run the container, you need to provide the user ID as one of the parameters. Note that this needs to be in numeric form and not the user name, because docker would try to find such a user defined inside the image, which would likely not exist or would have a different ID than your local user and hence wouldn't be able to write the persistent data. This can be done with the `--user` parameter.
3. bitwarden_rs listens on port `80` inside the container by default; this [won't work with a non-root user](https://www.w3.org/Daemon/User/Installation/PrivilegedPorts.html), because regular users aren't allowed to open ports below `1024`. To overcome this, you need to configure the server to listen on a different port; you can use `ROCKET_PORT` to do that.
Here's a sample docker run that uses the user with id `1000` and with port redirection configured, so that inside the container the service listens on port `8080` and docker translates that to the external (host) port `80`:
```sh
docker run -d --name bitwarden \
--user 1000 \
-e ROCKET_PORT=8080 \
-v /bw-data/:/data/ \
-p 80:8080 \
mprasil/bitwarden:latest
```
## Differences from upstream API implementation
### Changing user email
Because we don't have any SMTP functionality at the moment, there's no way to deliver the verification token when you try to change the email. The user just needs to enter any random token to continue, and the change will be applied.
### Creating organization
We use the upstream Vault interface directly without any (significant) changes; this is why the user is presented with paid options when creating an organization. To create an organization, just use the free option; none of the limits apply when using bitwarden_rs as the back-end API, and after the organization is created it should behave like an Enterprise organization.
### Inviting users into organization
The users must already be registered on your server to invite them, because we can't send the invitation via email. The invited users won't get the invitation email; instead they will appear in the interface as if they had already accepted the invitation. The organization admin then just needs to confirm them to make them proper Organization members and give them access to the shared secrets.
## Get in touch
To ask a question, [raising an issue](https://github.com/dani-garcia/bitwarden_rs/issues/new) is fine; please also report any bugs you spot there.
If you prefer to chat, we're usually hanging around in the [#bitwarden_rs:matrix.org](https://matrix.to/#/#bitwarden_rs:matrix.org) room on Matrix. Feel free to join us!
For building binary outside the Docker environment and running it locally without docker, please see [build instructions](BUILD.md).
-2
@@ -1,2 +0,0 @@
[global.limits]
json = 10485760 # 10 MiB
-5
@@ -1,5 +0,0 @@
# For documentation on how to configure this file,
# see diesel.rs/guides/configuring-diesel-cli
[print_schema]
file = "src/db/schema.rs"
-23
@@ -1,23 +0,0 @@
--- a/src/app/services/services.module.ts
+++ b/src/app/services/services.module.ts
@@ -116,17 +116,15 @@ const exportService = new ExportService(folderService, cipherService, apiService
const importService = new ImportService(cipherService, folderService, apiService, i18nService, collectionService);
const auditService = new AuditService(cryptoFunctionService, apiService);
-const analytics = new Analytics(window, () => platformUtilsService.isDev() || platformUtilsService.isSelfHost(),
+const analytics = new Analytics(window, () => platformUtilsService.isDev() || platformUtilsService.isSelfHost() || true,
platformUtilsService, storageService, appIdService);
containerService.attachToWindow(window);
export function initFactory(): Function {
return async () => {
await (storageService as HtmlStorageService).init();
- const isDev = platformUtilsService.isDev();
- if (!isDev && platformUtilsService.isSelfHost()) {
- environmentService.baseUrl = window.location.origin;
- }
+ const isDev = false;
+ environmentService.baseUrl = window.location.origin;
await apiService.setUrls({
base: isDev ? null : window.location.origin,
api: isDev ? 'http://localhost:4000' : null,
+9
@@ -0,0 +1,9 @@
{
"appSettings": {
"apiUri": "/api",
"identityUri": "/identity",
"iconsUri": "/icons",
"stripeKey": "",
"braintreeKey": ""
}
}
@@ -1,8 +0,0 @@
UPDATE users
SET totp_secret = (
SELECT twofactor.data FROM twofactor
WHERE twofactor.type = 0
AND twofactor.user_uuid = users.uuid
);
DROP TABLE twofactor;
@@ -1,15 +0,0 @@
CREATE TABLE twofactor (
uuid TEXT NOT NULL PRIMARY KEY,
user_uuid TEXT NOT NULL REFERENCES users (uuid),
type INTEGER NOT NULL,
enabled BOOLEAN NOT NULL,
data TEXT NOT NULL,
UNIQUE (user_uuid, type)
);
INSERT INTO twofactor (uuid, user_uuid, type, enabled, data)
SELECT lower(hex(randomblob(16))) , uuid, 0, 1, u.totp_secret FROM users u where u.totp_secret IS NOT NULL;
UPDATE users SET totp_secret = NULL; -- Instead of recreating the table, just leave the columns empty
-1
@@ -1 +0,0 @@
nightly-2018-07-18
+15 -90
@@ -3,9 +3,11 @@ use rocket_contrib::Json;
use db::DbConn;
use db::models::*;
use api::{PasswordData, JsonResult, EmptyResult, JsonUpcase, NumberOrString};
use api::{PasswordData, JsonResult, EmptyResult, JsonUpcase};
use auth::Headers;
use util;
use CONFIG;
#[derive(Deserialize, Debug)]
@@ -13,6 +15,7 @@ use CONFIG;
struct RegisterData {
Email: String,
Key: String,
#[serde(deserialize_with = "util::upcase_deserialize")]
Keys: Option<KeysData>,
MasterPasswordHash: String,
MasterPasswordHint: Option<String>,
@@ -31,10 +34,10 @@ fn register(data: JsonUpcase<RegisterData>, conn: DbConn) -> EmptyResult {
let data: RegisterData = data.into_inner().data;
if !CONFIG.signups_allowed {
err!("Signups not allowed")
err!(format!("Signups not allowed"))
}
if User::find_by_mail(&data.Email, &conn).is_some() {
if let Some(_) = User::find_by_mail(&data.Email, &conn) {
err!("Email already exists")
}
@@ -64,33 +67,6 @@ fn profile(headers: Headers, conn: DbConn) -> JsonResult {
Ok(Json(headers.user.to_json(&conn)))
}
#[derive(Deserialize, Debug)]
#[allow(non_snake_case)]
struct ProfileData {
#[serde(rename = "Culture")]
_Culture: String, // Ignored, always use en-US
MasterPasswordHint: Option<String>,
Name: String,
}
#[put("/accounts/profile", data = "<data>")]
fn put_profile(data: JsonUpcase<ProfileData>, headers: Headers, conn: DbConn) -> JsonResult {
post_profile(data, headers, conn)
}
#[post("/accounts/profile", data = "<data>")]
fn post_profile(data: JsonUpcase<ProfileData>, headers: Headers, conn: DbConn) -> JsonResult {
let data: ProfileData = data.into_inner().data;
let mut user = headers.user;
user.name = data.Name;
user.password_hint = data.MasterPasswordHint;
user.save(&conn);
Ok(Json(user.to_json(&conn)))
}
#[get("/users/<uuid>/public-key")]
fn get_public_keys(uuid: String, _headers: Headers, conn: DbConn) -> JsonResult {
let user = match User::find_by_uuid(&uuid, &conn) {
@@ -158,41 +134,15 @@ fn post_sstamp(data: JsonUpcase<PasswordData>, headers: Headers, conn: DbConn) -
Ok(())
}
#[derive(Deserialize)]
#[allow(non_snake_case)]
struct EmailTokenData {
MasterPasswordHash: String,
NewEmail: String,
}
#[post("/accounts/email-token", data = "<data>")]
fn post_email_token(data: JsonUpcase<EmailTokenData>, headers: Headers, conn: DbConn) -> EmptyResult {
let data: EmailTokenData = data.into_inner().data;
if !headers.user.check_valid_password(&data.MasterPasswordHash) {
err!("Invalid password")
}
if User::find_by_mail(&data.NewEmail, &conn).is_some() {
err!("Email already in use");
}
Ok(())
}
#[derive(Deserialize)]
#[allow(non_snake_case)]
struct ChangeEmailData {
MasterPasswordHash: String,
NewEmail: String,
Key: String,
NewMasterPasswordHash: String,
#[serde(rename = "Token")]
_Token: NumberOrString,
}
#[post("/accounts/email", data = "<data>")]
#[post("/accounts/email-token", data = "<data>")]
fn post_email(data: JsonUpcase<ChangeEmailData>, headers: Headers, conn: DbConn) -> EmptyResult {
let data: ChangeEmailData = data.into_inner().data;
let mut user = headers.user;
@@ -206,10 +156,6 @@ fn post_email(data: JsonUpcase<ChangeEmailData>, headers: Headers, conn: DbConn)
}
user.email = data.NewEmail;
user.set_password(&data.NewMasterPasswordHash);
user.key = data.Key;
user.save(&conn);
Ok(())
@@ -226,15 +172,17 @@ fn delete_account(data: JsonUpcase<PasswordData>, headers: Headers, conn: DbConn
// Delete ciphers and their attachments
for cipher in Cipher::find_owned_by_user(&user.uuid, &conn) {
if cipher.delete(&conn).is_err() {
err!("Failed deleting cipher")
match cipher.delete(&conn) {
Ok(()) => (),
Err(_) => err!("Failed deleting cipher")
}
}
// Delete folders
for f in Folder::find_by_user(&user.uuid, &conn) {
if f.delete(&conn).is_err() {
err!("Failed deleting folder")
match f.delete(&conn) {
Ok(()) => (),
Err(_) => err!("Failed deleting folder")
}
}
@@ -249,29 +197,6 @@ fn delete_account(data: JsonUpcase<PasswordData>, headers: Headers, conn: DbConn
#[get("/accounts/revision-date")]
fn revision_date(headers: Headers) -> String {
let revision_date = headers.user.updated_at.timestamp_millis();
let revision_date = headers.user.updated_at.timestamp();
revision_date.to_string()
}
#[derive(Deserialize)]
#[allow(non_snake_case)]
struct PasswordHintData {
Email: String,
}
#[post("/accounts/password-hint", data = "<data>")]
fn password_hint(data: JsonUpcase<PasswordHintData>, conn: DbConn) -> EmptyResult {
let data: PasswordHintData = data.into_inner().data;
if !CONFIG.show_password_hint {
return Ok(())
}
match User::find_by_mail(&data.Email, &conn) {
Some(user) => {
let hint = user.password_hint.to_owned().unwrap_or_default();
err!(format!("Your password hint is: {}", hint))
},
None => Ok(()),
}
}
+87 -171
File diff suppressed because it is too large
-116
@@ -646,121 +646,5 @@
"wiktionary.org"
],
"Excluded": false
},
{
"Type": 72,
"Domains": [
"airbnb.at",
"airbnb.be",
"airbnb.ca",
"airbnb.ch",
"airbnb.cl",
"airbnb.co.cr",
"airbnb.co.id",
"airbnb.co.in",
"airbnb.co.kr",
"airbnb.co.nz",
"airbnb.co.uk",
"airbnb.co.ve",
"airbnb.com",
"airbnb.com.ar",
"airbnb.com.au",
"airbnb.com.bo",
"airbnb.com.br",
"airbnb.com.bz",
"airbnb.com.co",
"airbnb.com.ec",
"airbnb.com.gt",
"airbnb.com.hk",
"airbnb.com.hn",
"airbnb.com.mt",
"airbnb.com.my",
"airbnb.com.ni",
"airbnb.com.pa",
"airbnb.com.pe",
"airbnb.com.py",
"airbnb.com.sg",
"airbnb.com.sv",
"airbnb.com.tr",
"airbnb.com.tw",
"airbnb.cz",
"airbnb.de",
"airbnb.dk",
"airbnb.es",
"airbnb.fi",
"airbnb.fr",
"airbnb.gr",
"airbnb.gy",
"airbnb.hu",
"airbnb.ie",
"airbnb.is",
"airbnb.it",
"airbnb.jp",
"airbnb.mx",
"airbnb.nl",
"airbnb.no",
"airbnb.pl",
"airbnb.pt",
"airbnb.ru",
"airbnb.se"
],
"Excluded": false
},
{
"Type": 73,
"Domains": [
"eventbrite.at",
"eventbrite.be",
"eventbrite.ca",
"eventbrite.ch",
"eventbrite.cl",
"eventbrite.co.id",
"eventbrite.co.in",
"eventbrite.co.kr",
"eventbrite.co.nz",
"eventbrite.co.uk",
"eventbrite.co.ve",
"eventbrite.com",
"eventbrite.com.au",
"eventbrite.com.bo",
"eventbrite.com.br",
"eventbrite.com.co",
"eventbrite.com.hk",
"eventbrite.com.hn",
"eventbrite.com.pe",
"eventbrite.com.sg",
"eventbrite.com.tr",
"eventbrite.com.tw",
"eventbrite.cz",
"eventbrite.de",
"eventbrite.dk",
"eventbrite.fi",
"eventbrite.fr",
"eventbrite.gy",
"eventbrite.hu",
"eventbrite.ie",
"eventbrite.is",
"eventbrite.it",
"eventbrite.jp",
"eventbrite.mx",
"eventbrite.nl",
"eventbrite.no",
"eventbrite.pl",
"eventbrite.pt",
"eventbrite.ru",
"eventbrite.se"
],
"Excluded": false
},
{
"Type": 74,
"Domains": [
"stackexchange.com",
"superuser.com",
"stackoverflow.com",
"serverfault.com",
"mathoverflow.net"
],
"Excluded": false
}
]
+10 -35
@@ -2,7 +2,7 @@ mod accounts;
mod ciphers;
mod folders;
mod organizations;
pub(crate) mod two_factor;
mod two_factor;
use self::accounts::*;
use self::ciphers::*;
@@ -14,17 +14,13 @@ pub fn routes() -> Vec<Route> {
routes![
register,
profile,
put_profile,
post_profile,
get_public_keys,
post_keys,
post_password,
post_sstamp,
post_email_token,
post_email,
delete_account,
revision_date,
password_hint,
sync,
@@ -33,29 +29,20 @@ pub fn routes() -> Vec<Route> {
get_cipher_admin,
get_cipher_details,
post_ciphers,
put_cipher_admin,
post_ciphers_admin,
post_ciphers_import,
post_attachment,
post_attachment_admin,
post_attachment_share,
delete_attachment_post,
delete_attachment_post_admin,
delete_attachment,
post_cipher_admin,
post_cipher_share,
put_cipher_share,
put_cipher_share_seleted,
post_cipher,
put_cipher,
delete_cipher_post,
delete_cipher_post_admin,
delete_cipher,
delete_cipher_selected,
delete_cipher_selected_post,
delete_all,
move_cipher_selected,
move_cipher_selected_put,
get_folders,
get_folder,
@@ -68,45 +55,31 @@ pub fn routes() -> Vec<Route> {
get_twofactor,
get_recover,
recover,
disable_twofactor,
disable_twofactor_put,
generate_authenticator,
activate_authenticator,
activate_authenticator_put,
generate_u2f,
activate_u2f,
activate_u2f_put,
disable_authenticator,
get_organization,
create_organization,
delete_organization,
post_delete_organization,
leave_organization,
get_user_collections,
get_org_collections,
get_org_collection_detail,
get_collection_users,
put_organization,
post_organization,
post_organization_collections,
delete_organization_collection_user,
post_organization_collection_delete_user,
post_organization_collection_update,
put_organization_collection_update,
delete_organization_collection,
post_organization_collection_delete,
post_collections_update,
post_collections_admin,
put_collections_admin,
get_org_details,
get_org_users,
send_invite,
confirm_invite,
get_user,
edit_user,
put_organization_user,
delete_user,
post_delete_user,
clear_device_token,
put_device_token,
@@ -133,7 +106,8 @@ use auth::Headers;
#[put("/devices/identifier/<uuid>/clear-token", data = "<data>")]
fn clear_device_token(uuid: String, data: Json<Value>, headers: Headers, conn: DbConn) -> EmptyResult {
let _data: Value = data.into_inner();
println!("UUID: {:#?}", uuid);
println!("DATA: {:#?}", data);
let device = match Device::find_by_uuid(&uuid, &conn) {
Some(device) => device,
@@ -151,8 +125,9 @@ fn clear_device_token(uuid: String, data: Json<Value>, headers: Headers, conn: D
#[put("/devices/identifier/<uuid>/token", data = "<data>")]
fn put_device_token(uuid: String, data: Json<Value>, headers: Headers, conn: DbConn) -> JsonResult {
let _data: Value = data.into_inner();
println!("UUID: {:#?}", uuid);
println!("DATA: {:#?}", data);
let device = match Device::find_by_uuid(&uuid, &conn) {
Some(device) => device,
None => err!("Device not found")
@@ -175,7 +150,7 @@ struct GlobalDomain {
Excluded: bool,
}
const GLOBAL_DOMAINS: &str = include_str!("global_domains.json");
const GLOBAL_DOMAINS: &'static str = include_str!("global_domains.json");
#[get("/settings/domains")]
fn get_eq_domains(headers: Headers) -> JsonResult {
@@ -210,8 +185,8 @@ struct EquivDomainData {
fn post_eq_domains(data: JsonUpcase<EquivDomainData>, headers: Headers, conn: DbConn) -> EmptyResult {
let data: EquivDomainData = data.into_inner().data;
let excluded_globals = data.ExcludedGlobalEquivalentDomains.unwrap_or_default();
let equivalent_domains = data.EquivalentDomains.unwrap_or_default();
let excluded_globals = data.ExcludedGlobalEquivalentDomains.unwrap_or(Vec::new());
let equivalent_domains = data.EquivalentDomains.unwrap_or(Vec::new());
let mut user = headers.user;
use serde_json::to_string;
+35 -100
@@ -8,8 +8,6 @@ use db::models::*;
use api::{PasswordData, JsonResult, EmptyResult, NumberOrString, JsonUpcase};
use auth::{Headers, AdminHeaders, OwnerHeaders};
use serde::{Deserialize, Deserializer};
#[derive(Deserialize)]
#[allow(non_snake_case)]
@@ -19,7 +17,7 @@ struct OrgData {
Key: String,
Name: String,
#[serde(rename = "PlanType")]
_PlanType: NumberOrString, // Ignored, always use the same plan
_PlanType: String, // Ignored, always use the same plan
}
#[derive(Deserialize, Debug)]
@@ -57,7 +55,7 @@ fn create_organization(headers: Headers, data: JsonUpcase<OrgData>, conn: DbConn
Ok(Json(org.to_json()))
}
#[delete("/organizations/<org_id>", data = "<data>")]
#[post("/organizations/<org_id>/delete", data = "<data>")]
fn delete_organization(org_id: String, data: JsonUpcase<PasswordData>, headers: OwnerHeaders, conn: DbConn) -> EmptyResult {
let data: PasswordData = data.into_inner().data;
let password_hash = data.MasterPasswordHash;
@@ -75,34 +73,6 @@ fn delete_organization(org_id: String, data: JsonUpcase<PasswordData>, headers:
}
}
#[post("/organizations/<org_id>/delete", data = "<data>")]
fn post_delete_organization(org_id: String, data: JsonUpcase<PasswordData>, headers: OwnerHeaders, conn: DbConn) -> EmptyResult {
delete_organization(org_id, data, headers, conn)
}
#[post("/organizations/<org_id>/leave")]
fn leave_organization(org_id: String, headers: Headers, conn: DbConn) -> EmptyResult {
match UserOrganization::find_by_user_and_org(&headers.user.uuid, &org_id, &conn) {
None => err!("User not part of organization"),
Some(user_org) => {
if user_org.type_ == UserOrgType::Owner as i32 {
let num_owners = UserOrganization::find_by_org_and_type(
&org_id, UserOrgType::Owner as i32, &conn)
.len();
if num_owners <= 1 {
err!("The last owner can't leave")
}
}
match user_org.delete(&conn) {
Ok(()) => Ok(()),
Err(_) => err!("Failed leaving the organization")
}
}
}
}
#[get("/organizations/<org_id>")]
fn get_organization(org_id: String, _headers: OwnerHeaders, conn: DbConn) -> JsonResult {
match Organization::find_by_uuid(&org_id, &conn) {
@@ -111,11 +81,6 @@ fn get_organization(org_id: String, _headers: OwnerHeaders, conn: DbConn) -> Jso
}
}
#[put("/organizations/<org_id>", data = "<data>")]
fn put_organization(org_id: String, headers: OwnerHeaders, data: JsonUpcase<OrganizationUpdateData>, conn: DbConn) -> JsonResult {
post_organization(org_id, headers, data, conn)
}
#[post("/organizations/<org_id>", data = "<data>")]
fn post_organization(org_id: String, _headers: OwnerHeaders, data: JsonUpcase<OrganizationUpdateData>, conn: DbConn) -> JsonResult {
let data: OrganizationUpdateData = data.into_inner().data;
@@ -176,11 +141,6 @@ fn post_organization_collections(org_id: String, _headers: AdminHeaders, data: J
Ok(Json(collection.to_json()))
}
#[put("/organizations/<org_id>/collections/<col_id>", data = "<data>")]
fn put_organization_collection_update(org_id: String, col_id: String, headers: AdminHeaders, data: JsonUpcase<NewCollectionData>, conn: DbConn) -> JsonResult {
post_organization_collection_update(org_id, col_id, headers, data, conn)
}
#[post("/organizations/<org_id>/collections/<col_id>", data = "<data>")]
fn post_organization_collection_update(org_id: String, col_id: String, _headers: AdminHeaders, data: JsonUpcase<NewCollectionData>, conn: DbConn) -> JsonResult {
let data: NewCollectionData = data.into_inner().data;
@@ -205,9 +165,8 @@ fn post_organization_collection_update(org_id: String, col_id: String, _headers:
Ok(Json(collection.to_json()))
}
#[delete("/organizations/<org_id>/collections/<col_id>/user/<org_user_id>")]
fn delete_organization_collection_user(org_id: String, col_id: String, org_user_id: String, _headers: AdminHeaders, conn: DbConn) -> EmptyResult {
#[post("/organizations/<org_id>/collections/<col_id>/delete-user/<org_user_id>")]
fn post_organization_collection_delete_user(org_id: String, col_id: String, org_user_id: String, _headers: AdminHeaders, conn: DbConn) -> EmptyResult {
let collection = match Collection::find_by_uuid(&col_id, &conn) {
None => err!("Collection not found"),
Some(collection) => if collection.org_uuid == org_id {
@@ -233,13 +192,17 @@ fn delete_organization_collection_user(org_id: String, col_id: String, org_user_
}
}
#[post("/organizations/<org_id>/collections/<col_id>/delete-user/<org_user_id>")]
fn post_organization_collection_delete_user(org_id: String, col_id: String, org_user_id: String, headers: AdminHeaders, conn: DbConn) -> EmptyResult {
delete_organization_collection_user(org_id, col_id, org_user_id, headers, conn)
#[derive(Deserialize, Debug)]
#[allow(non_snake_case)]
struct DeleteCollectionData {
Id: String,
OrgId: String,
}
#[delete("/organizations/<org_id>/collections/<col_id>")]
fn delete_organization_collection(org_id: String, col_id: String, _headers: AdminHeaders, conn: DbConn) -> EmptyResult {
#[post("/organizations/<org_id>/collections/<col_id>/delete", data = "<data>")]
fn post_organization_collection_delete(org_id: String, col_id: String, _headers: AdminHeaders, data: JsonUpcase<DeleteCollectionData>, conn: DbConn) -> EmptyResult {
let _data: DeleteCollectionData = data.into_inner().data;
match Collection::find_by_uuid(&col_id, &conn) {
None => err!("Collection not found"),
Some(collection) => if collection.org_uuid == org_id {
@@ -253,18 +216,6 @@ fn delete_organization_collection(org_id: String, col_id: String, _headers: Admi
}
}
#[derive(Deserialize, Debug)]
#[allow(non_snake_case)]
struct DeleteCollectionData {
Id: String,
OrgId: String,
}
#[post("/organizations/<org_id>/collections/<col_id>/delete", data = "<_data>")]
fn post_organization_collection_delete(org_id: String, col_id: String, headers: AdminHeaders, _data: JsonUpcase<DeleteCollectionData>, conn: DbConn) -> EmptyResult {
delete_organization_collection(org_id, col_id, headers, conn)
}
#[get("/organizations/<org_id>/collections/<coll_id>/details")]
fn get_org_collection_detail(org_id: String, coll_id: String, headers: AdminHeaders, conn: DbConn) -> JsonResult {
match Collection::find_by_uuid_and_user(&coll_id, &headers.user.uuid, &conn) {
@@ -334,19 +285,11 @@ fn get_org_users(org_id: String, headers: AdminHeaders, conn: DbConn) -> JsonRes
})))
}
fn deserialize_collections<'de, D>(deserializer: D) -> Result<Vec<CollectionData>, D::Error>
where
D: Deserializer<'de>,
{
// Deserialize null to empty Vec
Deserialize::deserialize(deserializer).or(Ok(vec![]))
}
#[derive(Deserialize)]
#[allow(non_snake_case)]
struct CollectionData {
Id: String,
ReadOnly: bool,
id: String,
readOnly: bool,
}
#[derive(Deserialize)]
@@ -354,7 +297,6 @@ struct CollectionData {
struct InviteData {
Emails: Vec<String>,
Type: NumberOrString,
#[serde(deserialize_with = "deserialize_collections")]
Collections: Vec<CollectionData>,
AccessAll: Option<bool>,
}
@@ -363,7 +305,7 @@ struct InviteData {
fn send_invite(org_id: String, data: JsonUpcase<InviteData>, headers: AdminHeaders, conn: DbConn) -> EmptyResult {
let data: InviteData = data.into_inner().data;
let new_type = match UserOrgType::from_str(&data.Type.into_string()) {
let new_type = match UserOrgType::from_str(&data.Type.to_string()) {
Some(new_type) => new_type as i32,
None => err!("Invalid type")
};
@@ -377,8 +319,9 @@ fn send_invite(org_id: String, data: JsonUpcase<InviteData>, headers: AdminHeade
match user_opt {
None => err!("User email does not exist"),
Some(user) => {
if UserOrganization::find_by_user_and_org(&user.uuid, &org_id, &conn).is_some() {
err!("User already in organization")
match UserOrganization::find_by_user_and_org(&user.uuid, &org_id, &conn) {
Some(_) => err!("User already in organization"),
None => ()
}
let mut new_user = UserOrganization::new(user.uuid.clone(), org_id.clone());
@@ -388,12 +331,13 @@ fn send_invite(org_id: String, data: JsonUpcase<InviteData>, headers: AdminHeade
// If no accessAll, add the collections received
if !access_all {
for col in &data.Collections {
match Collection::find_by_uuid_and_org(&col.Id, &org_id, &conn) {
for col in data.Collections.iter() {
match Collection::find_by_uuid_and_org(&col.id, &org_id, &conn) {
None => err!("Collection not found in Organization"),
Some(collection) => {
if CollectionUser::save(&user.uuid, &collection.uuid, col.ReadOnly, &conn).is_err() {
err!("Failed saving collection access for user")
match CollectionUser::save(&user.uuid, &collection.uuid, col.readOnly, &conn) {
Ok(()) => (),
Err(_) => err!("Failed saving collection access for user")
}
}
}
@@ -431,7 +375,7 @@ fn confirm_invite(org_id: String, user_id: String, data: JsonUpcase<Value>, head
}
user_to_confirm.status = UserOrgStatus::Confirmed as i32;
user_to_confirm.key = match data["Key"].as_str() {
user_to_confirm.key = match data["key"].as_str() {
Some(key) => key.to_string(),
None => err!("Invalid key provided")
};
@@ -459,21 +403,15 @@ fn get_user(org_id: String, user_id: String, _headers: AdminHeaders, conn: DbCon
#[allow(non_snake_case)]
struct EditUserData {
Type: NumberOrString,
#[serde(deserialize_with = "deserialize_collections")]
Collections: Vec<CollectionData>,
AccessAll: bool,
}
#[put("/organizations/<org_id>/users/<user_id>", data = "<data>", rank = 1)]
fn put_organization_user(org_id: String, user_id: String, data: JsonUpcase<EditUserData>, headers: AdminHeaders, conn: DbConn) -> EmptyResult {
edit_user(org_id, user_id, data, headers, conn)
}
#[post("/organizations/<org_id>/users/<user_id>", data = "<data>", rank = 1)]
fn edit_user(org_id: String, user_id: String, data: JsonUpcase<EditUserData>, headers: AdminHeaders, conn: DbConn) -> EmptyResult {
let data: EditUserData = data.into_inner().data;
let new_type = match UserOrgType::from_str(&data.Type.into_string()) {
let new_type = match UserOrgType::from_str(&data.Type.to_string()) {
Some(new_type) => new_type as i32,
None => err!("Invalid type")
};
@@ -511,19 +449,21 @@ fn edit_user(org_id: String, user_id: String, data: JsonUpcase<EditUserData>, he
// Delete all the odd collections
for c in CollectionUser::find_by_organization_and_user_uuid(&org_id, &user_to_edit.user_uuid, &conn) {
if c.delete(&conn).is_err() {
err!("Failed deleting old collection assignment")
match c.delete(&conn) {
Ok(()) => (),
Err(_) => err!("Failed deleting old collection assignment")
}
}
// If no accessAll, add the collections received
if !data.AccessAll {
for col in &data.Collections {
match Collection::find_by_uuid_and_org(&col.Id, &org_id, &conn) {
for col in data.Collections.iter() {
match Collection::find_by_uuid_and_org(&col.id, &org_id, &conn) {
None => err!("Collection not found in Organization"),
Some(collection) => {
if CollectionUser::save(&user_to_edit.user_uuid, &collection.uuid, col.ReadOnly, &conn).is_err() {
err!("Failed saving collection access for user")
match CollectionUser::save(&user_to_edit.user_uuid, &collection.uuid, col.readOnly, &conn) {
Ok(()) => (),
Err(_) => err!("Failed saving collection access for user")
}
}
}
@@ -535,7 +475,7 @@ fn edit_user(org_id: String, user_id: String, data: JsonUpcase<EditUserData>, he
Ok(())
}
#[delete("/organizations/<org_id>/users/<user_id>")]
#[post("/organizations/<org_id>/users/<user_id>/delete")]
fn delete_user(org_id: String, user_id: String, headers: AdminHeaders, conn: DbConn) -> EmptyResult {
let user_to_delete = match UserOrganization::find_by_uuid(&user_id, &conn) {
Some(user) => user,
@@ -562,9 +502,4 @@ fn delete_user(org_id: String, user_id: String, headers: AdminHeaders, conn: DbC
Ok(()) => Ok(()),
Err(_) => err!("Failed deleting user from organization")
}
}
#[post("/organizations/<org_id>/users/<user_id>/delete")]
fn post_delete_user(org_id: String, user_id: String, headers: AdminHeaders, conn: DbConn) -> EmptyResult {
delete_user(org_id, user_id, headers, conn)
}
+100 -399
File diff suppressed because it is too large
+43 -69
@@ -1,3 +1,4 @@
use std::io;
use std::io::prelude::*;
use std::fs::{create_dir_all, File};
@@ -22,59 +23,24 @@ fn icon(domain: String) -> Content<Vec<u8>> {
return Content(icon_type, get_fallback_icon());
}
let icon = get_icon(&domain);
let url = format!("https://icons.bitwarden.com/{}/icon.png", domain);
// Get the icon, or fallback in case of error
let icon = match get_icon_cached(&domain, &url) {
Ok(icon) => icon,
Err(_) => return Content(icon_type, get_fallback_icon())
};
Content(icon_type, icon)
}
fn get_icon (domain: &str) -> Vec<u8> {
let path = format!("{}/{}.png", CONFIG.icon_cache_folder, domain);
if let Some(icon) = get_cached_icon(&path) {
return icon;
}
let url = get_icon_url(&domain);
// Get the icon, or fallback in case of error
match download_icon(&url) {
Ok(icon) => {
save_icon(&path, &icon);
icon
},
Err(e) => {
println!("Error downloading icon: {:?}", e);
get_fallback_icon()
}
}
}
fn get_cached_icon(path: &str) -> Option<Vec<u8>> {
// Try to read the cached icon, and return it if it exists
if let Ok(mut f) = File::open(path) {
let mut buffer = Vec::new();
if f.read_to_end(&mut buffer).is_ok() {
return Some(buffer);
}
}
None
}
fn get_icon_url(domain: &str) -> String {
if CONFIG.local_icon_extractor {
format!("http://{}/favicon.ico", domain)
} else {
format!("https://icons.bitwarden.com/{}/icon.png", domain)
}
}
fn download_icon(url: &str) -> Result<Vec<u8>, reqwest::Error> {
println!("Downloading icon for {}...", url);
fn get_icon(url: &str) -> Result<Vec<u8>, reqwest::Error> {
let mut res = reqwest::get(url)?;
res = res.error_for_status()?;
res = match res.error_for_status() {
Err(e) => return Err(e),
Ok(res) => res
};
let mut buffer: Vec<u8> = vec![];
res.copy_to(&mut buffer)?;
@@ -82,31 +48,39 @@ fn download_icon(url: &str) -> Result<Vec<u8>, reqwest::Error> {
Ok(buffer)
}
fn save_icon(path: &str, icon: &[u8]) {
create_dir_all(&CONFIG.icon_cache_folder).expect("Error creating icon cache");
fn get_icon_cached(key: &str, url: &str) -> io::Result<Vec<u8>> {
create_dir_all(&CONFIG.icon_cache_folder)?;
let path = &format!("{}/{}.png", CONFIG.icon_cache_folder, key);
if let Ok(mut f) = File::create(path) {
f.write_all(icon).expect("Error writing icon file");
// Try to read the cached icon, and return it if it exists
match File::open(path) {
Ok(mut f) => {
let mut buffer = Vec::new();
if f.read_to_end(&mut buffer).is_ok() {
return Ok(buffer);
}
/* If error reading file continue */
}
Err(_) => { /* Continue */ }
}
println!("Downloading icon for {}...", key);
let icon = match get_icon(url) {
Ok(icon) => icon,
Err(_) => return Err(io::Error::new(io::ErrorKind::NotFound, ""))
};
}
const FALLBACK_ICON_URL: &str = "https://raw.githubusercontent.com/bitwarden/web/master/src/images/fa-globe.png";
// Save the currently downloaded icon
match File::create(path) {
Ok(mut f) => { f.write_all(&icon).expect("Error writing icon file"); }
Err(_) => { /* Continue */ }
};
Ok(icon)
}
fn get_fallback_icon() -> Vec<u8> {
let path = format!("{}/default.png", CONFIG.icon_cache_folder);
if let Some(icon) = get_cached_icon(&path) {
return icon;
}
match download_icon(FALLBACK_ICON_URL) {
Ok(icon) => {
save_icon(&path, &icon);
icon
},
Err(e) => {
println!("Error downloading fallback icon: {:?}", e);
vec![]
}
}
let fallback_icon = "https://raw.githubusercontent.com/bitwarden/web/master/src/images/fa-globe.png";
get_icon_cached("default", fallback_icon).unwrap()
}
+119 -174
File diff suppressed because it is too large
+6 -7
@@ -1,4 +1,4 @@
pub(crate) mod core;
mod core;
mod icons;
mod identity;
mod web;
@@ -12,9 +12,8 @@ use rocket::response::status::BadRequest;
use rocket_contrib::Json;
// Type aliases for API methods results
type ApiResult<T> = Result<T, BadRequest<Json>>;
type JsonResult = ApiResult<Json>;
type EmptyResult = ApiResult<()>;
type JsonResult = Result<Json, BadRequest<Json>>;
type EmptyResult = Result<(), BadRequest<Json>>;
use util;
type JsonUpcase<T> = Json<util::UpCase<T>>;
@@ -26,7 +25,7 @@ struct PasswordData {
MasterPasswordHash: String
}
#[derive(Deserialize, Debug, Clone)]
#[derive(Deserialize, Debug)]
#[serde(untagged)]
enum NumberOrString {
Number(i32),
@@ -34,14 +33,14 @@ enum NumberOrString {
}
impl NumberOrString {
fn into_string(self) -> String {
fn to_string(self) -> String {
match self {
NumberOrString::Number(n) => n.to_string(),
NumberOrString::String(s) => s
}
}
fn into_i32(self) -> Option<i32> {
fn to_i32(self) -> Option<i32> {
match self {
NumberOrString::Number(n) => Some(n),
NumberOrString::String(s) => s.parse().ok()
+16 -50
View File
@@ -1,73 +1,39 @@
use std::io;
use std::path::{Path, PathBuf};
use rocket::request::Request;
use rocket::response::{self, NamedFile, Responder};
use rocket::response::content::Content;
use rocket::http::{ContentType, Status};
use rocket::Route;
use rocket_contrib::{Json, Value};
use rocket::response::NamedFile;
use rocket_contrib::Json;
use CONFIG;
pub fn routes() -> Vec<Route> {
if CONFIG.web_vault_enabled {
routes![web_index, app_id, web_files, attachments, alive]
} else {
routes![attachments, alive]
}
routes![index, files, attachments, alive]
}
// TODO: Might want to use in memory cache: https://github.com/hgzimmerman/rocket-file-cache
#[get("/")]
fn web_index() -> WebHeaders<io::Result<NamedFile>> {
web_files("index.html".into())
}
#[get("/app-id.json")]
fn app_id() -> WebHeaders<Content<Json<Value>>> {
let content_type = ContentType::new("application", "fido.trusted-apps+json");
WebHeaders(Content(content_type, Json(json!({
"trustedFacets": [
{
"version": { "major": 1, "minor": 0 },
"ids": [
&CONFIG.domain,
"ios:bundle-id:com.8bit.bitwarden",
"android:apk-key-hash:dUGFzUzf3lmHSLBDBIv+WaFyZMI" ]
}]
}))))
fn index() -> io::Result<NamedFile> {
NamedFile::open(
Path::new(&CONFIG.web_vault_folder)
.join("index.html"))
}
#[get("/<p..>", rank = 1)] // Only match this if the other routes don't match
fn web_files(p: PathBuf) -> WebHeaders<io::Result<NamedFile>> {
WebHeaders(NamedFile::open(Path::new(&CONFIG.web_vault_folder).join(p)))
fn files(p: PathBuf) -> io::Result<NamedFile> {
NamedFile::open(
Path::new(&CONFIG.web_vault_folder)
.join(p))
}
struct WebHeaders<R>(R);
impl<'r, R: Responder<'r>> Responder<'r> for WebHeaders<R> {
fn respond_to(self, req: &Request) -> response::Result<'r> {
match self.0.respond_to(req) {
Ok(mut res) => {
res.set_raw_header("Referrer-Policy", "same-origin");
res.set_raw_header("X-Frame-Options", "SAMEORIGIN");
res.set_raw_header("X-Content-Type-Options", "nosniff");
res.set_raw_header("X-XSS-Protection", "1; mode=block");
Ok(res)
},
Err(_) => {
Err(Status::NotFound)
}
}
}
}
#[get("/attachments/<uuid>/<file..>")]
fn attachments(uuid: String, file: PathBuf) -> io::Result<NamedFile> {
NamedFile::open(Path::new(&CONFIG.attachments_folder).join(uuid).join(file))
NamedFile::open(
Path::new(&CONFIG.attachments_folder)
.join(uuid)
.join(file)
)
}
+9 -38
View File
@@ -11,11 +11,10 @@ use serde::ser::Serialize;
use CONFIG;
const JWT_ALGORITHM: jwt::Algorithm = jwt::Algorithm::RS256;
pub const JWT_ISSUER: &'static str = "localhost:8000/identity";
lazy_static! {
pub static ref DEFAULT_VALIDITY: Duration = Duration::hours(2);
pub static ref JWT_ISSUER: String = CONFIG.domain.clone();
static ref JWT_HEADER: jwt::Header = jwt::Header::new(JWT_ALGORITHM);
static ref PRIVATE_RSA_KEY: Vec<u8> = match read_file(&CONFIG.private_rsa_key) {
@@ -31,9 +30,9 @@ lazy_static! {
pub fn encode_jwt<T: Serialize>(claims: &T) -> String {
match jwt::encode(&JWT_HEADER, claims, &PRIVATE_RSA_KEY) {
Ok(token) => token,
Ok(token) => return token,
Err(e) => panic!("Error encoding jwt {}", e)
}
};
}
pub fn decode_jwt(token: &str) -> Result<JWTClaims, String> {
@@ -43,7 +42,7 @@ pub fn decode_jwt(token: &str) -> Result<JWTClaims, String> {
validate_iat: true,
validate_nbf: true,
aud: None,
iss: Some(JWT_ISSUER.clone()),
iss: Some(JWT_ISSUER.into()),
sub: None,
algorithms: vec![JWT_ALGORITHM],
};
@@ -95,7 +94,7 @@ use rocket::Outcome;
use rocket::request::{self, Request, FromRequest};
use db::DbConn;
use db::models::{User, UserOrganization, UserOrgType, UserOrgStatus, Device};
use db::models::{User, UserOrganization, UserOrgType, Device};
pub struct Headers {
pub host: String,
@@ -110,31 +109,9 @@ impl<'a, 'r> FromRequest<'a, 'r> for Headers {
let headers = request.headers();
// Get host
let host = if CONFIG.domain_set {
CONFIG.domain.clone()
} else if let Some(referer) = headers.get_one("Referer") {
referer.to_string()
} else {
// Try to guess from the headers
use std::env;
let protocol = if let Some(proto) = headers.get_one("X-Forwarded-Proto") {
proto
} else if env::var("ROCKET_TLS").is_ok() {
"https"
} else {
"http"
};
let host = if let Some(host) = headers.get_one("X-Forwarded-Host") {
host
} else if let Some(host) = headers.get_one("Host") {
host
} else {
""
};
format!("{}://{}", protocol, host)
let host = match headers.get_one("Host") {
Some(host) => format!("http://{}", host), // TODO: Check if HTTPS
_ => String::new()
};
// Get access_token
@@ -205,13 +182,7 @@ impl<'a, 'r> FromRequest<'a, 'r> for OrgHeaders {
};
let org_user = match UserOrganization::find_by_user_and_org(&headers.user.uuid, &org_id, &conn) {
Some(user) => {
if user.status == UserOrgStatus::Confirmed as i32 {
user
} else {
err_handler!("The current user isn't confirmed member of the organization")
}
}
Some(user) => user,
None => err_handler!("The current user isn't member of the organization")
};
+6 -31
View File
@@ -64,33 +64,14 @@ impl Attachment {
pub fn delete(self, conn: &DbConn) -> QueryResult<()> {
use util;
use std::{thread, time};
let mut retries = 10;
loop {
match diesel::delete(
attachments::table.filter(
attachments::id.eq(&self.id)
)
).execute(&**conn) {
Ok(_) => break,
Err(err) => {
if retries < 1 {
println!("ERROR: Failed with 10 retries");
return Err(err)
} else {
retries = retries - 1;
println!("Had to retry! Retries left: {}", retries);
thread::sleep(time::Duration::from_millis(500));
continue
}
}
}
}
util::delete_file(&self.get_file_path());
Ok(())
diesel::delete(
attachments::table.filter(
attachments::id.eq(self.id)
)
).execute(&**conn).and(Ok(()))
}
pub fn delete_all_by_cipher(cipher_uuid: &str, conn: &DbConn) -> QueryResult<()> {
@@ -111,10 +92,4 @@ impl Attachment {
.filter(attachments::cipher_uuid.eq(cipher_uuid))
.load::<Self>(&**conn).expect("Error loading attachments")
}
pub fn find_by_ciphers(cipher_uuids: Vec<String>, conn: &DbConn) -> Vec<Self> {
attachments::table
.filter(attachments::cipher_uuid.eq_any(cipher_uuids))
.load::<Self>(&**conn).expect("Error loading attachments")
}
}
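The removed block above wraps the Diesel delete in a bounded retry loop with a short pause between attempts, which helps when SQLite reports the database as locked. A generic sketch of that pattern; the helper name `retry` and the constant are illustrative, not the project's code:

```rust
use std::{thread, time::Duration};

// Generic sketch of the retry loop shown in the hunk above: retry a fallible
// operation a fixed number of times, sleeping briefly between attempts.
fn retry<T, E: std::fmt::Debug>(mut op: impl FnMut() -> Result<T, E>) -> Result<T, E> {
    const MAX_RETRIES: u32 = 10;
    let mut attempts = 0;
    loop {
        match op() {
            Ok(value) => return Ok(value),
            Err(err) if attempts < MAX_RETRIES => {
                attempts += 1;
                println!("Had to retry! Attempt {}/{}: {:?}", attempts, MAX_RETRIES, err);
                thread::sleep(Duration::from_millis(500));
            }
            Err(err) => return Err(err),
        }
    }
}

fn main() {
    // Succeeds on the third attempt.
    let mut calls = 0;
    let result = retry(|| {
        calls += 1;
        if calls < 3 { Err("database is locked") } else { Ok(calls) }
    });
    assert_eq!(result, Ok(3));
}
```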
+4 -27
View File
@@ -3,7 +3,7 @@ use serde_json::Value as JsonValue;
use uuid::Uuid;
use super::{User, Organization, Attachment, FolderCipher, CollectionCipher, UserOrganization, UserOrgType, UserOrgStatus};
use super::{User, Organization, Attachment, FolderCipher, CollectionCipher, UserOrgType};
#[derive(Debug, Identifiable, Queryable, Insertable, Associations)]
#[table_name = "ciphers"]
@@ -84,7 +84,7 @@ impl Cipher {
// To remove backwards compatibility, just remove this entire section
// and remove the compat code from ciphers::update_cipher_from_data
if self.type_ == 1 && data_json["Uris"].is_array() {
let uri = data_json["Uris"][0]["Uri"].clone();
let uri = data_json["Uris"][0]["uri"].clone();
data_json["Uri"] = uri;
}
// TODO: ******* Backwards compat end **********
@@ -97,7 +97,7 @@ impl Cipher {
"Favorite": self.favorite,
"OrganizationId": self.organization_uuid,
"Attachments": attachments_json,
"OrganizationUseTotp": true,
"OrganizationUseTotp": false,
"CollectionIds": self.get_collections(user_uuid, &conn),
"Name": self.name,
@@ -122,23 +122,7 @@ impl Cipher {
json_object
}
pub fn update_users_revision(&self, conn: &DbConn) {
match self.user_uuid {
Some(ref user_uuid) => User::update_uuid_revision(&user_uuid, conn),
None => { // Belongs to Organization, need to update affected users
if let Some(ref org_uuid) = self.organization_uuid {
UserOrganization::find_by_cipher_and_org(&self.uuid, &org_uuid, conn)
.iter()
.for_each(|user_org| {
User::update_uuid_revision(&user_org.user_uuid, conn)
});
}
}
};
}
pub fn save(&mut self, conn: &DbConn) -> bool {
self.update_users_revision(conn);
self.updated_at = Utc::now().naive_utc();
match diesel::replace_into(ciphers::table)
@@ -150,8 +134,6 @@ impl Cipher {
}
pub fn delete(self, conn: &DbConn) -> QueryResult<()> {
self.update_users_revision(conn);
FolderCipher::delete_all_by_cipher(&self.uuid, &conn)?;
CollectionCipher::delete_all_by_cipher(&self.uuid, &conn)?;
Attachment::delete_all_by_cipher(&self.uuid, &conn)?;
@@ -175,7 +157,6 @@ impl Cipher {
None => {
match folder_uuid {
Some(new_folder) => {
self.update_users_revision(conn);
let folder_cipher = FolderCipher::new(&new_folder, &self.uuid);
folder_cipher.save(&conn).or(Err("Couldn't save folder setting"))
},
@@ -188,7 +169,6 @@ impl Cipher {
if current_folder == new_folder {
Ok(()) //nothing to do
} else {
self.update_users_revision(conn);
match FolderCipher::find_by_folder_and_cipher(&current_folder, &self.uuid, &conn) {
Some(current_folder) => {
current_folder.delete(&conn).or(Err("Failed removing old folder mapping"))
@@ -201,7 +181,6 @@ impl Cipher {
}
},
None => {
self.update_users_revision(conn);
match FolderCipher::find_by_folder_and_cipher(&current_folder, &self.uuid, &conn) {
Some(current_folder) => {
current_folder.delete(&conn).or(Err("Failed removing old folder mapping"))
@@ -287,9 +266,7 @@ impl Cipher {
ciphers::table
.left_join(users_organizations::table.on(
ciphers::organization_uuid.eq(users_organizations::org_uuid.nullable()).and(
users_organizations::user_uuid.eq(user_uuid).and(
users_organizations::status.eq(UserOrgStatus::Confirmed as i32)
)
users_organizations::user_uuid.eq(user_uuid)
)
))
.left_join(ciphers_collections::table)
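One small piece of the hunk above is the backwards-compatibility shim that copies the first entry of the newer `Uris` array back into a legacy `Uri` field for older clients. A hedged sketch of that step, assuming the `serde_json` crate the file already imports; the field casing follows one side of the hunk and the helper name is illustrative:

```rust
use serde_json::{json, Value};

// Sketch of the backwards-compatibility shim above: older clients expect a
// single "Uri" field, newer data keeps an array under "Uris", so the first
// entry is copied back.
fn add_legacy_uri(data: &mut Value) {
    if data["Uris"].is_array() {
        let uri = data["Uris"][0]["Uri"].clone();
        data["Uri"] = uri;
    }
}

fn main() {
    let mut data = json!({ "Uris": [{ "Uri": "https://example.com" }] });
    add_legacy_uri(&mut data);
    assert_eq!(data["Uri"], "https://example.com");
}
```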
+1 -18
View File
@@ -2,7 +2,7 @@ use serde_json::Value as JsonValue;
use uuid::Uuid;
use super::{Organization, UserOrganization, UserOrgType, UserOrgStatus};
use super::{Organization, UserOrganization, UserOrgType};
#[derive(Debug, Identifiable, Queryable, Insertable, Associations)]
#[table_name = "collections"]
@@ -78,18 +78,13 @@ impl Collection {
pub fn find_by_user_uuid(user_uuid: &str, conn: &DbConn) -> Vec<Self> {
let mut all_access_collections = users_organizations::table
.filter(users_organizations::user_uuid.eq(user_uuid))
.filter(users_organizations::status.eq(UserOrgStatus::Confirmed as i32))
.filter(users_organizations::access_all.eq(true))
.inner_join(collections::table.on(collections::org_uuid.eq(users_organizations::org_uuid)))
.select(collections::all_columns)
.load::<Self>(&**conn).expect("Error loading collections");
let mut assigned_collections = users_collections::table.inner_join(collections::table)
.left_join(users_organizations::table.on(
users_collections::user_uuid.eq(users_organizations::user_uuid)
))
.filter(users_collections::user_uuid.eq(user_uuid))
.filter(users_organizations::status.eq(UserOrgStatus::Confirmed as i32))
.select(collections::all_columns)
.load::<Self>(&**conn).expect("Error loading collections");
@@ -185,8 +180,6 @@ impl CollectionUser {
}
pub fn save(user_uuid: &str, collection_uuid: &str, read_only:bool, conn: &DbConn) -> QueryResult<()> {
User::update_uuid_revision(&user_uuid, conn);
diesel::replace_into(users_collections::table)
.values((
users_collections::user_uuid.eq(user_uuid),
@@ -196,8 +189,6 @@ impl CollectionUser {
}
pub fn delete(self, conn: &DbConn) -> QueryResult<()> {
User::update_uuid_revision(&self.user_uuid, conn);
diesel::delete(users_collections::table
.filter(users_collections::user_uuid.eq(&self.user_uuid))
.filter(users_collections::collection_uuid.eq(&self.collection_uuid)))
@@ -220,20 +211,12 @@ impl CollectionUser {
}
pub fn delete_all_by_collection(collection_uuid: &str, conn: &DbConn) -> QueryResult<()> {
CollectionUser::find_by_collection(&collection_uuid, conn)
.iter()
.for_each(|collection| {
User::update_uuid_revision(&collection.user_uuid, conn)
});
diesel::delete(users_collections::table
.filter(users_collections::collection_uuid.eq(collection_uuid))
).execute(&**conn).and(Ok(()))
}
pub fn delete_all_by_user(user_uuid: &str, conn: &DbConn) -> QueryResult<()> {
User::update_uuid_revision(&user_uuid, conn);
diesel::delete(users_collections::table
.filter(users_collections::user_uuid.eq(user_uuid))
).execute(&**conn).and(Ok(()))
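`find_by_user_uuid` in the hunk above gathers collections two ways: everything in organizations where the user has `access_all`, plus collections assigned directly. A stand-alone sketch of the merge-and-deduplicate step that presumably follows; the `Collection` struct and function name here are illustrative, not the project's Diesel model:

```rust
// Combine access_all collections with directly assigned ones, deduplicated by UUID.
#[derive(Debug, PartialEq)]
struct Collection {
    uuid: String,
    name: String,
}

fn merge_collections(mut all_access: Vec<Collection>, assigned: Vec<Collection>) -> Vec<Collection> {
    for candidate in assigned {
        if !all_access.iter().any(|existing| existing.uuid == candidate.uuid) {
            all_access.push(candidate);
        }
    }
    all_access
}

fn main() {
    let all_access = vec![Collection { uuid: "1".into(), name: "Shared".into() }];
    let assigned = vec![
        Collection { uuid: "1".into(), name: "Shared".into() },
        Collection { uuid: "2".into(), name: "Private".into() },
    ];
    let merged = merge_collections(all_access, assigned);
    assert_eq!(merged.len(), 2);
    for collection in &merged {
        println!("{} -> {}", collection.uuid, collection.name);
    }
}
```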
+2 -5
View File
@@ -43,14 +43,11 @@ impl Device {
}
}
pub fn refresh_twofactor_remember(&mut self) -> String {
pub fn refresh_twofactor_remember(&mut self) {
use data_encoding::BASE64;
use crypto;
let twofactor_remember = BASE64.encode(&crypto::get_random(vec![0u8; 180]));
self.twofactor_remember = Some(twofactor_remember.clone());
twofactor_remember
self.twofactor_remember = Some(BASE64.encode(&crypto::get_random(vec![0u8; 180])));
}
pub fn delete_twofactor_remember(&mut self) {
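Both sides of the hunk above build the two-factor "remember me" token the same way, random bytes encoded as base64 and stored on the device; the change is only whether the token is returned. An illustrative sketch, assuming the `data_encoding` crate the hunk already uses, with a caller-supplied byte buffer standing in for the project's `crypto::get_random`:

```rust
use data_encoding::BASE64;

// Illustrative sketch of the "remember 2FA" token handling. A real
// implementation must fill the buffer from a cryptographically secure RNG.
struct Device {
    twofactor_remember: Option<String>,
}

impl Device {
    fn refresh_twofactor_remember(&mut self, random_bytes: &[u8]) -> String {
        let token = BASE64.encode(random_bytes);
        self.twofactor_remember = Some(token.clone());
        token
    }

    fn delete_twofactor_remember(&mut self) {
        self.twofactor_remember = None;
    }
}

fn main() {
    let mut device = Device { twofactor_remember: None };
    // The real code encodes 180 random bytes; zeros keep the sketch deterministic.
    let token = device.refresh_twofactor_remember(&[0u8; 180]);
    assert_eq!(device.twofactor_remember.as_deref(), Some(token.as_str()));

    device.delete_twofactor_remember();
    assert!(device.twofactor_remember.is_none());
}
```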
-2
View File
@@ -71,7 +71,6 @@ use db::schema::{folders, folders_ciphers};
/// Database methods
impl Folder {
pub fn save(&mut self, conn: &DbConn) -> bool {
User::update_uuid_revision(&self.user_uuid, conn);
self.updated_at = Utc::now().naive_utc();
match diesel::replace_into(folders::table)
@@ -83,7 +82,6 @@ impl Folder {
}
pub fn delete(self, conn: &DbConn) -> QueryResult<()> {
User::update_uuid_revision(&self.user_uuid, conn);
FolderCipher::delete_all_by_folder(&self.uuid, &conn)?;
diesel::delete(
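The lines removed here, and in the cipher and collection hunks above, all call `User::update_uuid_revision`, which bumps the owning user's revision stamp so clients know to re-sync. A conceptual, in-memory sketch of that idea; the real code goes through the Diesel-backed `User` model:

```rust
use std::time::{SystemTime, UNIX_EPOCH};

// Whenever a folder (or cipher, or collection) changes, advance the owning
// user's revision stamp so clients detect that a sync is needed.
struct User {
    uuid: String,
    updated_at: u64,
}

fn update_uuid_revision(user: &mut User) {
    user.updated_at = SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .expect("system clock before 1970")
        .as_secs();
}

fn main() {
    let mut user = User { uuid: "some-user-uuid".into(), updated_at: 0 };
    // Called from Folder::save / Folder::delete in the hunk above.
    update_uuid_revision(&mut user);
    assert!(user.updated_at > 0);
    println!("bumped revision for {}", user.uuid);
}
```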
-2
View File
@@ -6,7 +6,6 @@ mod user;
mod collection;
mod organization;
mod two_factor;
pub use self::attachment::Attachment;
pub use self::cipher::Cipher;
@@ -16,4 +15,3 @@ pub use self::user::User;
pub use self::organization::Organization;
pub use self::organization::{UserOrganization, UserOrgStatus, UserOrgType};
pub use self::collection::{Collection, CollectionUser, CollectionCipher};
pub use self::two_factor::{TwoFactor, TwoFactorType};

Some files were not shown because too many files have changed in this diff.