From 5e5cde1624b1b4ffd00efa73935c48e547a5a8d3 Mon Sep 17 00:00:00 2001
From: Rutger Broekhoff
Date: Sat, 30 Mar 2024 15:19:49 +0100
Subject: Restructure authorization flow

Tried to write verify_claims such that the happy path stays as far to
the left as possible.
---
 common/src/lib.rs  | 102 ++++++++++++++++++++++++++---------------------------
 server/src/main.rs |  97 +++++++++++++++++++++++++++++---------------------
 2 files changed, 107 insertions(+), 92 deletions(-)

diff --git a/common/src/lib.rs b/common/src/lib.rs
index c26150d..917f566 100644
--- a/common/src/lib.rs
+++ b/common/src/lib.rs
@@ -7,6 +7,27 @@ use std::{
 };
 use subtle::ConstantTimeEq;
 
+#[repr(u8)]
+enum AuthType {
+    BatchApi = 1,
+    Download = 2,
+}
+
+#[derive(Debug, Copy, Clone)]
+pub struct Claims<'a> {
+    pub specific_claims: SpecificClaims,
+    pub repo_path: &'a str,
+    pub expires_at: DateTime<Utc>,
+}
+
+#[derive(Debug, Copy, Clone)]
+pub enum SpecificClaims {
+    BatchApi(Operation),
+    Download(Oid),
+}
+
+pub type Oid = Digest<32>;
+
 #[derive(Debug, Eq, PartialEq, Copy, Clone, Serialize, Deserialize)]
 #[repr(u8)]
 pub enum Operation {
@@ -16,6 +37,29 @@ pub enum Operation {
     Upload = 2,
 }
 
+/// Returns None if the claims are invalid. Repo path length may be no more than 100 bytes.
+pub fn generate_tag(claims: Claims, key: impl AsRef<[u8]>) -> Option<Digest<32>> {
+    if claims.repo_path.len() > 100 {
+        return None;
+    }
+
+    let mut hmac = hmac_sha256::HMAC::new(key);
+    match claims.specific_claims {
+        SpecificClaims::BatchApi(operation) => {
+            hmac.update([AuthType::BatchApi as u8]);
+            hmac.update([operation as u8]);
+        }
+        SpecificClaims::Download(oid) => {
+            hmac.update([AuthType::Download as u8]);
+            hmac.update(oid.as_bytes());
+        }
+    }
+    hmac.update([claims.repo_path.len() as u8]);
+    hmac.update(claims.repo_path.as_bytes());
+    hmac.update(claims.expires_at.timestamp().to_be_bytes());
+    Some(hmac.finalize().into())
+}
+
 #[derive(Debug, PartialEq, Eq, Copy, Clone)]
 pub struct ParseOperationError;
 
@@ -37,12 +81,6 @@ impl FromStr for Operation {
     }
 }
 
-#[repr(u8)]
-enum AuthType {
-    BatchApi = 1,
-    Download = 2,
-}
-
 /// None means out of range.
 fn decode_nibble(c: u8) -> Option<u8> {
     if c.is_ascii_digit() {
@@ -148,6 +186,13 @@ fn parse_hex_exact(value: &str, buf: &mut [u8]) -> Result<(), ParseHexError> {
     Ok(())
 }
 
+pub type Key = SafeByteArray<64>;
+
+pub fn load_key(path: &str) -> Result<Key, ReadHexError> {
+    let key_str = std::fs::read_to_string(path).map_err(ReadHexError::Io)?;
+    key_str.trim().parse().map_err(ReadHexError::Format)
+}
+
 pub struct SafeByteArray<const N: usize> {
     inner: [u8; N],
 }
@@ -192,44 +237,6 @@ impl<const N: usize> FromStr for SafeByteArray<N> {
     }
 }
 
-pub type Oid = Digest<32>;
-
-#[derive(Debug, Copy, Clone)]
-pub enum SpecificClaims {
-    BatchApi(Operation),
-    Download(Oid),
-}
-
-#[derive(Debug, Copy, Clone)]
-pub struct Claims<'a> {
-    pub specific_claims: SpecificClaims,
-    pub repo_path: &'a str,
-    pub expires_at: DateTime<Utc>,
-}
-
-/// Returns None if the claims are invalid. Repo path length may be no more than 100 bytes.
-pub fn generate_tag(claims: Claims, key: impl AsRef<[u8]>) -> Option<Digest<32>> {
-    if claims.repo_path.len() > 100 {
-        return None;
-    }
-
-    let mut hmac = hmac_sha256::HMAC::new(key);
-    match claims.specific_claims {
-        SpecificClaims::BatchApi(operation) => {
-            hmac.update([AuthType::BatchApi as u8]);
-            hmac.update([operation as u8]);
-        }
-        SpecificClaims::Download(oid) => {
-            hmac.update([AuthType::Download as u8]);
-            hmac.update(oid.as_bytes());
-        }
-    }
-    hmac.update([claims.repo_path.len() as u8]);
-    hmac.update(claims.repo_path.as_bytes());
-    hmac.update(claims.expires_at.timestamp().to_be_bytes());
-    Some(hmac.finalize().into())
-}
-
 pub struct HexFmt<B: AsRef<[u8]>>(pub B);
 
 impl<B: AsRef<[u8]>> fmt::Display for HexFmt<B> {
@@ -339,10 +346,3 @@ impl<const N: usize> Serialize for Digest<N> {
         serializer.serialize_str(&format!("{self}"))
     }
 }
-
-pub type Key = SafeByteArray<64>;
-
-pub fn load_key(path: &str) -> Result<Key, ReadHexError> {
-    let key_str = std::fs::read_to_string(path).map_err(ReadHexError::Io)?;
-    key_str.trim().parse().map_err(ReadHexError::Format)
-}
diff --git a/server/src/main.rs b/server/src/main.rs
index 4a88dcd..e615d19 100644
--- a/server/src/main.rs
+++ b/server/src/main.rs
@@ -762,28 +762,46 @@ fn authorize_batch(
         specific_claims: common::SpecificClaims::BatchApi(operation),
         repo_path,
     };
-    if verify_claims(conf, &claims, headers)? {
-        return Ok(Trusted(true));
+    if !verify_claims(conf, &claims, headers)? {
+        return authorize_batch_unauthenticated(conf, public, operation, headers);
     }
+    return Ok(Trusted(true));
+}
 
+fn authorize_batch_unauthenticated(
+    conf: &AuthorizationConfig,
+    public: bool,
+    operation: common::Operation,
+    headers: &HeaderMap,
+) -> Result<Trusted, GitLfsErrorResponse<'static>> {
     let trusted = forwarded_from_trusted_host(headers, &conf.trusted_forwarded_hosts)?;
-    if operation != common::Operation::Download {
-        if trusted {
+    match operation {
+        common::Operation::Upload => {
+            // Trusted users can clone all repositories (by virtue of accessing the server via a
+            // trusted network). However, they cannot push without proper authentication. Untrusted
+            // users who are also not authenticated should not need to know which repositories exist.
+            // Therefore, we tell untrusted && unauthenticated users that the repo doesn't exist, but
+            // tell trusted users that they need to authenticate.
+            if !trusted {
+                return Err(REPO_NOT_FOUND);
+            }
             return Err(make_error_resp(
                 StatusCode::FORBIDDEN,
                 "Authentication required to upload",
             ));
+        },
+        common::Operation::Download => {
+            // Again, trusted users can see all repos. For untrusted users, we first need to check
+            // whether the repo is public before we authorize. If the user is untrusted and the
+            // repo isn't public, we just act like it doesn't even exist.
+            if !trusted {
+                if !public {
+                    return Err(REPO_NOT_FOUND)
+                }
+                return Ok(Trusted(false))
+            }
+            return Ok(Trusted(true));
         }
-        return Err(REPO_NOT_FOUND);
-    }
-    if trusted {
-        return Ok(Trusted(true));
-    }
-
-    if public {
-        Ok(Trusted(false))
-    } else {
-        Err(REPO_NOT_FOUND)
     }
 }
 
@@ -909,35 +927,32 @@ fn verify_claims(
     const INVALID_AUTHZ_HEADER: GitLfsErrorResponse =
         make_error_resp(StatusCode::BAD_REQUEST, "Invalid authorization header");
 
-    if let Some(authz) = headers.get(header::AUTHORIZATION) {
-        if let Ok(authz) = authz.to_str() {
-            if let Some(val) = authz.strip_prefix("Gitolfs3-Hmac-Sha256 ") {
-                let (tag, expires_at) = val.split_once(' ').ok_or(INVALID_AUTHZ_HEADER)?;
-                let tag: common::Digest<32> = tag.parse().map_err(|_| INVALID_AUTHZ_HEADER)?;
-                let expires_at: i64 = expires_at.parse().map_err(|_| INVALID_AUTHZ_HEADER)?;
-                let expires_at =
-                    DateTime::<Utc>::from_timestamp(expires_at, 0).ok_or(INVALID_AUTHZ_HEADER)?;
-                let Some(expected_tag) = common::generate_tag(
-                    common::Claims {
-                        specific_claims: claims.specific_claims,
-                        repo_path: claims.repo_path,
-                        expires_at,
-                    },
-                    &conf.key,
-                ) else {
-                    return Err(make_error_resp(
-                        StatusCode::INTERNAL_SERVER_ERROR,
-                        "Internal server error",
-                    ));
-                };
-                if tag == expected_tag {
-                    return Ok(true);
-                }
-            }
-        }
+    let Some(authz) = headers.get(header::AUTHORIZATION) else {
+        return Ok(false);
+    };
+    let authz = authz.to_str().map_err(|_| INVALID_AUTHZ_HEADER)?;
+    let val = authz.strip_prefix("Gitolfs3-Hmac-Sha256 ").ok_or(INVALID_AUTHZ_HEADER)?;
+    let (tag, expires_at) = val.split_once(' ').ok_or(INVALID_AUTHZ_HEADER)?;
+    let tag: common::Digest<32> = tag.parse().map_err(|_| INVALID_AUTHZ_HEADER)?;
+    let expires_at: i64 = expires_at.parse().map_err(|_| INVALID_AUTHZ_HEADER)?;
+    let expires_at =
+        DateTime::<Utc>::from_timestamp(expires_at, 0).ok_or(INVALID_AUTHZ_HEADER)?;
+    let expected_tag = common::generate_tag(
+        common::Claims {
+            specific_claims: claims.specific_claims,
+            repo_path: claims.repo_path,
+            expires_at,
+        },
+        &conf.key,
+    ).ok_or_else(|| make_error_resp(
+        StatusCode::INTERNAL_SERVER_ERROR,
+        "Internal server error",
+    ))?;
+    if tag != expected_tag {
         return Err(INVALID_AUTHZ_HEADER);
     }
-    Ok(false)
+
+    Ok(true)
 }
 
 fn authorize_get(
-- 
cgit v1.2.3
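Note on the tag format checked above: verify_claims expects an Authorization header of the form "Gitolfs3-Hmac-Sha256 <tag> <unix expiry>", where the tag is the HMAC that common::generate_tag computes over the auth type, the operation or object ID, the repository path, and the expiry timestamp. The sketch below shows how a caller might mint such a header. It is illustrative, not part of this commit: the helper name authorization_header is hypothetical, the chrono-based expiry is an arbitrary choice, and it assumes Digest<32> displays as the lowercase hex that its FromStr impl parses (as its Serialize impl suggests).

// Hypothetical client-side sketch (not part of this patch).
use chrono::{Duration, Utc};

fn authorization_header(key: &common::Key, repo_path: &str) -> Option<String> {
    // The server recomputes the tag over exactly these claims, so any change
    // to the operation, repo path, or expiry invalidates the header.
    let expires_at = Utc::now() + Duration::minutes(5);
    let tag = common::generate_tag(
        common::Claims {
            specific_claims: common::SpecificClaims::BatchApi(common::Operation::Download),
            repo_path,
            expires_at,
        },
        key, // &Key works because generate_tag takes impl AsRef<[u8]>
    )?; // None if repo_path exceeds 100 bytes
    Some(format!(
        "Gitolfs3-Hmac-Sha256 {} {}",
        tag,
        expires_at.timestamp()
    ))
}

On the server side, this is the value that authorize_batch passes through verify_claims before falling back to the unauthenticated path.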