| author | Rutger Broekhoff | 2024-03-30 15:19:49 +0100 |
|---|---|---|
| committer | Rutger Broekhoff | 2024-03-30 15:19:49 +0100 |
| commit | 5e5cde1624b1b4ffd00efa73935c48e547a5a8d3 (patch) | |
| tree | 67bceba2d25cf3298cd83b2f4e876752684dd704 | |
| parent | f033c6889e0071b29e75c551586e8e5da1b556a3 (diff) | |
Restructure authorization flow
Tried to write verify_claims such that the happy path stays as far to the
left as possible.
| -rw-r--r-- | common/src/lib.rs | 102 |
| -rw-r--r-- | server/src/main.rs | 97 |
2 files changed, 107 insertions, 92 deletions
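
The "happy path to the left" style referred to above replaces nested `if let` chains with early returns (`let … else`, `?`), so the successful control flow stays at the outermost indentation level. A minimal, hypothetical illustration of the pattern (not code from this repository):

```rust
// Nested style: the success case ends up several levels deep.
fn parse_nested(input: Option<&str>) -> Result<u32, &'static str> {
    if let Some(s) = input {
        if let Ok(n) = s.parse::<u32>() {
            return Ok(n);
        }
    }
    Err("invalid input")
}

// "Happy path to the left": bail out early, keep the main flow unindented.
fn parse_flat(input: Option<&str>) -> Result<u32, &'static str> {
    let Some(s) = input else {
        return Err("invalid input");
    };
    let n = s.parse::<u32>().map_err(|_| "invalid input")?;
    Ok(n)
}
```

This is exactly the shape of the `verify_claims` change below: three nested `if let`s become a `let … else` guard followed by a straight line of `?`-propagated steps.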
```diff
diff --git a/common/src/lib.rs b/common/src/lib.rs
index c26150d..917f566 100644
--- a/common/src/lib.rs
+++ b/common/src/lib.rs
@@ -7,6 +7,27 @@ use std::{
 };
 use subtle::ConstantTimeEq;
 
+#[repr(u8)]
+enum AuthType {
+    BatchApi = 1,
+    Download = 2,
+}
+
+#[derive(Debug, Copy, Clone)]
+pub struct Claims<'a> {
+    pub specific_claims: SpecificClaims,
+    pub repo_path: &'a str,
+    pub expires_at: DateTime<Utc>,
+}
+
+#[derive(Debug, Copy, Clone)]
+pub enum SpecificClaims {
+    BatchApi(Operation),
+    Download(Oid),
+}
+
+pub type Oid = Digest<32>;
+
 #[derive(Debug, Eq, PartialEq, Copy, Clone, Serialize, Deserialize)]
 #[repr(u8)]
 pub enum Operation {
@@ -16,6 +37,29 @@ pub enum Operation {
     Upload = 2,
 }
 
+/// Returns None if the claims are invalid. Repo path length may be no more than 100 bytes.
+pub fn generate_tag(claims: Claims, key: impl AsRef<[u8]>) -> Option<Digest<32>> {
+    if claims.repo_path.len() > 100 {
+        return None;
+    }
+
+    let mut hmac = hmac_sha256::HMAC::new(key);
+    match claims.specific_claims {
+        SpecificClaims::BatchApi(operation) => {
+            hmac.update([AuthType::BatchApi as u8]);
+            hmac.update([operation as u8]);
+        }
+        SpecificClaims::Download(oid) => {
+            hmac.update([AuthType::Download as u8]);
+            hmac.update(oid.as_bytes());
+        }
+    }
+    hmac.update([claims.repo_path.len() as u8]);
+    hmac.update(claims.repo_path.as_bytes());
+    hmac.update(claims.expires_at.timestamp().to_be_bytes());
+    Some(hmac.finalize().into())
+}
+
 #[derive(Debug, PartialEq, Eq, Copy, Clone)]
 pub struct ParseOperationError;
 
@@ -37,12 +81,6 @@ impl FromStr for Operation {
     }
 }
 
-#[repr(u8)]
-enum AuthType {
-    BatchApi = 1,
-    Download = 2,
-}
-
 /// None means out of range.
 fn decode_nibble(c: u8) -> Option<u8> {
     if c.is_ascii_digit() {
@@ -148,6 +186,13 @@ fn parse_hex_exact(value: &str, buf: &mut [u8]) -> Result<(), ParseHexError> {
     Ok(())
 }
 
+pub type Key = SafeByteArray<64>;
+
+pub fn load_key(path: &str) -> Result<Key, ReadHexError> {
+    let key_str = std::fs::read_to_string(path).map_err(ReadHexError::Io)?;
+    key_str.trim().parse().map_err(ReadHexError::Format)
+}
+
 pub struct SafeByteArray<const N: usize> {
     inner: [u8; N],
 }
@@ -192,44 +237,6 @@ impl<const N: usize> FromStr for SafeByteArray<N> {
     }
 }
 
-pub type Oid = Digest<32>;
-
-#[derive(Debug, Copy, Clone)]
-pub enum SpecificClaims {
-    BatchApi(Operation),
-    Download(Oid),
-}
-
-#[derive(Debug, Copy, Clone)]
-pub struct Claims<'a> {
-    pub specific_claims: SpecificClaims,
-    pub repo_path: &'a str,
-    pub expires_at: DateTime<Utc>,
-}
-
-/// Returns None if the claims are invalid. Repo path length may be no more than 100 bytes.
-pub fn generate_tag(claims: Claims, key: impl AsRef<[u8]>) -> Option<Digest<32>> {
-    if claims.repo_path.len() > 100 {
-        return None;
-    }
-
-    let mut hmac = hmac_sha256::HMAC::new(key);
-    match claims.specific_claims {
-        SpecificClaims::BatchApi(operation) => {
-            hmac.update([AuthType::BatchApi as u8]);
-            hmac.update([operation as u8]);
-        }
-        SpecificClaims::Download(oid) => {
-            hmac.update([AuthType::Download as u8]);
-            hmac.update(oid.as_bytes());
-        }
-    }
-    hmac.update([claims.repo_path.len() as u8]);
-    hmac.update(claims.repo_path.as_bytes());
-    hmac.update(claims.expires_at.timestamp().to_be_bytes());
-    Some(hmac.finalize().into())
-}
-
 pub struct HexFmt<B: AsRef<[u8]>>(pub B);
 
 impl<B: AsRef<[u8]>> fmt::Display for HexFmt<B> {
@@ -339,10 +346,3 @@ impl<const N: usize> Serialize for Digest<N> {
         serializer.serialize_str(&format!("{self}"))
     }
 }
-
-pub type Key = SafeByteArray<64>;
-
-pub fn load_key(path: &str) -> Result<Key, ReadHexError> {
-    let key_str = std::fs::read_to_string(path).map_err(ReadHexError::Io)?;
-    key_str.trim().parse().map_err(ReadHexError::Format)
-}
```
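
The relocated `generate_tag` and the `verify_claims` parser in `server/src/main.rs` below agree on an `Authorization` value of the form `Gitolfs3-Hmac-Sha256 <hex tag> <unix expiry>`. A hedged sketch of how a caller could mint that value with the new API; the helper itself is hypothetical, and it assumes that `SafeByteArray<64>` implements `AsRef<[u8]>` (as its use as the HMAC key in `verify_claims` suggests) and that `Digest<32>` Display-formats as the same hex its `FromStr` parses (as its `Serialize` impl suggests):

```rust
use chrono::{DateTime, Utc};

// Hypothetical client-side helper, not part of this commit: builds the
// "Gitolfs3-Hmac-Sha256 <hex tag> <unix expiry>" value that verify_claims expects.
fn batch_api_authorization(
    key: &common::Key,
    repo_path: &str,
    operation: common::Operation,
    expires_at: DateTime<Utc>,
) -> Option<String> {
    let tag = common::generate_tag(
        common::Claims {
            specific_claims: common::SpecificClaims::BatchApi(operation),
            repo_path,
            expires_at,
        },
        key,
    )?; // None when repo_path exceeds 100 bytes
    Some(format!("Gitolfs3-Hmac-Sha256 {tag} {}", expires_at.timestamp()))
}
```

The expiry is bound into the HMAC via `expires_at.timestamp()`, so the server can recompute and check the tag from the header-supplied timestamp alone.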
```diff
diff --git a/server/src/main.rs b/server/src/main.rs
index 4a88dcd..e615d19 100644
--- a/server/src/main.rs
+++ b/server/src/main.rs
@@ -762,28 +762,46 @@ fn authorize_batch(
         specific_claims: common::SpecificClaims::BatchApi(operation),
         repo_path,
     };
-    if verify_claims(conf, &claims, headers)? {
-        return Ok(Trusted(true));
+    if !verify_claims(conf, &claims, headers)? {
+        return authorize_batch_unauthenticated(conf, public, operation, headers);
     }
+    return Ok(Trusted(true));
+}
 
+fn authorize_batch_unauthenticated(
+    conf: &AuthorizationConfig,
+    public: bool,
+    operation: common::Operation,
+    headers: &HeaderMap,
+) -> Result<Trusted, GitLfsErrorResponse<'static>> {
     let trusted = forwarded_from_trusted_host(headers, &conf.trusted_forwarded_hosts)?;
-    if operation != common::Operation::Download {
-        if trusted {
+    match operation {
+        common::Operation::Upload => {
+            // Trusted users can clone all repositories (by virtue of accessing the server via a
+            // trusted network). However, they can not push without proper authentication. Untrusted
+            // users who are also not authenticated should not need to know which repositories exists.
+            // Therefore, we tell untrusted && unauthenticated users that the repo doesn't exist, but
+            // tell trusted users that they need to authenticate.
+            if !trusted {
+                return Err(REPO_NOT_FOUND);
+            }
             return Err(make_error_resp(
                 StatusCode::FORBIDDEN,
                 "Authentication required to upload",
             ));
+        },
+        common::Operation::Download => {
+            // Again, trusted users can see all repos. For untrusted users, we first need to check
+            // whether the repo is public before we authorize. If the user is untrusted and the
+            // repo isn't public, we just act like it doesn't even exist.
+            if !trusted {
+                if !public {
+                    return Err(REPO_NOT_FOUND)
+                }
+                return Ok(Trusted(false))
+            }
+            return Ok(Trusted(true));
         }
-        return Err(REPO_NOT_FOUND);
-    }
-    if trusted {
-        return Ok(Trusted(true));
-    }
-
-    if public {
-        Ok(Trusted(false))
-    } else {
-        Err(REPO_NOT_FOUND)
-    }
     }
 }
 
@@ -909,35 +927,32 @@ fn verify_claims(
     const INVALID_AUTHZ_HEADER: GitLfsErrorResponse =
        make_error_resp(StatusCode::BAD_REQUEST, "Invalid authorization header");
 
-    if let Some(authz) = headers.get(header::AUTHORIZATION) {
-        if let Ok(authz) = authz.to_str() {
-            if let Some(val) = authz.strip_prefix("Gitolfs3-Hmac-Sha256 ") {
-                let (tag, expires_at) = val.split_once(' ').ok_or(INVALID_AUTHZ_HEADER)?;
-                let tag: common::Digest<32> = tag.parse().map_err(|_| INVALID_AUTHZ_HEADER)?;
-                let expires_at: i64 = expires_at.parse().map_err(|_| INVALID_AUTHZ_HEADER)?;
-                let expires_at =
-                    DateTime::<Utc>::from_timestamp(expires_at, 0).ok_or(INVALID_AUTHZ_HEADER)?;
-                let Some(expected_tag) = common::generate_tag(
-                    common::Claims {
-                        specific_claims: claims.specific_claims,
-                        repo_path: claims.repo_path,
-                        expires_at,
-                    },
-                    &conf.key,
-                ) else {
-                    return Err(make_error_resp(
-                        StatusCode::INTERNAL_SERVER_ERROR,
-                        "Internal server error",
-                    ));
-                };
-                if tag == expected_tag {
-                    return Ok(true);
-                }
-            }
-        }
+    let Some(authz) = headers.get(header::AUTHORIZATION) else {
+        return Ok(false);
+    };
+    let authz = authz.to_str().map_err(|_| INVALID_AUTHZ_HEADER)?;
+    let val = authz.strip_prefix("Gitolfs3-Hmac-Sha256 ").ok_or(INVALID_AUTHZ_HEADER)?;
+    let (tag, expires_at) = val.split_once(' ').ok_or(INVALID_AUTHZ_HEADER)?;
+    let tag: common::Digest<32> = tag.parse().map_err(|_| INVALID_AUTHZ_HEADER)?;
+    let expires_at: i64 = expires_at.parse().map_err(|_| INVALID_AUTHZ_HEADER)?;
+    let expires_at =
+        DateTime::<Utc>::from_timestamp(expires_at, 0).ok_or(INVALID_AUTHZ_HEADER)?;
+    let expected_tag = common::generate_tag(
+        common::Claims {
+            specific_claims: claims.specific_claims,
+            repo_path: claims.repo_path,
+            expires_at,
+        },
+        &conf.key,
+    ).ok_or_else(|| make_error_resp(
+        StatusCode::INTERNAL_SERVER_ERROR,
+        "Internal server error",
+    ))?;
+    if tag != expected_tag {
         return Err(INVALID_AUTHZ_HEADER);
     }
-    Ok(false)
+
+    Ok(true)
 }
 
 fn authorize_get(
```
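
When `verify_claims` returns `false` (no `Authorization` header present), batch requests now fall through to `authorize_batch_unauthenticated`, whose comments spell out the policy. A self-contained, hypothetical restatement of that decision table (a summary for readability, not the server code itself):

```rust
// Condensed view of the unauthenticated branch: trusted hosts may read everything
// but never push anonymously; untrusted callers only see public repositories and
// are otherwise told the repository does not exist.
#[derive(Copy, Clone)]
enum Op {
    Upload,
    Download,
}

fn unauthenticated_outcome(trusted: bool, public: bool, op: Op) -> &'static str {
    match (op, trusted, public) {
        (Op::Upload, true, _) => "403 Forbidden: authentication required to upload",
        (Op::Upload, false, _) => "REPO_NOT_FOUND: act as if the repository does not exist",
        (Op::Download, true, _) => "allowed, Trusted(true)",
        (Op::Download, false, true) => "allowed, Trusted(false)",
        (Op::Download, false, false) => "REPO_NOT_FOUND: act as if the repository does not exist",
    }
}

fn main() {
    // An anonymous download of a public repository is allowed, but not trusted.
    assert_eq!(
        unauthenticated_outcome(false, true, Op::Download),
        "allowed, Trusted(false)"
    );
}
```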