Diffstat (limited to 'gitolfs3-server')
-rw-r--r--  gitolfs3-server/Cargo.toml     |  14
-rw-r--r--  gitolfs3-server/src/api.rs     | 223
-rw-r--r--  gitolfs3-server/src/authz.rs   |  84
-rw-r--r--  gitolfs3-server/src/config.rs  | 140
-rw-r--r--  gitolfs3-server/src/dlimit.rs  |   2
-rw-r--r--  gitolfs3-server/src/handler.rs | 511
-rw-r--r--  gitolfs3-server/src/main.rs    |  25
7 files changed, 512 insertions, 487 deletions
diff --git a/gitolfs3-server/Cargo.toml b/gitolfs3-server/Cargo.toml
index efea78b..5908770 100644
--- a/gitolfs3-server/Cargo.toml
+++ b/gitolfs3-server/Cargo.toml
@@ -1,20 +1,20 @@
 [package]
 name = "gitolfs3-server"
 version = "0.1.0"
-edition = "2021"
+edition = "2024"
 license = "MIT"
 
 [dependencies]
-aws-config = { version = "1.1.2" }
-aws-sdk-s3 = "1.12.0"
-axum = "0.7"
-base64 = "0.21"
+aws-config = "1.6"
+aws-sdk-s3 = "1.82"
+axum = "0.8"
+base64 = "0.22"
 chrono = { version = "0.4", features = ["serde"] }
 gitolfs3-common = { path = "../gitolfs3-common" }
 mime = "0.3"
 serde = { version = "1", features = ["derive"] }
 serde_json = "1"
-tokio = { version = "1.35", features = ["full"] }
+tokio = { version = "1.44", features = ["full"] }
 tokio-util = "0.7"
-tower = "0.4"
+tower = "0.5"
 tracing-subscriber = { version = "0.3", features = ["env-filter"] }
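
Note on the edition bump: moving from edition 2021 to 2024 is load-bearing, not cosmetic. The rewritten checksum helper in handler.rs below relies on let-chains (`if let ... && let ...`), which only compile on Rust 2024. A standalone sketch of the pattern (illustrative only, not taken from this repository; uses the same base64 0.22 API as the code below):

    // Let-chains (edition 2024): several fallible patterns chained in one
    // `if`, with each binding in scope for the next condition and the body.
    fn checksum_matches(expected: [u8; 32], b64: Option<&str>) -> bool {
        use base64::{Engine, prelude::BASE64_STANDARD};
        if let Some(s) = b64
            && let Ok(bytes) = BASE64_STANDARD.decode(s)
            && let Ok(bytes32) = TryInto::<[u8; 32]>::try_into(bytes)
        {
            return bytes32 == expected;
        }
        false
    }
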
diff --git a/gitolfs3-server/src/api.rs b/gitolfs3-server/src/api.rs
index dba7ada..87c1856 100644
--- a/gitolfs3-server/src/api.rs
+++ b/gitolfs3-server/src/api.rs
@@ -1,89 +1,33 @@
 use std::collections::HashMap;
 
 use axum::{
-    async_trait,
-    extract::{rejection, FromRequest, FromRequestParts, Request},
-    http::{header, request::Parts, HeaderValue, StatusCode},
-    response::{IntoResponse, Response},
     Extension, Json,
+    extract::{FromRequest, FromRequestParts, Request, rejection},
+    http,
+    response::{IntoResponse, Response},
 };
 use chrono::{DateTime, Utc};
 use gitolfs3_common::{Oid, Operation};
-use serde::{de::DeserializeOwned, Deserialize, Serialize};
-
-pub const REPO_NOT_FOUND: GitLfsErrorResponse =
-    make_error_resp(StatusCode::NOT_FOUND, "Repository not found");
-
-#[derive(Clone)]
-pub struct RepositoryName(pub String);
-
-pub struct RepositoryNameRejection;
-
-impl IntoResponse for RepositoryNameRejection {
-    fn into_response(self) -> Response {
-        (StatusCode::INTERNAL_SERVER_ERROR, "Missing repository name").into_response()
-    }
-}
-
-#[async_trait]
-impl<S: Send + Sync> FromRequestParts<S> for RepositoryName {
-    type Rejection = RepositoryNameRejection;
-
-    async fn from_request_parts(parts: &mut Parts, state: &S) -> Result<Self, Self::Rejection> {
-        let Ok(Extension(repo_name)) = Extension::<Self>::from_request_parts(parts, state).await
-        else {
-            return Err(RepositoryNameRejection);
-        };
-        Ok(repo_name)
-    }
-}
+use serde::{Deserialize, Serialize, de::DeserializeOwned};
 
-#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, Clone, Copy)]
-pub enum TransferAdapter {
-    #[serde(rename = "basic")]
-    Basic,
-    #[serde(other)]
-    Unknown,
-}
+// ----------------------- Generic facilities ----------------------
 
-#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, Clone, Copy)]
-pub enum HashAlgo {
-    #[serde(rename = "sha256")]
-    Sha256,
-    #[serde(other)]
-    Unknown,
-}
+pub type GitLfsErrorResponse<'a> = (http::StatusCode, GitLfsJson<GitLfsErrorData<'a>>);
 
-impl Default for HashAlgo {
-    fn default() -> Self {
-        Self::Sha256
-    }
-}
-
-#[derive(Debug, Deserialize, PartialEq, Eq, Clone)]
-pub struct BatchRequestObject {
-    pub oid: Oid,
-    pub size: i64,
-}
-
-#[derive(Debug, Serialize, Deserialize, Clone)]
-struct BatchRef {
-    name: String,
+#[derive(Debug, Serialize)]
+pub struct GitLfsErrorData<'a> {
+    pub message: &'a str,
 }
 
-fn default_transfers() -> Vec<TransferAdapter> {
-    vec![TransferAdapter::Basic]
+pub const fn make_error_resp<'a>(
+    code: http::StatusCode,
+    message: &'a str,
+) -> GitLfsErrorResponse<'a> {
+    (code, GitLfsJson(Json(GitLfsErrorData { message })))
 }
 
-#[derive(Debug, Deserialize, PartialEq, Eq, Clone)]
-pub struct BatchRequest {
-    pub operation: Operation,
-    #[serde(default = "default_transfers")]
-    pub transfers: Vec<TransferAdapter>,
-    pub objects: Vec<BatchRequestObject>,
-    #[serde(default)]
-    pub hash_algo: HashAlgo,
-}
+pub const REPO_NOT_FOUND: GitLfsErrorResponse =
+    make_error_resp(http::StatusCode::NOT_FOUND, "Repository not found");
 
 #[derive(Debug, Clone)]
 pub struct GitLfsJson<T>(pub Json<T>);
@@ -100,7 +44,7 @@ impl IntoResponse for GitLfsJsonRejection {
         match self {
             Self::Json(rej) => rej.into_response(),
             Self::MissingGitLfsJsonContentType => make_error_resp(
-                StatusCode::UNSUPPORTED_MEDIA_TYPE,
+                http::StatusCode::UNSUPPORTED_MEDIA_TYPE,
                 &format!("Expected request with `Content-Type: {LFS_MIME}`"),
             )
             .into_response(),
@@ -125,7 +69,7 @@ pub fn is_git_lfs_json_mimetype(mimetype: &str) -> bool {
 }
 
 fn has_git_lfs_json_content_type(req: &Request) -> bool {
-    let Some(content_type) = req.headers().get(header::CONTENT_TYPE) else {
+    let Some(content_type) = req.headers().get(http::header::CONTENT_TYPE) else {
         return false;
     };
     let Ok(content_type) = content_type.to_str() else {
@@ -134,7 +78,6 @@ fn has_git_lfs_json_content_type(req: &Request) -> bool {
     is_git_lfs_json_mimetype(content_type)
 }
 
-#[async_trait]
 impl<T, S> FromRequest<S> for GitLfsJson<T>
 where
     T: DeserializeOwned,
@@ -158,46 +101,97 @@ impl<T: Serialize> IntoResponse for GitLfsJson<T> {
         let GitLfsJson(json) = self;
         let mut resp = json.into_response();
         resp.headers_mut().insert(
-            header::CONTENT_TYPE,
-            HeaderValue::from_static("application/vnd.git-lfs+json; charset=utf-8"),
+            http::header::CONTENT_TYPE,
+            http::HeaderValue::from_static("application/vnd.git-lfs+json; charset=utf-8"),
         );
         resp
     }
 }
 
-#[derive(Debug, Serialize)]
-pub struct GitLfsErrorData<'a> {
-    pub message: &'a str,
+#[derive(Clone)]
+pub struct RepositoryName(pub String);
+
+pub struct RepositoryNameRejection;
+
+impl IntoResponse for RepositoryNameRejection {
+    fn into_response(self) -> Response {
+        (
+            http::StatusCode::INTERNAL_SERVER_ERROR,
+            "Missing repository name",
+        )
+            .into_response()
+    }
 }
 
-pub type GitLfsErrorResponse<'a> = (StatusCode, GitLfsJson<GitLfsErrorData<'a>>);
+impl<S: Send + Sync> FromRequestParts<S> for RepositoryName {
+    type Rejection = RepositoryNameRejection;
 
-pub const fn make_error_resp(code: StatusCode, message: &str) -> GitLfsErrorResponse {
-    (code, GitLfsJson(Json(GitLfsErrorData { message })))
+    async fn from_request_parts(
+        parts: &mut http::request::Parts,
+        state: &S,
+    ) -> Result<Self, Self::Rejection> {
+        let Ok(Extension(repo_name)) = Extension::<Self>::from_request_parts(parts, state).await
+        else {
+            return Err(RepositoryNameRejection);
+        };
+        Ok(repo_name)
+    }
 }
 
-#[derive(Debug, Serialize, Clone)]
-pub struct BatchResponseObjectAction {
-    pub href: String,
-    #[serde(skip_serializing_if = "HashMap::is_empty")]
-    pub header: HashMap<String, String>,
-    pub expires_at: DateTime<Utc>,
+// ----------------------- Git LFS Batch API -----------------------
+
+#[derive(Debug, Deserialize, PartialEq, Eq, Clone)]
+pub struct BatchRequest {
+    pub operation: Operation,
+    #[serde(default = "default_transfers")]
+    pub transfers: Vec<TransferAdapter>,
+    pub objects: Vec<BatchRequestObject>,
+    #[serde(default)]
+    pub hash_algo: HashAlgo,
 }
 
-#[derive(Default, Debug, Serialize, Clone)]
-pub struct BatchResponseObjectActions {
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub upload: Option<BatchResponseObjectAction>,
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub download: Option<BatchResponseObjectAction>,
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub verify: Option<BatchResponseObjectAction>,
+#[derive(Debug, Deserialize, PartialEq, Eq, Clone)]
+pub struct BatchRequestObject {
+    pub oid: Oid,
+    pub size: i64,
 }
 
-#[derive(Debug, Clone, Serialize)]
-pub struct BatchResponseObjectError {
-    pub code: u16,
-    pub message: String,
+#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, Clone, Copy)]
+pub enum TransferAdapter {
+    #[serde(rename = "basic")]
+    Basic,
+    #[serde(other)]
+    Unknown,
+}
+
+fn default_transfers() -> Vec<TransferAdapter> {
+    vec![TransferAdapter::Basic]
+}
+
+#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, Clone, Copy)]
+pub enum HashAlgo {
+    #[serde(rename = "sha256")]
+    Sha256,
+    #[serde(other)]
+    Unknown,
+}
+
+impl Default for HashAlgo {
+    fn default() -> Self {
+        Self::Sha256
+    }
+}
+
+#[derive(Debug, Serialize, Deserialize, Clone)]
+struct BatchRef {
+    name: String,
+}
+
+#[derive(Debug, Serialize, Clone)]
+pub struct BatchResponse {
+    pub transfer: TransferAdapter,
+    pub objects: Vec<BatchResponseObject>,
+    pub hash_algo: HashAlgo,
 }
 
 #[derive(Debug, Serialize, Clone)]
@@ -211,10 +205,16 @@ pub struct BatchResponseObject {
     pub error: Option<BatchResponseObjectError>,
 }
 
+#[derive(Debug, Clone, Serialize)]
+pub struct BatchResponseObjectError {
+    pub code: u16,
+    pub message: String,
+}
+
 impl BatchResponseObject {
     pub fn error(
         obj: &BatchRequestObject,
-        code: StatusCode,
+        code: http::StatusCode,
         message: String,
     ) -> BatchResponseObject {
         BatchResponseObject {
@@ -231,10 +231,21 @@ impl BatchResponseObject {
 }
 
 #[derive(Debug, Serialize, Clone)]
-pub struct BatchResponse {
-    pub transfer: TransferAdapter,
-    pub objects: Vec<BatchResponseObject>,
-    pub hash_algo: HashAlgo,
+pub struct BatchResponseObjectAction {
+    pub href: String,
+    #[serde(skip_serializing_if = "HashMap::is_empty")]
+    pub header: HashMap<String, String>,
+    pub expires_at: DateTime<Utc>,
+}
+
+#[derive(Default, Debug, Serialize, Clone)]
+pub struct BatchResponseObjectActions {
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub upload: Option<BatchResponseObjectAction>,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub download: Option<BatchResponseObjectAction>,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub verify: Option<BatchResponseObjectAction>,
 }
 
 #[test]
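
The dropped `#[async_trait]` attributes are the axum 0.8 migration at work: `FromRequestParts` and `FromRequest` are now defined with native async fns in traits, so implementations become plain `impl` blocks, as the relocated `RepositoryName` extractor above shows. A minimal sketch of the same shape with a hypothetical extractor (names invented for illustration):

    use axum::{extract::FromRequestParts, http};

    // Hypothetical `RequestId` extractor; only the shape matters here.
    struct RequestId(String);

    impl<S: Send + Sync> FromRequestParts<S> for RequestId {
        type Rejection = http::StatusCode;

        // Plain `async fn` -- no #[async_trait] needed on axum 0.8.
        async fn from_request_parts(
            parts: &mut http::request::Parts,
            _state: &S,
        ) -> Result<Self, Self::Rejection> {
            parts
                .headers
                .get("x-request-id")
                .and_then(|v| v.to_str().ok())
                .map(|v| Self(v.to_owned()))
                .ok_or(http::StatusCode::BAD_REQUEST)
        }
    }
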
diff --git a/gitolfs3-server/src/authz.rs b/gitolfs3-server/src/authz.rs
index 0674cef..c4cb6df 100644
--- a/gitolfs3-server/src/authz.rs
+++ b/gitolfs3-server/src/authz.rs
@@ -1,41 +1,22 @@
 use std::collections::HashSet;
 
-use axum::http::{header, HeaderMap, StatusCode};
+use axum::http;
 use chrono::{DateTime, Utc};
-use gitolfs3_common::{generate_tag, Claims, Digest, Oid, Operation, SpecificClaims};
+use gitolfs3_common::{Claims, Digest, Oid, Operation, SpecificClaims, generate_tag};
 
 use crate::{
-    api::{make_error_resp, GitLfsErrorResponse, REPO_NOT_FOUND},
+    api::{GitLfsErrorResponse, REPO_NOT_FOUND, make_error_resp},
     config::AuthorizationConfig,
 };
 
 pub struct Trusted(pub bool);
 
-fn forwarded_from_trusted_host(
-    headers: &HeaderMap,
-    trusted: &HashSet<String>,
-) -> Result<bool, GitLfsErrorResponse<'static>> {
-    if let Some(forwarded_host) = headers.get("X-Forwarded-Host") {
-        if let Ok(forwarded_host) = forwarded_host.to_str() {
-            if trusted.contains(forwarded_host) {
-                return Ok(true);
-            }
-        } else {
-            return Err(make_error_resp(
-                StatusCode::NOT_FOUND,
-                "Invalid X-Forwarded-Host header",
-            ));
-        }
-    }
-    Ok(false)
-}
-
 pub fn authorize_batch(
     conf: &AuthorizationConfig,
     repo_path: &str,
     public: bool,
     operation: Operation,
-    headers: &HeaderMap,
+    headers: &http::HeaderMap,
 ) -> Result<Trusted, GitLfsErrorResponse<'static>> {
     // - No authentication required for downloading exported repos
     // - When authenticated:
@@ -57,7 +38,7 @@ fn authorize_batch_unauthenticated(
     conf: &AuthorizationConfig,
     public: bool,
     operation: Operation,
-    headers: &HeaderMap,
+    headers: &http::HeaderMap,
 ) -> Result<Trusted, GitLfsErrorResponse<'static>> {
     let trusted = forwarded_from_trusted_host(headers, &conf.trusted_forwarded_hosts)?;
     match operation {
@@ -71,7 +52,7 @@
                 return Err(REPO_NOT_FOUND);
             }
             Err(make_error_resp(
-                StatusCode::FORBIDDEN,
+                http::StatusCode::FORBIDDEN,
                 "Authentication required to upload",
             ))
         }
@@ -94,7 +75,7 @@ pub fn authorize_get(
     conf: &AuthorizationConfig,
     repo_path: &str,
     oid: Oid,
-    headers: &HeaderMap,
+    headers: &http::HeaderMap,
 ) -> Result<(), GitLfsErrorResponse<'static>> {
     let claims = VerifyClaimsInput {
         specific_claims: SpecificClaims::Download(oid),
@@ -102,27 +83,48 @@
     };
     if !verify_claims(conf, &claims, headers)? {
         return Err(make_error_resp(
-            StatusCode::UNAUTHORIZED,
+            http::StatusCode::UNAUTHORIZED,
             "Repository not found",
         ));
     }
     Ok(())
 }
 
-pub struct VerifyClaimsInput<'a> {
-    pub specific_claims: SpecificClaims,
-    pub repo_path: &'a str,
+fn forwarded_from_trusted_host(
+    headers: &http::HeaderMap,
+    trusted: &HashSet<String>,
+) -> Result<bool, GitLfsErrorResponse<'static>> {
+    if let Some(forwarded_host) = headers.get("X-Forwarded-Host") {
+        if let Ok(forwarded_host) = forwarded_host.to_str() {
+            if trusted.contains(forwarded_host) {
+                return Ok(true);
+            }
+        } else {
+            return Err(make_error_resp(
+                http::StatusCode::NOT_FOUND,
+                "Invalid X-Forwarded-Host header",
+            ));
+        }
+    }
+    Ok(false)
+}
+
+struct VerifyClaimsInput<'a> {
+    specific_claims: SpecificClaims,
+    repo_path: &'a str,
 }
 
 fn verify_claims(
     conf: &AuthorizationConfig,
     claims: &VerifyClaimsInput,
-    headers: &HeaderMap,
+    headers: &http::HeaderMap,
 ) -> Result<bool, GitLfsErrorResponse<'static>> {
-    const INVALID_AUTHZ_HEADER: GitLfsErrorResponse =
-        make_error_resp(StatusCode::BAD_REQUEST, "Invalid authorization header");
+    const INVALID_AUTHZ_HEADER: GitLfsErrorResponse = make_error_resp(
+        http::StatusCode::BAD_REQUEST,
+        "Invalid authorization header",
+    );
 
-    let Some(authz) = headers.get(header::AUTHORIZATION) else {
+    let Some(authz) = headers.get(http::header::AUTHORIZATION) else {
         return Ok(false);
     };
     let authz = authz.to_str().map_err(|_| INVALID_AUTHZ_HEADER)?;
@@ -141,7 +143,12 @@ fn verify_claims(
         },
         &conf.key,
     )
-    .ok_or_else(|| make_error_resp(StatusCode::INTERNAL_SERVER_ERROR, "Internal server error"))?;
+    .ok_or_else(|| {
+        make_error_resp(
+            http::StatusCode::INTERNAL_SERVER_ERROR,
+            "Internal server error",
+        )
+    })?;
     if tag != expected_tag {
         return Err(INVALID_AUTHZ_HEADER);
     }
@@ -175,8 +182,11 @@ fn test_validate_claims() {
         repo_path: claims.repo_path,
         specific_claims: claims.specific_claims,
     };
-    let mut headers = HeaderMap::new();
-    headers.insert(header::AUTHORIZATION, header_value.try_into().unwrap());
+    let mut headers = http::HeaderMap::new();
+    headers.insert(
+        http::header::AUTHORIZATION,
+        header_value.try_into().unwrap(),
+    );
 
     assert!(verify_claims(&conf, &verification_claims, &headers).unwrap());
 }
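
`forwarded_from_trusted_host` moves below its callers but keeps its behavior: an `X-Forwarded-Host` header grants trust only on an exact string match against the configured set, a missing header is simply untrusted, and a non-UTF-8 header is an error. A test-style sketch against the function as shown, in the style of `test_validate_claims` above (host names invented):

    #[test]
    fn test_forwarded_from_trusted_host() {
        let trusted: HashSet<String> = ["git.example.com".to_string()].into();

        // Exact match against the trusted set => trusted.
        let mut headers = http::HeaderMap::new();
        headers.insert("X-Forwarded-Host", "git.example.com".try_into().unwrap());
        assert!(forwarded_from_trusted_host(&headers, &trusted).unwrap());

        // An unknown host is not an error, just untrusted.
        headers.insert("X-Forwarded-Host", "other.example.org".try_into().unwrap());
        assert!(!forwarded_from_trusted_host(&headers, &trusted).unwrap());
    }
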
diff --git a/gitolfs3-server/src/config.rs b/gitolfs3-server/src/config.rs
index 75e84dc..5167cca 100644
--- a/gitolfs3-server/src/config.rs
+++ b/gitolfs3-server/src/config.rs
@@ -1,66 +1,6 @@
 use std::collections::HashSet;
 
-use gitolfs3_common::{load_key, Key};
-
-struct Env {
-    s3_access_key_id: String,
-    s3_secret_access_key: String,
-    s3_bucket: String,
-    s3_region: String,
-    s3_endpoint: String,
-    base_url: String,
-    key_path: String,
-    listen_host: String,
-    listen_port: String,
-    download_limit: String,
-    trusted_forwarded_hosts: String,
-}
-
-fn require_env(name: &str) -> Result<String, String> {
-    std::env::var(name)
-        .map_err(|_| format!("environment variable {name} should be defined and valid"))
-}
-
-impl Env {
-    fn load() -> Result<Env, String> {
-        Ok(Env {
-            s3_secret_access_key: require_env("GITOLFS3_S3_SECRET_ACCESS_KEY_FILE")?,
-            s3_access_key_id: require_env("GITOLFS3_S3_ACCESS_KEY_ID_FILE")?,
-            s3_region: require_env("GITOLFS3_S3_REGION")?,
-            s3_endpoint: require_env("GITOLFS3_S3_ENDPOINT")?,
-            s3_bucket: require_env("GITOLFS3_S3_BUCKET")?,
-            base_url: require_env("GITOLFS3_BASE_URL")?,
-            key_path: require_env("GITOLFS3_KEY_PATH")?,
-            listen_host: require_env("GITOLFS3_LISTEN_HOST")?,
-            listen_port: require_env("GITOLFS3_LISTEN_PORT")?,
-            download_limit: require_env("GITOLFS3_DOWNLOAD_LIMIT")?,
-            trusted_forwarded_hosts: std::env::var("GITOLFS3_TRUSTED_FORWARDED_HOSTS")
-                .unwrap_or_default(),
-        })
-    }
-}
-
-fn get_s3_client(env: &Env) -> Result<aws_sdk_s3::Client, std::io::Error> {
-    let access_key_id = std::fs::read_to_string(&env.s3_access_key_id)?;
-    let secret_access_key = std::fs::read_to_string(&env.s3_secret_access_key)?;
-
-    let credentials = aws_sdk_s3::config::Credentials::new(
-        access_key_id,
-        secret_access_key,
-        None,
-        None,
-        "gitolfs3-env",
-    );
-    let config = aws_config::SdkConfig::builder()
-        .behavior_version(aws_config::BehaviorVersion::latest())
-        .region(aws_config::Region::new(env.s3_region.clone()))
-        .endpoint_url(&env.s3_endpoint)
-        .credentials_provider(aws_sdk_s3::config::SharedCredentialsProvider::new(
-            credentials,
-        ))
-        .build();
-    Ok(aws_sdk_s3::Client::new(&config))
-}
+use gitolfs3_common::{Key, load_key};
 
 pub struct Config {
     pub listen_addr: (String, u16),
@@ -78,19 +18,11 @@ pub struct AuthorizationConfig {
 
 impl Config {
     pub fn load() -> Result<Self, String> {
-        let env = match Env::load() {
-            Ok(env) => env,
-            Err(e) => return Err(format!("failed to load configuration: {e}")),
-        };
-
-        let s3_client = match get_s3_client(&env) {
-            Ok(s3_client) => s3_client,
-            Err(e) => return Err(format!("failed to create S3 client: {e}")),
-        };
-        let key = match load_key(&env.key_path) {
-            Ok(key) => key,
-            Err(e) => return Err(format!("failed to load Gitolfs3 key: {e}")),
-        };
+        let env = Env::load().map_err(|e| format!("failed to load configuration: {e}"))?;
+        let s3_client =
+            create_s3_client(&env).map_err(|e| format!("failed to create S3 client: {e}"))?;
+        let key =
+            load_key(&env.key_path).map_err(|e| format!("failed to load Gitolfs3 key: {e}"))?;
 
         let trusted_forwarded_hosts: HashSet<String> = env
             .trusted_forwarded_hosts
@@ -120,3 +52,63 @@ impl Config {
         })
     }
 }
+
+fn create_s3_client(env: &Env) -> Result<aws_sdk_s3::Client, std::io::Error> {
+    let access_key_id = std::fs::read_to_string(&env.s3_access_key_id)?;
+    let secret_access_key = std::fs::read_to_string(&env.s3_secret_access_key)?;
+
+    let credentials = aws_sdk_s3::config::Credentials::new(
+        access_key_id,
+        secret_access_key,
+        None,
+        None,
+        "gitolfs3-env",
+    );
+    let config = aws_config::SdkConfig::builder()
+        .behavior_version(aws_config::BehaviorVersion::latest())
+        .region(aws_config::Region::new(env.s3_region.clone()))
+        .endpoint_url(&env.s3_endpoint)
+        .credentials_provider(aws_sdk_s3::config::SharedCredentialsProvider::new(
+            credentials,
+        ))
+        .build();
+    Ok(aws_sdk_s3::Client::new(&config))
+}
+
+struct Env {
+    s3_access_key_id: String,
+    s3_secret_access_key: String,
+    s3_bucket: String,
+    s3_region: String,
+    s3_endpoint: String,
+    base_url: String,
+    key_path: String,
+    listen_host: String,
+    listen_port: String,
+    download_limit: String,
+    trusted_forwarded_hosts: String,
+}
+
+impl Env {
+    fn load() -> Result<Env, String> {
+        Ok(Env {
+            s3_secret_access_key: require_env("GITOLFS3_S3_SECRET_ACCESS_KEY_FILE")?,
+            s3_access_key_id: require_env("GITOLFS3_S3_ACCESS_KEY_ID_FILE")?,
+            s3_region: require_env("GITOLFS3_S3_REGION")?,
+            s3_endpoint: require_env("GITOLFS3_S3_ENDPOINT")?,
+            s3_bucket: require_env("GITOLFS3_S3_BUCKET")?,
+            base_url: require_env("GITOLFS3_BASE_URL")?,
+            key_path: require_env("GITOLFS3_KEY_PATH")?,
+            listen_host: require_env("GITOLFS3_LISTEN_HOST")?,
+            listen_port: require_env("GITOLFS3_LISTEN_PORT")?,
+            download_limit: require_env("GITOLFS3_DOWNLOAD_LIMIT")?,
+            trusted_forwarded_hosts: std::env::var("GITOLFS3_TRUSTED_FORWARDED_HOSTS")
+                .unwrap_or_default(),
+        })
+    }
+}
+
+fn require_env(name: &str) -> Result<String, String> {
+    std::env::var(name)
+        .map_err(|_| format!("environment variable {name} should be defined and valid"))
+}
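
`Config::load` drops its three `match` blocks in favor of `map_err` + `?`; behavior is unchanged, since each failure still maps to the same message and returns early. The two forms are equivalent, as this side-by-side sketch shows:

    // Before: explicit match with an early return on error.
    fn load_old() -> Result<Env, String> {
        match Env::load() {
            Ok(env) => Ok(env),
            Err(e) => Err(format!("failed to load configuration: {e}")),
        }
    }

    // After: the error is mapped, and `?` performs the early return.
    fn load_new() -> Result<Env, String> {
        Env::load().map_err(|e| format!("failed to load configuration: {e}"))
    }
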
diff --git a/gitolfs3-server/src/dlimit.rs b/gitolfs3-server/src/dlimit.rs
index f68bec1..7a82a18 100644
--- a/gitolfs3-server/src/dlimit.rs
+++ b/gitolfs3-server/src/dlimit.rs
@@ -55,7 +55,7 @@ impl DownloadLimiter {
         Ok(true)
     }
 
-    pub async fn reset(&mut self) {
+    async fn reset(&mut self) {
         self.current = 0;
         if let Err(e) = self.write_new_count().await {
             println!("Failed to reset download counter: {e}");
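
`reset` loses its `pub`, so the download counter can now only be reset from within dlimit.rs itself. This diff does not show the actual caller; a hedged sketch of what an in-module reset driver could look like:

    // Hypothetical periodic reset task living next to DownloadLimiter.
    async fn reset_daily(limiter: std::sync::Arc<tokio::sync::Mutex<DownloadLimiter>>) {
        let mut interval = tokio::time::interval(std::time::Duration::from_secs(24 * 60 * 60));
        interval.tick().await; // the first tick completes immediately
        loop {
            interval.tick().await;
            limiter.lock().await.reset().await;
        }
    }
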
diff --git a/gitolfs3-server/src/handler.rs b/gitolfs3-server/src/handler.rs
index 6516291..c5d4a61 100644
--- a/gitolfs3-server/src/handler.rs
+++ b/gitolfs3-server/src/handler.rs
@@ -2,24 +2,24 @@ use std::{collections::HashMap, sync::Arc};
 
 use aws_sdk_s3::{error::SdkError, operation::head_object::HeadObjectOutput};
 use axum::{
+    Json,
     extract::{Path, State},
-    http::{header, HeaderMap, StatusCode},
+    http,
     response::{IntoResponse, Response},
-    Json,
 };
-use base64::{prelude::BASE64_STANDARD, Engine};
+use base64::{Engine, prelude::BASE64_STANDARD};
 use chrono::Utc;
-use gitolfs3_common::{generate_tag, Claims, HexByte, Oid, Operation, SpecificClaims};
-use serde::{de, Deserialize};
+use gitolfs3_common::{Claims, HexByte, Oid, Operation, SpecificClaims, generate_tag};
+use serde::{Deserialize, de};
 use tokio::sync::Mutex;
 
 use crate::{
     api::{
-        is_git_lfs_json_mimetype, make_error_resp, BatchRequest, BatchRequestObject, BatchResponse,
-        BatchResponseObject, BatchResponseObjectAction, BatchResponseObjectActions, GitLfsJson,
-        HashAlgo, RepositoryName, TransferAdapter, LFS_MIME, REPO_NOT_FOUND,
+        BatchRequest, BatchRequestObject, BatchResponse, BatchResponseObject,
+        BatchResponseObjectAction, BatchResponseObjectActions, GitLfsJson, HashAlgo, LFS_MIME,
+        REPO_NOT_FOUND, RepositoryName, TransferAdapter, is_git_lfs_json_mimetype, make_error_resp,
     },
-    authz::{authorize_batch, authorize_get, Trusted},
+    authz::{Trusted, authorize_batch, authorize_get},
     config::AuthorizationConfig,
     dlimit::DownloadLimiter,
 };
@@ -33,100 +33,44 @@ pub struct AppState {
     pub dl_limiter: Arc<Mutex<DownloadLimiter>>,
 }
 
-fn validate_checksum(oid: Oid, obj: &HeadObjectOutput) -> bool {
-    if let Some(checksum) = obj.checksum_sha256() {
-        if let Ok(checksum) = BASE64_STANDARD.decode(checksum) {
-            if let Ok(checksum32b) = TryInto::<[u8; 32]>::try_into(checksum) {
-                return Oid::from(checksum32b) == oid;
-            }
-        }
-    }
-    true
+enum ObjectStatus {
+    ExistsOk { content_length: Option<i64> },
+    ExistsInconsistent,
+    DoesNotExist,
 }
 
-fn validate_size(expected: i64, obj: &HeadObjectOutput) -> bool {
-    if let Some(length) = obj.content_length() {
-        return length == expected;
-    }
-    true
-}
+impl AppState {
+    async fn check_object(&self, repo: &str, obj: &BatchRequestObject) -> Result<ObjectStatus, ()> {
+        let (oid0, oid1) = (HexByte(obj.oid[0]), HexByte(obj.oid[1]));
+        let full_path = format!("{repo}/lfs/objects/{}/{}/{}", oid0, oid1, obj.oid);
 
-async fn handle_upload_object(
-    state: &AppState,
-    repo: &str,
-    obj: &BatchRequestObject,
-) -> Option<BatchResponseObject> {
-    let (oid0, oid1) = (HexByte(obj.oid[0]), HexByte(obj.oid[1]));
-    let full_path = format!("{repo}/lfs/objects/{}/{}/{}", oid0, oid1, obj.oid);
-
-    match state
-        .s3_client
-        .head_object()
-        .bucket(&state.s3_bucket)
-        .key(full_path.clone())
-        .checksum_mode(aws_sdk_s3::types::ChecksumMode::Enabled)
-        .send()
-        .await
-    {
-        Ok(result) => {
-            if validate_size(obj.size, &result) && validate_checksum(obj.oid, &result) {
-                return None;
-            }
-        }
-        Err(SdkError::ServiceError(e)) if e.err().is_not_found() => {}
-        Err(e) => {
-            println!("Failed to HeadObject (repo {repo}, OID {}): {e}", obj.oid);
-            return Some(BatchResponseObject::error(
-                obj,
-                StatusCode::INTERNAL_SERVER_ERROR,
-                "Failed to query object information".to_string(),
-            ));
-        }
-    };
+        let result = match self
+            .s3_client
+            .head_object()
+            .bucket(&self.s3_bucket)
+            .key(full_path)
+            .checksum_mode(aws_sdk_s3::types::ChecksumMode::Enabled)
+            .send()
+            .await
+        {
+            Ok(result) => result,
+            Err(SdkError::ServiceError(e)) if e.err().is_not_found() => {
+                return Ok(ObjectStatus::DoesNotExist);
+            }
+            Err(e) => {
+                println!("Failed to HeadObject (repo {repo}, OID {}): {e}", obj.oid);
+                return Err(());
+            }
+        };
 
-    let expires_in = std::time::Duration::from_secs(5 * 60);
-    let expires_at = Utc::now() + expires_in;
-
-    let Ok(config) = aws_sdk_s3::presigning::PresigningConfig::expires_in(expires_in) else {
-        return Some(BatchResponseObject::error(
-            obj,
-            StatusCode::INTERNAL_SERVER_ERROR,
-            "Failed to generate upload URL".to_string(),
-        ));
-    };
-    let Ok(presigned) = state
-        .s3_client
-        .put_object()
-        .bucket(&state.s3_bucket)
-        .key(full_path)
-        .checksum_sha256(obj.oid.to_string())
-        .content_length(obj.size)
-        .presigned(config)
-        .await
-    else {
-        return Some(BatchResponseObject::error(
-            obj,
-            StatusCode::INTERNAL_SERVER_ERROR,
-            "Failed to generate upload URL".to_string(),
-        ));
-    };
-    Some(BatchResponseObject {
-        oid: obj.oid,
-        size: obj.size,
-        authenticated: Some(true),
-        actions: BatchResponseObjectActions {
-            upload: Some(BatchResponseObjectAction {
-                header: presigned
-                    .headers()
-                    .map(|(k, v)| (k.to_owned(), v.to_owned()))
-                    .collect(),
-                expires_at,
-                href: presigned.uri().to_string(),
-            }),
-            ..Default::default()
-        },
-        error: None,
-    })
+        // Scaleway actually doesn't provide SHA256 support, but maybe in the future :)
+        if !s3_validate_checksum(obj.oid, &result) || !s3_validate_size(obj.size, &result) {
+            return Ok(ObjectStatus::ExistsInconsistent);
+        }
+        Ok(ObjectStatus::ExistsOk {
+            content_length: result.content_length(),
+        })
+    }
 }
 
 async fn handle_download_object(
@@ -138,42 +82,24 @@ async fn handle_download_object(
     let (oid0, oid1) = (HexByte(obj.oid[0]), HexByte(obj.oid[1]));
     let full_path = format!("{repo}/lfs/objects/{}/{}/{}", oid0, oid1, obj.oid);
 
-    let result = match state
-        .s3_client
-        .head_object()
-        .bucket(&state.s3_bucket)
-        .key(&full_path)
-        .checksum_mode(aws_sdk_s3::types::ChecksumMode::Enabled)
-        .send()
-        .await
-    {
-        Ok(result) => result,
-        Err(e) => {
-            println!("Failed to HeadObject (repo {repo}, OID {}): {e}", obj.oid);
+    let content_length = match state.check_object(repo, obj).await {
+        Ok(ObjectStatus::ExistsOk { content_length }) => content_length,
+        Ok(_) => {
+            return BatchResponseObject::error(
+                obj,
+                http::StatusCode::UNPROCESSABLE_ENTITY,
+                "Object corrupted".to_string(),
+            );
+        }
+        Err(_) => {
             return BatchResponseObject::error(
                 obj,
-                StatusCode::INTERNAL_SERVER_ERROR,
+                http::StatusCode::INTERNAL_SERVER_ERROR,
                 "Failed to query object information".to_string(),
             );
         }
     };
 
-    // Scaleway actually doesn't provide SHA256 suport, but maybe in the future :)
-    if !validate_checksum(obj.oid, &result) {
-        return BatchResponseObject::error(
-            obj,
-            StatusCode::UNPROCESSABLE_ENTITY,
-            "Object corrupted".to_string(),
-        );
-    }
-    if !validate_size(obj.size, &result) {
-        return BatchResponseObject::error(
-            obj,
-            StatusCode::UNPROCESSABLE_ENTITY,
-            "Incorrect size specified (or object corrupted)".to_string(),
-        );
-    }
-
     let expires_in = std::time::Duration::from_secs(5 * 60);
     let expires_at = Utc::now() + expires_in;
 
@@ -181,7 +107,7 @@
     let Ok(config) = aws_sdk_s3::presigning::PresigningConfig::expires_in(expires_in) else {
         return BatchResponseObject::error(
             obj,
-            StatusCode::INTERNAL_SERVER_ERROR,
+            http::StatusCode::INTERNAL_SERVER_ERROR,
             "Failed to generate upload URL".to_string(),
         );
     };
@@ -195,7 +121,7 @@
     else {
         return BatchResponseObject::error(
             obj,
-            StatusCode::INTERNAL_SERVER_ERROR,
+            http::StatusCode::INTERNAL_SERVER_ERROR,
             "Failed to generate upload URL".to_string(),
         );
     };
@@ -218,31 +144,31 @@
         };
     }
 
-    if let Some(content_length) = result.content_length() {
-        if content_length > 0 {
-            match state
-                .dl_limiter
-                .lock()
-                .await
-                .request(content_length as u64)
-                .await
-            {
-                Ok(true) => {}
-                Ok(false) => {
-                    return BatchResponseObject::error(
-                        obj,
-                        StatusCode::SERVICE_UNAVAILABLE,
-                        "Public LFS downloads temporarily unavailable".to_string(),
-                    );
-                }
-                Err(e) => {
-                    println!("Failed to request {content_length} bytes from download limiter: {e}");
-                    return BatchResponseObject::error(
-                        obj,
-                        StatusCode::INTERNAL_SERVER_ERROR,
-                        "Internal server error".to_string(),
-                    );
-                }
-            }
-        }
+    if let Some(content_length) = content_length
+        && content_length > 0
+    {
+        match state
+            .dl_limiter
+            .lock()
+            .await
+            .request(content_length as u64)
+            .await
+        {
+            Ok(true) => {}
+            Ok(false) => {
+                return BatchResponseObject::error(
+                    obj,
+                    http::StatusCode::SERVICE_UNAVAILABLE,
+                    "Public LFS downloads temporarily unavailable".to_string(),
+                );
+            }
+            Err(e) => {
+                println!("Failed to request {content_length} bytes from download limiter: {e}");
+                return BatchResponseObject::error(
+                    obj,
+                    http::StatusCode::INTERNAL_SERVER_ERROR,
+                    "Internal server error".to_string(),
+                );
+            }
+        }
     }
@@ -257,18 +183,11 @@
     ) else {
         return BatchResponseObject::error(
             obj,
-            StatusCode::INTERNAL_SERVER_ERROR,
+            http::StatusCode::INTERNAL_SERVER_ERROR,
             "Internal server error".to_string(),
         );
     };
 
-    let upload_path = format!(
-        "{repo}/info/lfs/objects/{}/{}/{}",
-        HexByte(obj.oid[0]),
-        HexByte(obj.oid[1]),
-        obj.oid,
-    );
-
     BatchResponseObject {
         oid: obj.oid,
         size: obj.size,
@@ -284,7 +203,13 @@
                     map
                 },
                 expires_at,
-                href: format!("{}/{upload_path}", state.base_url),
+                href: format!(
+                    "{}/{repo}/info/lfs/objects/{}/{}/{}",
+                    state.base_url,
+                    HexByte(obj.oid[0]),
+                    HexByte(obj.oid[1]),
+                    obj.oid
+                ),
             }),
             ..Default::default()
         },
@@ -292,83 +217,6 @@
     }
 }
 
-fn repo_exists(name: &str) -> bool {
-    let Ok(metadata) = std::fs::metadata(name) else {
-        return false;
-    };
-    metadata.is_dir()
-}
-
-fn is_repo_public(name: &str) -> Option<bool> {
-    if !repo_exists(name) {
-        return None;
-    }
-    match std::fs::metadata(format!("{name}/git-daemon-export-ok")) {
-        Ok(metadata) if metadata.is_file() => Some(true),
-        Err(e) if e.kind() == std::io::ErrorKind::NotFound => Some(false),
-        _ => None,
-    }
-}
-
-pub async fn batch(
-    State(state): State<Arc<AppState>>,
-    headers: HeaderMap,
-    RepositoryName(repo): RepositoryName,
-    GitLfsJson(Json(payload)): GitLfsJson<BatchRequest>,
-) -> Response {
-    let Some(public) = is_repo_public(&repo) else {
-        return REPO_NOT_FOUND.into_response();
-    };
-    let Trusted(trusted) = match authorize_batch(
-        &state.authz_conf,
-        &repo,
-        public,
-        payload.operation,
-        &headers,
-    ) {
-        Ok(authn) => authn,
-        Err(e) => return e.into_response(),
-    };
-
-    if !headers
-        .get_all("Accept")
-        .iter()
-        .filter_map(|v| v.to_str().ok())
-        .any(is_git_lfs_json_mimetype)
-    {
-        let message = format!("Expected `{LFS_MIME}` in list of acceptable response media types");
-        return make_error_resp(StatusCode::NOT_ACCEPTABLE, &message).into_response();
-    }
-
-    if payload.hash_algo != HashAlgo::Sha256 {
-        let message = "Unsupported hashing algorithm specified";
-        return make_error_resp(StatusCode::CONFLICT, message).into_response();
-    }
-    if !payload.transfers.is_empty() && !payload.transfers.contains(&TransferAdapter::Basic) {
-        let message = "Unsupported transfer adapter specified (supported: basic)";
-        return make_error_resp(StatusCode::CONFLICT, message).into_response();
-    }
-
-    let mut resp = BatchResponse {
-        transfer: TransferAdapter::Basic,
-        objects: vec![],
-        hash_algo: HashAlgo::Sha256,
-    };
-    for obj in payload.objects {
-        match payload.operation {
-            Operation::Download => resp
-                .objects
-                .push(handle_download_object(&state, &repo, &obj, trusted).await),
-            Operation::Upload => {
-                if let Some(obj_resp) = handle_upload_object(&state, &repo, &obj).await {
-                    resp.objects.push(obj_resp);
-                }
-            }
-        };
-    }
-    GitLfsJson(Json(resp)).into_response()
-}
-
 #[derive(Deserialize, Copy, Clone)]
 #[serde(remote = "Self")]
 pub struct FileParams {
@@ -382,11 +230,11 @@ impl<'de> Deserialize<'de> for FileParams {
     where
         D: serde::Deserializer<'de>,
     {
-        let unchecked @ FileParams {
+        let unchecked @ Self {
             oid0: HexByte(oid0),
             oid1: HexByte(oid1),
             oid,
-        } = FileParams::deserialize(deserializer)?;
+        } = Self::deserialize(deserializer)?;
         if oid0 != oid.as_bytes()[0] {
             return Err(de::Error::custom(
                 "first OID path part does not match first byte of full OID",
@@ -401,9 +249,9 @@
     }
 }
 
-pub async fn obj_download(
+pub async fn handle_obj_download(
     State(state): State<Arc<AppState>>,
-    headers: HeaderMap,
+    headers: http::HeaderMap,
     RepositoryName(repo): RepositoryName,
     Path(FileParams { oid0, oid1, oid }): Path<FileParams>,
 ) -> Response {
@@ -425,26 +273,26 @@
         Err(e) => {
             println!("Failed to GetObject (repo {repo}, OID {oid}): {e}");
             return (
-                StatusCode::INTERNAL_SERVER_ERROR,
+                http::StatusCode::INTERNAL_SERVER_ERROR,
                 "Failed to query object information",
             )
                 .into_response();
         }
     };
 
-    let mut headers = header::HeaderMap::new();
+    let mut headers = http::header::HeaderMap::new();
     if let Some(content_type) = result.content_type {
         let Ok(header_value) = content_type.try_into() else {
             return (
-                StatusCode::INTERNAL_SERVER_ERROR,
+                http::StatusCode::INTERNAL_SERVER_ERROR,
                 "Object has invalid content type",
             )
                 .into_response();
         };
-        headers.insert(header::CONTENT_TYPE, header_value);
+        headers.insert(http::header::CONTENT_TYPE, header_value);
     }
     if let Some(content_length) = result.content_length {
-        headers.insert(header::CONTENT_LENGTH, content_length.into());
+        headers.insert(http::header::CONTENT_LENGTH, content_length.into());
    }
 
     let async_read = result.body.into_async_read();
@@ -453,3 +301,168 @@
 
     (headers, body).into_response()
 }
+
+async fn handle_upload_object(
+    state: &AppState,
+    repo: &str,
+    obj: &BatchRequestObject,
+) -> Option<BatchResponseObject> {
+    let (oid0, oid1) = (HexByte(obj.oid[0]), HexByte(obj.oid[1]));
+    let full_path = format!("{repo}/lfs/objects/{}/{}/{}", oid0, oid1, obj.oid);
+
+    match state.check_object(repo, obj).await {
+        Ok(ObjectStatus::ExistsOk { .. }) => {
+            return None;
+        }
+        Ok(_) => {}
+        Err(_) => {
+            return Some(BatchResponseObject::error(
+                obj,
+                http::StatusCode::INTERNAL_SERVER_ERROR,
+                "Failed to query object information".to_string(),
+            ));
+        }
+    };
+
+    let expires_in = std::time::Duration::from_secs(5 * 60);
+    let expires_at = Utc::now() + expires_in;
+
+    let Ok(config) = aws_sdk_s3::presigning::PresigningConfig::expires_in(expires_in) else {
+        return Some(BatchResponseObject::error(
+            obj,
+            http::StatusCode::INTERNAL_SERVER_ERROR,
+            "Failed to generate upload URL".to_string(),
+        ));
+    };
+    let Ok(presigned) = state
+        .s3_client
+        .put_object()
+        .bucket(&state.s3_bucket)
+        .key(full_path)
+        .checksum_sha256(s3_encode_checksum(obj.oid))
+        .content_length(obj.size)
+        .presigned(config)
+        .await
+    else {
+        return Some(BatchResponseObject::error(
+            obj,
+            http::StatusCode::INTERNAL_SERVER_ERROR,
+            "Failed to generate upload URL".to_string(),
+        ));
+    };
+    Some(BatchResponseObject {
+        oid: obj.oid,
+        size: obj.size,
+        authenticated: Some(true),
+        actions: BatchResponseObjectActions {
+            upload: Some(BatchResponseObjectAction {
+                header: presigned
+                    .headers()
+                    .map(|(k, v)| (k.to_owned(), v.to_owned()))
+                    .collect(),
+                expires_at,
+                href: presigned.uri().to_string(),
+            }),
+            ..Default::default()
+        },
+        error: None,
+    })
+}
+
+pub async fn handle_batch(
+    State(state): State<Arc<AppState>>,
+    headers: http::HeaderMap,
+    RepositoryName(repo): RepositoryName,
+    GitLfsJson(Json(payload)): GitLfsJson<BatchRequest>,
+) -> Response {
+    let Some(public) = is_repo_public(&repo) else {
+        return REPO_NOT_FOUND.into_response();
+    };
+    let Trusted(trusted) = match authorize_batch(
+        &state.authz_conf,
+        &repo,
+        public,
+        payload.operation,
+        &headers,
+    ) {
+        Ok(authn) => authn,
+        Err(e) => return e.into_response(),
+    };
+
+    if !headers
+        .get_all("Accept")
+        .iter()
+        .filter_map(|v| v.to_str().ok())
+        .any(is_git_lfs_json_mimetype)
+    {
+        let message = format!("Expected `{LFS_MIME}` in list of acceptable response media types");
+        return make_error_resp(http::StatusCode::NOT_ACCEPTABLE, &message).into_response();
+    }
+
+    if payload.hash_algo != HashAlgo::Sha256 {
+        let message = "Unsupported hashing algorithm specified";
+        return make_error_resp(http::StatusCode::CONFLICT, message).into_response();
+    }
+    if !payload.transfers.is_empty() && !payload.transfers.contains(&TransferAdapter::Basic) {
+        let message = "Unsupported transfer adapter specified (supported: basic)";
+        return make_error_resp(http::StatusCode::CONFLICT, message).into_response();
+    }
+
+    let mut resp = BatchResponse {
+        transfer: TransferAdapter::Basic,
+        objects: vec![],
+        hash_algo: HashAlgo::Sha256,
+    };
+    for obj in payload.objects {
+        match payload.operation {
+            Operation::Download => resp
+                .objects
+                .push(handle_download_object(&state, &repo, &obj, trusted).await),
+            Operation::Upload => {
+                if let Some(obj_resp) = handle_upload_object(&state, &repo, &obj).await {
+                    resp.objects.push(obj_resp);
+                }
+            }
+        };
+    }
+    GitLfsJson(Json(resp)).into_response()
+}
+
+fn s3_encode_checksum(oid: Oid) -> String {
+    BASE64_STANDARD.encode(oid.as_bytes())
+}
+
+fn s3_validate_checksum(oid: Oid, obj: &HeadObjectOutput) -> bool {
+    if let Some(checksum) = obj.checksum_sha256()
+        && let Ok(checksum) = BASE64_STANDARD.decode(checksum)
+        && let Ok(checksum32b) = TryInto::<[u8; 32]>::try_into(checksum)
+    {
+        return Oid::from(checksum32b) == oid;
+    }
+    true
+}
+
+fn s3_validate_size(expected: i64, obj: &HeadObjectOutput) -> bool {
+    if let Some(length) = obj.content_length() {
+        return length == expected;
+    }
+    true
+}
+
+fn repo_exists(name: &str) -> bool {
+    let Ok(metadata) = std::fs::metadata(name) else {
+        return false;
+    };
+    metadata.is_dir()
+}
+
+fn is_repo_public(name: &str) -> Option<bool> {
+    if !repo_exists(name) {
+        return None;
+    }
+    match std::fs::metadata(format!("{name}/git-daemon-export-ok")) {
+        Ok(metadata) if metadata.is_file() => Some(true),
+        Err(e) if e.kind() == std::io::ErrorKind::NotFound => Some(false),
+        _ => None,
+    }
+}
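
Besides consolidating the duplicated HeadObject logic into `check_object`, the upload path fixes a real bug: it previously passed `obj.oid.to_string()` to `.checksum_sha256(...)`, while S3's `x-amz-checksum-sha256` value must be the base64 encoding of the raw 32 digest bytes, which the new `s3_encode_checksum` produces. The difference, assuming `Oid`'s `Display` is the hex form (as the object path layout suggests; made-up digest below):

    use base64::{Engine, prelude::BASE64_STANDARD};

    fn main() {
        let digest = [0xab_u8; 32]; // hypothetical SHA-256 digest

        // What the old code effectively sent: 64 hex characters.
        let hex: String = digest.iter().map(|b| format!("{b:02x}")).collect();
        assert_eq!(hex.len(), 64);

        // What S3 expects: base64 of the raw bytes (44 characters).
        let b64 = BASE64_STANDARD.encode(digest);
        assert_eq!(b64.len(), 44);
    }
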
diff --git a/gitolfs3-server/src/main.rs b/gitolfs3-server/src/main.rs
index c9911ed..c88de76 100644
--- a/gitolfs3-server/src/main.rs
+++ b/gitolfs3-server/src/main.rs
@@ -9,13 +9,14 @@ use config::Config;
 use dlimit::DownloadLimiter;
 
 use axum::{
+    Router, ServiceExt,
     extract::OriginalUri,
-    http::{StatusCode, Uri},
+    http::{self, Uri},
     routing::{get, post},
-    Router, ServiceExt,
 };
-use handler::AppState;
+use handler::{AppState, handle_batch, handle_obj_download};
 use std::{process::ExitCode, sync::Arc};
+use tokio::net::TcpListener;
 use tower::Layer;
 
 #[tokio::main]
@@ -39,14 +40,14 @@ async fn main() -> ExitCode {
         dl_limiter,
     });
     let app = Router::new()
-        .route("/batch", post(handler::batch))
-        .route("/:oid0/:oid1/:oid", get(handler::obj_download))
+        .route("/batch", post(handle_batch))
+        .route("/{oid0}/{oid1}/{oid}", get(handle_obj_download))
         .with_state(shared_state);
 
     let middleware = axum::middleware::map_request(rewrite_url);
     let app_with_middleware = middleware.layer(app);
 
-    let listener = match tokio::net::TcpListener::bind(conf.listen_addr).await {
+    let listener = match TcpListener::bind(conf.listen_addr).await {
         Ok(listener) => listener,
         Err(e) => {
             println!("Failed to listen: {e}");
@@ -63,25 +64,23 @@ async fn main() -> ExitCode {
     }
 }
 
-async fn rewrite_url<B>(
-    mut req: axum::http::Request<B>,
-) -> Result<axum::http::Request<B>, StatusCode> {
+async fn rewrite_url<B>(mut req: http::Request<B>) -> Result<http::Request<B>, http::StatusCode> {
     let uri = req.uri();
     let original_uri = OriginalUri(uri.clone());
 
     let Some(path_and_query) = uri.path_and_query() else {
         // L @ no path & query
-        return Err(StatusCode::BAD_REQUEST);
+        return Err(http::StatusCode::BAD_REQUEST);
     };
     let Some((repo, path)) = path_and_query.path().split_once("/info/lfs/objects") else {
-        return Err(StatusCode::NOT_FOUND);
+        return Err(http::StatusCode::NOT_FOUND);
     };
     let repo = repo
         .trim_start_matches('/')
         .trim_end_matches('/')
         .to_string();
     if !path.starts_with('/') || !repo.ends_with(".git") {
-        return Err(StatusCode::NOT_FOUND);
+        return Err(http::StatusCode::NOT_FOUND);
     }
 
     let mut parts = uri.clone().into_parts();
@@ -90,7 +89,7 @@ async fn rewrite_url<B>(
         Some(q) => format!("{path}?{q}").try_into().ok(),
     };
     let Ok(new_uri) = Uri::from_parts(parts) else {
-        return Err(StatusCode::INTERNAL_SERVER_ERROR);
+        return Err(http::StatusCode::INTERNAL_SERVER_ERROR);
     };
 
     *req.uri_mut() = new_uri;
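
The route change from `/:oid0/:oid1/:oid` to `/{oid0}/{oid1}/{oid}` is required by axum 0.8, which replaced the colon syntax with braces; the old form is rejected (panics) when the `Router` is built. In miniature:

    use axum::{Router, routing::get};

    async fn ok() -> &'static str {
        "ok"
    }

    fn router() -> Router {
        // axum 0.8 brace syntax; the 0.7-era "/:oid" form would panic
        // at Router construction time.
        Router::new().route("/{oid0}/{oid1}/{oid}", get(ok))
    }
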