author     Rutger Broekhoff   2024-07-12 00:29:57 +0200
committer  Rutger Broekhoff   2024-07-12 00:29:57 +0200
commit     bc709f0f23be345a1e2ccd06acd36bd5dac40bde (patch)
tree       4ffe66b1ac246e0a9eab4a2649a7db5bb3a1ff0a
parent     3e67a3486eed22522f4352503ef7067ca81a8050 (diff)
Restructure server
-rw-r--r--  gitolfs3-server/src/api.rs      213
-rw-r--r--  gitolfs3-server/src/authz.rs     80
-rw-r--r--  gitolfs3-server/src/config.rs   122
-rw-r--r--  gitolfs3-server/src/dlimit.rs     2
-rw-r--r--  gitolfs3-server/src/handler.rs  388
-rw-r--r--  gitolfs3-server/src/main.rs      23
6 files changed, 424 insertions, 404 deletions
diff --git a/gitolfs3-server/src/api.rs b/gitolfs3-server/src/api.rs
index dba7ada..d71d188 100644
--- a/gitolfs3-server/src/api.rs
+++ b/gitolfs3-server/src/api.rs
@@ -3,7 +3,7 @@ use std::collections::HashMap;
3 | use axum::{ | 3 | use axum::{ |
4 | async_trait, | 4 | async_trait, |
5 | extract::{rejection, FromRequest, FromRequestParts, Request}, | 5 | extract::{rejection, FromRequest, FromRequestParts, Request}, |
6 | http::{header, request::Parts, HeaderValue, StatusCode}, | 6 | http, |
7 | response::{IntoResponse, Response}, | 7 | response::{IntoResponse, Response}, |
8 | Extension, Json, | 8 | Extension, Json, |
9 | }; | 9 | }; |
@@ -11,79 +11,21 @@ use chrono::{DateTime, Utc};
11 | use gitolfs3_common::{Oid, Operation}; | 11 | use gitolfs3_common::{Oid, Operation}; |
12 | use serde::{de::DeserializeOwned, Deserialize, Serialize}; | 12 | use serde::{de::DeserializeOwned, Deserialize, Serialize}; |
13 | 13 | ||
14 | pub const REPO_NOT_FOUND: GitLfsErrorResponse = | 14 | // ----------------------- Generic facilities ---------------------- |
15 | make_error_resp(StatusCode::NOT_FOUND, "Repository not found"); | ||
16 | |||
17 | #[derive(Clone)] | ||
18 | pub struct RepositoryName(pub String); | ||
19 | |||
20 | pub struct RepositoryNameRejection; | ||
21 | |||
22 | impl IntoResponse for RepositoryNameRejection { | ||
23 | fn into_response(self) -> Response { | ||
24 | (StatusCode::INTERNAL_SERVER_ERROR, "Missing repository name").into_response() | ||
25 | } | ||
26 | } | ||
27 | |||
28 | #[async_trait] | ||
29 | impl<S: Send + Sync> FromRequestParts<S> for RepositoryName { | ||
30 | type Rejection = RepositoryNameRejection; | ||
31 | |||
32 | async fn from_request_parts(parts: &mut Parts, state: &S) -> Result<Self, Self::Rejection> { | ||
33 | let Ok(Extension(repo_name)) = Extension::<Self>::from_request_parts(parts, state).await | ||
34 | else { | ||
35 | return Err(RepositoryNameRejection); | ||
36 | }; | ||
37 | Ok(repo_name) | ||
38 | } | ||
39 | } | ||
40 | 15 | ||
41 | #[derive(Debug, Serialize, Deserialize, PartialEq, Eq, Clone, Copy)] | 16 | pub type GitLfsErrorResponse<'a> = (http::StatusCode, GitLfsJson<GitLfsErrorData<'a>>); |
42 | pub enum TransferAdapter { | ||
43 | #[serde(rename = "basic")] | ||
44 | Basic, | ||
45 | #[serde(other)] | ||
46 | Unknown, | ||
47 | } | ||
48 | 17 | ||
49 | #[derive(Debug, Serialize, Deserialize, PartialEq, Eq, Clone, Copy)] | 18 | #[derive(Debug, Serialize)] |
50 | pub enum HashAlgo { | 19 | pub struct GitLfsErrorData<'a> { |
51 | #[serde(rename = "sha256")] | 20 | pub message: &'a str, |
52 | Sha256, | ||
53 | #[serde(other)] | ||
54 | Unknown, | ||
55 | } | ||
56 | |||
57 | impl Default for HashAlgo { | ||
58 | fn default() -> Self { | ||
59 | Self::Sha256 | ||
60 | } | ||
61 | } | ||
62 | |||
63 | #[derive(Debug, Deserialize, PartialEq, Eq, Clone)] | ||
64 | pub struct BatchRequestObject { | ||
65 | pub oid: Oid, | ||
66 | pub size: i64, | ||
67 | } | ||
68 | |||
69 | #[derive(Debug, Serialize, Deserialize, Clone)] | ||
70 | struct BatchRef { | ||
71 | name: String, | ||
72 | } | 21 | } |
73 | 22 | ||
74 | fn default_transfers() -> Vec<TransferAdapter> { | 23 | pub const fn make_error_resp(code: http::StatusCode, message: &str) -> GitLfsErrorResponse { |
75 | vec![TransferAdapter::Basic] | 24 | (code, GitLfsJson(Json(GitLfsErrorData { message }))) |
76 | } | 25 | } |
77 | 26 | ||
78 | #[derive(Debug, Deserialize, PartialEq, Eq, Clone)] | 27 | pub const REPO_NOT_FOUND: GitLfsErrorResponse = |
79 | pub struct BatchRequest { | 28 | make_error_resp(http::StatusCode::NOT_FOUND, "Repository not found"); |
80 | pub operation: Operation, | ||
81 | #[serde(default = "default_transfers")] | ||
82 | pub transfers: Vec<TransferAdapter>, | ||
83 | pub objects: Vec<BatchRequestObject>, | ||
84 | #[serde(default)] | ||
85 | pub hash_algo: HashAlgo, | ||
86 | } | ||
87 | 29 | ||
88 | #[derive(Debug, Clone)] | 30 | #[derive(Debug, Clone)] |
89 | pub struct GitLfsJson<T>(pub Json<T>); | 31 | pub struct GitLfsJson<T>(pub Json<T>); |
@@ -100,7 +42,7 @@ impl IntoResponse for GitLfsJsonRejection {
100 | match self { | 42 | match self { |
101 | Self::Json(rej) => rej.into_response(), | 43 | Self::Json(rej) => rej.into_response(), |
102 | Self::MissingGitLfsJsonContentType => make_error_resp( | 44 | Self::MissingGitLfsJsonContentType => make_error_resp( |
103 | StatusCode::UNSUPPORTED_MEDIA_TYPE, | 45 | http::StatusCode::UNSUPPORTED_MEDIA_TYPE, |
104 | &format!("Expected request with `Content-Type: {LFS_MIME}`"), | 46 | &format!("Expected request with `Content-Type: {LFS_MIME}`"), |
105 | ) | 47 | ) |
106 | .into_response(), | 48 | .into_response(), |
@@ -125,7 +67,7 @@ pub fn is_git_lfs_json_mimetype(mimetype: &str) -> bool {
125 | } | 67 | } |
126 | 68 | ||
127 | fn has_git_lfs_json_content_type(req: &Request) -> bool { | 69 | fn has_git_lfs_json_content_type(req: &Request) -> bool { |
128 | let Some(content_type) = req.headers().get(header::CONTENT_TYPE) else { | 70 | let Some(content_type) = req.headers().get(http::header::CONTENT_TYPE) else { |
129 | return false; | 71 | return false; |
130 | }; | 72 | }; |
131 | let Ok(content_type) = content_type.to_str() else { | 73 | let Ok(content_type) = content_type.to_str() else { |
@@ -158,46 +100,98 @@ impl<T: Serialize> IntoResponse for GitLfsJson<T> {
158 | let GitLfsJson(json) = self; | 100 | let GitLfsJson(json) = self; |
159 | let mut resp = json.into_response(); | 101 | let mut resp = json.into_response(); |
160 | resp.headers_mut().insert( | 102 | resp.headers_mut().insert( |
161 | header::CONTENT_TYPE, | 103 | http::header::CONTENT_TYPE, |
162 | HeaderValue::from_static("application/vnd.git-lfs+json; charset=utf-8"), | 104 | http::HeaderValue::from_static("application/vnd.git-lfs+json; charset=utf-8"), |
163 | ); | 105 | ); |
164 | resp | 106 | resp |
165 | } | 107 | } |
166 | } | 108 | } |
167 | 109 | ||
168 | #[derive(Debug, Serialize)] | 110 | #[derive(Clone)] |
169 | pub struct GitLfsErrorData<'a> { | 111 | pub struct RepositoryName(pub String); |
170 | pub message: &'a str, | 112 | |
113 | pub struct RepositoryNameRejection; | ||
114 | |||
115 | impl IntoResponse for RepositoryNameRejection { | ||
116 | fn into_response(self) -> Response { | ||
117 | ( | ||
118 | http::StatusCode::INTERNAL_SERVER_ERROR, | ||
119 | "Missing repository name", | ||
120 | ) | ||
121 | .into_response() | ||
122 | } | ||
171 | } | 123 | } |
172 | 124 | ||
173 | pub type GitLfsErrorResponse<'a> = (StatusCode, GitLfsJson<GitLfsErrorData<'a>>); | 125 | #[async_trait] |
126 | impl<S: Send + Sync> FromRequestParts<S> for RepositoryName { | ||
127 | type Rejection = RepositoryNameRejection; | ||
174 | 128 | ||
175 | pub const fn make_error_resp(code: StatusCode, message: &str) -> GitLfsErrorResponse { | 129 | async fn from_request_parts( |
176 | (code, GitLfsJson(Json(GitLfsErrorData { message }))) | 130 | parts: &mut http::request::Parts, |
131 | state: &S, | ||
132 | ) -> Result<Self, Self::Rejection> { | ||
133 | let Ok(Extension(repo_name)) = Extension::<Self>::from_request_parts(parts, state).await | ||
134 | else { | ||
135 | return Err(RepositoryNameRejection); | ||
136 | }; | ||
137 | Ok(repo_name) | ||
138 | } | ||
177 | } | 139 | } |
178 | 140 | ||
179 | #[derive(Debug, Serialize, Clone)] | 141 | // ----------------------- Git LFS Batch API ----------------------- |
180 | pub struct BatchResponseObjectAction { | 142 | |
181 | pub href: String, | 143 | #[derive(Debug, Deserialize, PartialEq, Eq, Clone)] |
182 | #[serde(skip_serializing_if = "HashMap::is_empty")] | 144 | pub struct BatchRequest { |
183 | pub header: HashMap<String, String>, | 145 | pub operation: Operation, |
184 | pub expires_at: DateTime<Utc>, | 146 | #[serde(default = "default_transfers")] |
147 | pub transfers: Vec<TransferAdapter>, | ||
148 | pub objects: Vec<BatchRequestObject>, | ||
149 | #[serde(default)] | ||
150 | pub hash_algo: HashAlgo, | ||
185 | } | 151 | } |
186 | 152 | ||
187 | #[derive(Default, Debug, Serialize, Clone)] | 153 | #[derive(Debug, Deserialize, PartialEq, Eq, Clone)] |
188 | pub struct BatchResponseObjectActions { | 154 | pub struct BatchRequestObject { |
189 | #[serde(skip_serializing_if = "Option::is_none")] | 155 | pub oid: Oid, |
190 | pub upload: Option<BatchResponseObjectAction>, | 156 | pub size: i64, |
191 | #[serde(skip_serializing_if = "Option::is_none")] | ||
192 | pub download: Option<BatchResponseObjectAction>, | ||
193 | #[serde(skip_serializing_if = "Option::is_none")] | ||
194 | pub verify: Option<BatchResponseObjectAction>, | ||
195 | } | 157 | } |
196 | 158 | ||
197 | #[derive(Debug, Clone, Serialize)] | 159 | #[derive(Debug, Serialize, Deserialize, PartialEq, Eq, Clone, Copy)] |
198 | pub struct BatchResponseObjectError { | 160 | pub enum TransferAdapter { |
199 | pub code: u16, | 161 | #[serde(rename = "basic")] |
200 | pub message: String, | 162 | Basic, |
163 | #[serde(other)] | ||
164 | Unknown, | ||
165 | } | ||
166 | |||
167 | fn default_transfers() -> Vec<TransferAdapter> { | ||
168 | vec![TransferAdapter::Basic] | ||
169 | } | ||
170 | |||
171 | #[derive(Debug, Serialize, Deserialize, PartialEq, Eq, Clone, Copy)] | ||
172 | pub enum HashAlgo { | ||
173 | #[serde(rename = "sha256")] | ||
174 | Sha256, | ||
175 | #[serde(other)] | ||
176 | Unknown, | ||
177 | } | ||
178 | |||
179 | impl Default for HashAlgo { | ||
180 | fn default() -> Self { | ||
181 | Self::Sha256 | ||
182 | } | ||
183 | } | ||
184 | |||
185 | #[derive(Debug, Serialize, Deserialize, Clone)] | ||
186 | struct BatchRef { | ||
187 | name: String, | ||
188 | } | ||
189 | |||
190 | #[derive(Debug, Serialize, Clone)] | ||
191 | pub struct BatchResponse { | ||
192 | pub transfer: TransferAdapter, | ||
193 | pub objects: Vec<BatchResponseObject>, | ||
194 | pub hash_algo: HashAlgo, | ||
201 | } | 195 | } |
202 | 196 | ||
203 | #[derive(Debug, Serialize, Clone)] | 197 | #[derive(Debug, Serialize, Clone)] |
@@ -211,10 +205,16 @@ pub struct BatchResponseObject {
211 | pub error: Option<BatchResponseObjectError>, | 205 | pub error: Option<BatchResponseObjectError>, |
212 | } | 206 | } |
213 | 207 | ||
208 | #[derive(Debug, Clone, Serialize)] | ||
209 | pub struct BatchResponseObjectError { | ||
210 | pub code: u16, | ||
211 | pub message: String, | ||
212 | } | ||
213 | |||
214 | impl BatchResponseObject { | 214 | impl BatchResponseObject { |
215 | pub fn error( | 215 | pub fn error( |
216 | obj: &BatchRequestObject, | 216 | obj: &BatchRequestObject, |
217 | code: StatusCode, | 217 | code: http::StatusCode, |
218 | message: String, | 218 | message: String, |
219 | ) -> BatchResponseObject { | 219 | ) -> BatchResponseObject { |
220 | BatchResponseObject { | 220 | BatchResponseObject { |
@@ -231,10 +231,21 @@ impl BatchResponseObject {
231 | } | 231 | } |
232 | 232 | ||
233 | #[derive(Debug, Serialize, Clone)] | 233 | #[derive(Debug, Serialize, Clone)] |
234 | pub struct BatchResponse { | 234 | pub struct BatchResponseObjectAction { |
235 | pub transfer: TransferAdapter, | 235 | pub href: String, |
236 | pub objects: Vec<BatchResponseObject>, | 236 | #[serde(skip_serializing_if = "HashMap::is_empty")] |
237 | pub hash_algo: HashAlgo, | 237 | pub header: HashMap<String, String>, |
238 | pub expires_at: DateTime<Utc>, | ||
239 | } | ||
240 | |||
241 | #[derive(Default, Debug, Serialize, Clone)] | ||
242 | pub struct BatchResponseObjectActions { | ||
243 | #[serde(skip_serializing_if = "Option::is_none")] | ||
244 | pub upload: Option<BatchResponseObjectAction>, | ||
245 | #[serde(skip_serializing_if = "Option::is_none")] | ||
246 | pub download: Option<BatchResponseObjectAction>, | ||
247 | #[serde(skip_serializing_if = "Option::is_none")] | ||
248 | pub verify: Option<BatchResponseObjectAction>, | ||
238 | } | 249 | } |
239 | 250 | ||
240 | #[test] | 251 | #[test] |
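The reorder above leaves api.rs reading top-down: generic plumbing (GitLfsJson, the error-response helpers, the RepositoryName extractor) first, then the batch request/response types. To illustrate how the relocated types compose, here is a minimal sketch of a one-object download response; it assumes the items from this file plus gitolfs3_common::Oid are in scope, and the size and expiry are placeholder values:

```rust
use std::collections::HashMap;

use chrono::Utc;
use gitolfs3_common::Oid;

// Sketch only: the JSON body a client would receive for one downloadable
// object. Size and expiry below are placeholders.
fn example_batch_response(oid: Oid, href: String) -> BatchResponse {
    BatchResponse {
        transfer: TransferAdapter::Basic,
        objects: vec![BatchResponseObject {
            oid,
            size: 1234,
            authenticated: Some(true),
            actions: BatchResponseObjectActions {
                download: Some(BatchResponseObjectAction {
                    href,
                    header: HashMap::new(), // omitted from the JSON when empty
                    expires_at: Utc::now() + std::time::Duration::from_secs(5 * 60),
                }),
                ..Default::default()
            },
            error: None,
        }],
        hash_algo: HashAlgo::Sha256,
    }
}
```

The skip_serializing_if attributes keep absent actions and the empty header map out of the serialized response, matching the Git LFS batch API wire format.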
diff --git a/gitolfs3-server/src/authz.rs b/gitolfs3-server/src/authz.rs
index 0674cef..8a5f21f 100644
--- a/gitolfs3-server/src/authz.rs
+++ b/gitolfs3-server/src/authz.rs
@@ -1,6 +1,6 @@
1 | use std::collections::HashSet; | 1 | use std::collections::HashSet; |
2 | 2 | ||
3 | use axum::http::{header, HeaderMap, StatusCode}; | 3 | use axum::http; |
4 | use chrono::{DateTime, Utc}; | 4 | use chrono::{DateTime, Utc}; |
5 | use gitolfs3_common::{generate_tag, Claims, Digest, Oid, Operation, SpecificClaims}; | 5 | use gitolfs3_common::{generate_tag, Claims, Digest, Oid, Operation, SpecificClaims}; |
6 | 6 | ||
@@ -11,31 +11,12 @@ use crate::{
11 | 11 | ||
12 | pub struct Trusted(pub bool); | 12 | pub struct Trusted(pub bool); |
13 | 13 | ||
14 | fn forwarded_from_trusted_host( | ||
15 | headers: &HeaderMap, | ||
16 | trusted: &HashSet<String>, | ||
17 | ) -> Result<bool, GitLfsErrorResponse<'static>> { | ||
18 | if let Some(forwarded_host) = headers.get("X-Forwarded-Host") { | ||
19 | if let Ok(forwarded_host) = forwarded_host.to_str() { | ||
20 | if trusted.contains(forwarded_host) { | ||
21 | return Ok(true); | ||
22 | } | ||
23 | } else { | ||
24 | return Err(make_error_resp( | ||
25 | StatusCode::NOT_FOUND, | ||
26 | "Invalid X-Forwarded-Host header", | ||
27 | )); | ||
28 | } | ||
29 | } | ||
30 | Ok(false) | ||
31 | } | ||
32 | |||
33 | pub fn authorize_batch( | 14 | pub fn authorize_batch( |
34 | conf: &AuthorizationConfig, | 15 | conf: &AuthorizationConfig, |
35 | repo_path: &str, | 16 | repo_path: &str, |
36 | public: bool, | 17 | public: bool, |
37 | operation: Operation, | 18 | operation: Operation, |
38 | headers: &HeaderMap, | 19 | headers: &http::HeaderMap, |
39 | ) -> Result<Trusted, GitLfsErrorResponse<'static>> { | 20 | ) -> Result<Trusted, GitLfsErrorResponse<'static>> { |
40 | // - No authentication required for downloading exported repos | 21 | // - No authentication required for downloading exported repos |
41 | // - When authenticated: | 22 | // - When authenticated: |
@@ -57,7 +38,7 @@ fn authorize_batch_unauthenticated(
57 | conf: &AuthorizationConfig, | 38 | conf: &AuthorizationConfig, |
58 | public: bool, | 39 | public: bool, |
59 | operation: Operation, | 40 | operation: Operation, |
60 | headers: &HeaderMap, | 41 | headers: &http::HeaderMap, |
61 | ) -> Result<Trusted, GitLfsErrorResponse<'static>> { | 42 | ) -> Result<Trusted, GitLfsErrorResponse<'static>> { |
62 | let trusted = forwarded_from_trusted_host(headers, &conf.trusted_forwarded_hosts)?; | 43 | let trusted = forwarded_from_trusted_host(headers, &conf.trusted_forwarded_hosts)?; |
63 | match operation { | 44 | match operation { |
@@ -71,7 +52,7 @@ fn authorize_batch_unauthenticated(
71 | return Err(REPO_NOT_FOUND); | 52 | return Err(REPO_NOT_FOUND); |
72 | } | 53 | } |
73 | Err(make_error_resp( | 54 | Err(make_error_resp( |
74 | StatusCode::FORBIDDEN, | 55 | http::StatusCode::FORBIDDEN, |
75 | "Authentication required to upload", | 56 | "Authentication required to upload", |
76 | )) | 57 | )) |
77 | } | 58 | } |
@@ -94,7 +75,7 @@ pub fn authorize_get(
94 | conf: &AuthorizationConfig, | 75 | conf: &AuthorizationConfig, |
95 | repo_path: &str, | 76 | repo_path: &str, |
96 | oid: Oid, | 77 | oid: Oid, |
97 | headers: &HeaderMap, | 78 | headers: &http::HeaderMap, |
98 | ) -> Result<(), GitLfsErrorResponse<'static>> { | 79 | ) -> Result<(), GitLfsErrorResponse<'static>> { |
99 | let claims = VerifyClaimsInput { | 80 | let claims = VerifyClaimsInput { |
100 | specific_claims: SpecificClaims::Download(oid), | 81 | specific_claims: SpecificClaims::Download(oid), |
@@ -102,27 +83,48 @@ pub fn authorize_get(
102 | }; | 83 | }; |
103 | if !verify_claims(conf, &claims, headers)? { | 84 | if !verify_claims(conf, &claims, headers)? { |
104 | return Err(make_error_resp( | 85 | return Err(make_error_resp( |
105 | StatusCode::UNAUTHORIZED, | 86 | http::StatusCode::UNAUTHORIZED, |
106 | "Repository not found", | 87 | "Repository not found", |
107 | )); | 88 | )); |
108 | } | 89 | } |
109 | Ok(()) | 90 | Ok(()) |
110 | } | 91 | } |
111 | 92 | ||
112 | pub struct VerifyClaimsInput<'a> { | 93 | fn forwarded_from_trusted_host( |
113 | pub specific_claims: SpecificClaims, | 94 | headers: &http::HeaderMap, |
114 | pub repo_path: &'a str, | 95 | trusted: &HashSet<String>, |
96 | ) -> Result<bool, GitLfsErrorResponse<'static>> { | ||
97 | if let Some(forwarded_host) = headers.get("X-Forwarded-Host") { | ||
98 | if let Ok(forwarded_host) = forwarded_host.to_str() { | ||
99 | if trusted.contains(forwarded_host) { | ||
100 | return Ok(true); | ||
101 | } | ||
102 | } else { | ||
103 | return Err(make_error_resp( | ||
104 | http::StatusCode::NOT_FOUND, | ||
105 | "Invalid X-Forwarded-Host header", | ||
106 | )); | ||
107 | } | ||
108 | } | ||
109 | Ok(false) | ||
110 | } | ||
111 | |||
112 | struct VerifyClaimsInput<'a> { | ||
113 | specific_claims: SpecificClaims, | ||
114 | repo_path: &'a str, | ||
115 | } | 115 | } |
116 | 116 | ||
117 | fn verify_claims( | 117 | fn verify_claims( |
118 | conf: &AuthorizationConfig, | 118 | conf: &AuthorizationConfig, |
119 | claims: &VerifyClaimsInput, | 119 | claims: &VerifyClaimsInput, |
120 | headers: &HeaderMap, | 120 | headers: &http::HeaderMap, |
121 | ) -> Result<bool, GitLfsErrorResponse<'static>> { | 121 | ) -> Result<bool, GitLfsErrorResponse<'static>> { |
122 | const INVALID_AUTHZ_HEADER: GitLfsErrorResponse = | 122 | const INVALID_AUTHZ_HEADER: GitLfsErrorResponse = make_error_resp( |
123 | make_error_resp(StatusCode::BAD_REQUEST, "Invalid authorization header"); | 123 | http::StatusCode::BAD_REQUEST, |
124 | "Invalid authorization header", | ||
125 | ); | ||
124 | 126 | ||
125 | let Some(authz) = headers.get(header::AUTHORIZATION) else { | 127 | let Some(authz) = headers.get(http::header::AUTHORIZATION) else { |
126 | return Ok(false); | 128 | return Ok(false); |
127 | }; | 129 | }; |
128 | let authz = authz.to_str().map_err(|_| INVALID_AUTHZ_HEADER)?; | 130 | let authz = authz.to_str().map_err(|_| INVALID_AUTHZ_HEADER)?; |
@@ -141,7 +143,12 @@ fn verify_claims(
141 | }, | 143 | }, |
142 | &conf.key, | 144 | &conf.key, |
143 | ) | 145 | ) |
144 | .ok_or_else(|| make_error_resp(StatusCode::INTERNAL_SERVER_ERROR, "Internal server error"))?; | 146 | .ok_or_else(|| { |
147 | make_error_resp( | ||
148 | http::StatusCode::INTERNAL_SERVER_ERROR, | ||
149 | "Internal server error", | ||
150 | ) | ||
151 | })?; | ||
145 | if tag != expected_tag { | 152 | if tag != expected_tag { |
146 | return Err(INVALID_AUTHZ_HEADER); | 153 | return Err(INVALID_AUTHZ_HEADER); |
147 | } | 154 | } |
@@ -175,8 +182,11 @@ fn test_validate_claims() {
175 | repo_path: claims.repo_path, | 182 | repo_path: claims.repo_path, |
176 | specific_claims: claims.specific_claims, | 183 | specific_claims: claims.specific_claims, |
177 | }; | 184 | }; |
178 | let mut headers = HeaderMap::new(); | 185 | let mut headers = http::HeaderMap::new(); |
179 | headers.insert(header::AUTHORIZATION, header_value.try_into().unwrap()); | 186 | headers.insert( |
187 | http::header::AUTHORIZATION, | ||
188 | header_value.try_into().unwrap(), | ||
189 | ); | ||
180 | 190 | ||
181 | assert!(verify_claims(&conf, &verification_claims, &headers).unwrap()); | 191 | assert!(verify_claims(&conf, &verification_claims, &headers).unwrap()); |
182 | } | 192 | } |
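Same pattern in authz.rs: the public entry points (authorize_batch, authorize_get) now sit above the private helpers they call, and VerifyClaimsInput loses its pub. For reference, a small sketch of what the relocated forwarded_from_trusted_host guarantees, written as it could appear next to the unit test in this module (the hostname is made up):

```rust
use std::collections::HashSet;

use axum::http;

// Sketch: a request forwarded by a reverse proxy on the trust list is
// recognized as trusted; absence of the header is simply "not trusted".
fn demo_trusted_host() {
    let trusted: HashSet<String> = ["git.example.com".to_string()].into();

    let mut headers = http::HeaderMap::new();
    headers.insert("X-Forwarded-Host", "git.example.com".parse().unwrap());
    assert!(matches!(
        forwarded_from_trusted_host(&headers, &trusted),
        Ok(true)
    ));

    // No X-Forwarded-Host header: Ok(false). A header value that is not
    // valid UTF-8 would instead surface as an error response.
    assert!(matches!(
        forwarded_from_trusted_host(&http::HeaderMap::new(), &trusted),
        Ok(false)
    ));
}
```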
diff --git a/gitolfs3-server/src/config.rs b/gitolfs3-server/src/config.rs
index 75e84dc..c6a51a5 100644
--- a/gitolfs3-server/src/config.rs
+++ b/gitolfs3-server/src/config.rs
@@ -2,66 +2,6 @@ use std::collections::HashSet;
2 | 2 | ||
3 | use gitolfs3_common::{load_key, Key}; | 3 | use gitolfs3_common::{load_key, Key}; |
4 | 4 | ||
5 | struct Env { | ||
6 | s3_access_key_id: String, | ||
7 | s3_secret_access_key: String, | ||
8 | s3_bucket: String, | ||
9 | s3_region: String, | ||
10 | s3_endpoint: String, | ||
11 | base_url: String, | ||
12 | key_path: String, | ||
13 | listen_host: String, | ||
14 | listen_port: String, | ||
15 | download_limit: String, | ||
16 | trusted_forwarded_hosts: String, | ||
17 | } | ||
18 | |||
19 | fn require_env(name: &str) -> Result<String, String> { | ||
20 | std::env::var(name) | ||
21 | .map_err(|_| format!("environment variable {name} should be defined and valid")) | ||
22 | } | ||
23 | |||
24 | impl Env { | ||
25 | fn load() -> Result<Env, String> { | ||
26 | Ok(Env { | ||
27 | s3_secret_access_key: require_env("GITOLFS3_S3_SECRET_ACCESS_KEY_FILE")?, | ||
28 | s3_access_key_id: require_env("GITOLFS3_S3_ACCESS_KEY_ID_FILE")?, | ||
29 | s3_region: require_env("GITOLFS3_S3_REGION")?, | ||
30 | s3_endpoint: require_env("GITOLFS3_S3_ENDPOINT")?, | ||
31 | s3_bucket: require_env("GITOLFS3_S3_BUCKET")?, | ||
32 | base_url: require_env("GITOLFS3_BASE_URL")?, | ||
33 | key_path: require_env("GITOLFS3_KEY_PATH")?, | ||
34 | listen_host: require_env("GITOLFS3_LISTEN_HOST")?, | ||
35 | listen_port: require_env("GITOLFS3_LISTEN_PORT")?, | ||
36 | download_limit: require_env("GITOLFS3_DOWNLOAD_LIMIT")?, | ||
37 | trusted_forwarded_hosts: std::env::var("GITOLFS3_TRUSTED_FORWARDED_HOSTS") | ||
38 | .unwrap_or_default(), | ||
39 | }) | ||
40 | } | ||
41 | } | ||
42 | |||
43 | fn get_s3_client(env: &Env) -> Result<aws_sdk_s3::Client, std::io::Error> { | ||
44 | let access_key_id = std::fs::read_to_string(&env.s3_access_key_id)?; | ||
45 | let secret_access_key = std::fs::read_to_string(&env.s3_secret_access_key)?; | ||
46 | |||
47 | let credentials = aws_sdk_s3::config::Credentials::new( | ||
48 | access_key_id, | ||
49 | secret_access_key, | ||
50 | None, | ||
51 | None, | ||
52 | "gitolfs3-env", | ||
53 | ); | ||
54 | let config = aws_config::SdkConfig::builder() | ||
55 | .behavior_version(aws_config::BehaviorVersion::latest()) | ||
56 | .region(aws_config::Region::new(env.s3_region.clone())) | ||
57 | .endpoint_url(&env.s3_endpoint) | ||
58 | .credentials_provider(aws_sdk_s3::config::SharedCredentialsProvider::new( | ||
59 | credentials, | ||
60 | )) | ||
61 | .build(); | ||
62 | Ok(aws_sdk_s3::Client::new(&config)) | ||
63 | } | ||
64 | |||
65 | pub struct Config { | 5 | pub struct Config { |
66 | pub listen_addr: (String, u16), | 6 | pub listen_addr: (String, u16), |
67 | pub base_url: String, | 7 | pub base_url: String, |
@@ -83,7 +23,7 @@ impl Config {
83 | Err(e) => return Err(format!("failed to load configuration: {e}")), | 23 | Err(e) => return Err(format!("failed to load configuration: {e}")), |
84 | }; | 24 | }; |
85 | 25 | ||
86 | let s3_client = match get_s3_client(&env) { | 26 | let s3_client = match create_s3_client(&env) { |
87 | Ok(s3_client) => s3_client, | 27 | Ok(s3_client) => s3_client, |
88 | Err(e) => return Err(format!("failed to create S3 client: {e}")), | 28 | Err(e) => return Err(format!("failed to create S3 client: {e}")), |
89 | }; | 29 | }; |
@@ -120,3 +60,63 @@ impl Config {
120 | }) | 60 | }) |
121 | } | 61 | } |
122 | } | 62 | } |
63 | |||
64 | fn create_s3_client(env: &Env) -> Result<aws_sdk_s3::Client, std::io::Error> { | ||
65 | let access_key_id = std::fs::read_to_string(&env.s3_access_key_id)?; | ||
66 | let secret_access_key = std::fs::read_to_string(&env.s3_secret_access_key)?; | ||
67 | |||
68 | let credentials = aws_sdk_s3::config::Credentials::new( | ||
69 | access_key_id, | ||
70 | secret_access_key, | ||
71 | None, | ||
72 | None, | ||
73 | "gitolfs3-env", | ||
74 | ); | ||
75 | let config = aws_config::SdkConfig::builder() | ||
76 | .behavior_version(aws_config::BehaviorVersion::latest()) | ||
77 | .region(aws_config::Region::new(env.s3_region.clone())) | ||
78 | .endpoint_url(&env.s3_endpoint) | ||
79 | .credentials_provider(aws_sdk_s3::config::SharedCredentialsProvider::new( | ||
80 | credentials, | ||
81 | )) | ||
82 | .build(); | ||
83 | Ok(aws_sdk_s3::Client::new(&config)) | ||
84 | } | ||
85 | |||
86 | struct Env { | ||
87 | s3_access_key_id: String, | ||
88 | s3_secret_access_key: String, | ||
89 | s3_bucket: String, | ||
90 | s3_region: String, | ||
91 | s3_endpoint: String, | ||
92 | base_url: String, | ||
93 | key_path: String, | ||
94 | listen_host: String, | ||
95 | listen_port: String, | ||
96 | download_limit: String, | ||
97 | trusted_forwarded_hosts: String, | ||
98 | } | ||
99 | |||
100 | impl Env { | ||
101 | fn load() -> Result<Env, String> { | ||
102 | Ok(Env { | ||
103 | s3_secret_access_key: require_env("GITOLFS3_S3_SECRET_ACCESS_KEY_FILE")?, | ||
104 | s3_access_key_id: require_env("GITOLFS3_S3_ACCESS_KEY_ID_FILE")?, | ||
105 | s3_region: require_env("GITOLFS3_S3_REGION")?, | ||
106 | s3_endpoint: require_env("GITOLFS3_S3_ENDPOINT")?, | ||
107 | s3_bucket: require_env("GITOLFS3_S3_BUCKET")?, | ||
108 | base_url: require_env("GITOLFS3_BASE_URL")?, | ||
109 | key_path: require_env("GITOLFS3_KEY_PATH")?, | ||
110 | listen_host: require_env("GITOLFS3_LISTEN_HOST")?, | ||
111 | listen_port: require_env("GITOLFS3_LISTEN_PORT")?, | ||
112 | download_limit: require_env("GITOLFS3_DOWNLOAD_LIMIT")?, | ||
113 | trusted_forwarded_hosts: std::env::var("GITOLFS3_TRUSTED_FORWARDED_HOSTS") | ||
114 | .unwrap_or_default(), | ||
115 | }) | ||
116 | } | ||
117 | } | ||
118 | |||
119 | fn require_env(name: &str) -> Result<String, String> { | ||
120 | std::env::var(name) | ||
121 | .map_err(|_| format!("environment variable {name} should be defined and valid")) | ||
122 | } | ||
diff --git a/gitolfs3-server/src/dlimit.rs b/gitolfs3-server/src/dlimit.rs
index f68bec1..7a82a18 100644
--- a/gitolfs3-server/src/dlimit.rs
+++ b/gitolfs3-server/src/dlimit.rs
@@ -55,7 +55,7 @@ impl DownloadLimiter {
55 | Ok(true) | 55 | Ok(true) |
56 | } | 56 | } |
57 | 57 | ||
58 | pub async fn reset(&mut self) { | 58 | async fn reset(&mut self) { |
59 | self.current = 0; | 59 | self.current = 0; |
60 | if let Err(e) = self.write_new_count().await { | 60 | if let Err(e) = self.write_new_count().await { |
61 | println!("Failed to reset download counter: {e}"); | 61 | println!("Failed to reset download counter: {e}"); |
diff --git a/gitolfs3-server/src/handler.rs b/gitolfs3-server/src/handler.rs
index 6516291..b9f9bcf 100644
--- a/gitolfs3-server/src/handler.rs
+++ b/gitolfs3-server/src/handler.rs
@@ -3,7 +3,7 @@ use std::{collections::HashMap, sync::Arc};
3 | use aws_sdk_s3::{error::SdkError, operation::head_object::HeadObjectOutput}; | 3 | use aws_sdk_s3::{error::SdkError, operation::head_object::HeadObjectOutput}; |
4 | use axum::{ | 4 | use axum::{ |
5 | extract::{Path, State}, | 5 | extract::{Path, State}, |
6 | http::{header, HeaderMap, StatusCode}, | 6 | http, |
7 | response::{IntoResponse, Response}, | 7 | response::{IntoResponse, Response}, |
8 | Json, | 8 | Json, |
9 | }; | 9 | }; |
@@ -33,102 +33,6 @@ pub struct AppState {
33 | pub dl_limiter: Arc<Mutex<DownloadLimiter>>, | 33 | pub dl_limiter: Arc<Mutex<DownloadLimiter>>, |
34 | } | 34 | } |
35 | 35 | ||
36 | fn validate_checksum(oid: Oid, obj: &HeadObjectOutput) -> bool { | ||
37 | if let Some(checksum) = obj.checksum_sha256() { | ||
38 | if let Ok(checksum) = BASE64_STANDARD.decode(checksum) { | ||
39 | if let Ok(checksum32b) = TryInto::<[u8; 32]>::try_into(checksum) { | ||
40 | return Oid::from(checksum32b) == oid; | ||
41 | } | ||
42 | } | ||
43 | } | ||
44 | true | ||
45 | } | ||
46 | |||
47 | fn validate_size(expected: i64, obj: &HeadObjectOutput) -> bool { | ||
48 | if let Some(length) = obj.content_length() { | ||
49 | return length == expected; | ||
50 | } | ||
51 | true | ||
52 | } | ||
53 | |||
54 | async fn handle_upload_object( | ||
55 | state: &AppState, | ||
56 | repo: &str, | ||
57 | obj: &BatchRequestObject, | ||
58 | ) -> Option<BatchResponseObject> { | ||
59 | let (oid0, oid1) = (HexByte(obj.oid[0]), HexByte(obj.oid[1])); | ||
60 | let full_path = format!("{repo}/lfs/objects/{}/{}/{}", oid0, oid1, obj.oid); | ||
61 | |||
62 | match state | ||
63 | .s3_client | ||
64 | .head_object() | ||
65 | .bucket(&state.s3_bucket) | ||
66 | .key(full_path.clone()) | ||
67 | .checksum_mode(aws_sdk_s3::types::ChecksumMode::Enabled) | ||
68 | .send() | ||
69 | .await | ||
70 | { | ||
71 | Ok(result) => { | ||
72 | if validate_size(obj.size, &result) && validate_checksum(obj.oid, &result) { | ||
73 | return None; | ||
74 | } | ||
75 | } | ||
76 | Err(SdkError::ServiceError(e)) if e.err().is_not_found() => {} | ||
77 | Err(e) => { | ||
78 | println!("Failed to HeadObject (repo {repo}, OID {}): {e}", obj.oid); | ||
79 | return Some(BatchResponseObject::error( | ||
80 | obj, | ||
81 | StatusCode::INTERNAL_SERVER_ERROR, | ||
82 | "Failed to query object information".to_string(), | ||
83 | )); | ||
84 | } | ||
85 | }; | ||
86 | |||
87 | let expires_in = std::time::Duration::from_secs(5 * 60); | ||
88 | let expires_at = Utc::now() + expires_in; | ||
89 | |||
90 | let Ok(config) = aws_sdk_s3::presigning::PresigningConfig::expires_in(expires_in) else { | ||
91 | return Some(BatchResponseObject::error( | ||
92 | obj, | ||
93 | StatusCode::INTERNAL_SERVER_ERROR, | ||
94 | "Failed to generate upload URL".to_string(), | ||
95 | )); | ||
96 | }; | ||
97 | let Ok(presigned) = state | ||
98 | .s3_client | ||
99 | .put_object() | ||
100 | .bucket(&state.s3_bucket) | ||
101 | .key(full_path) | ||
102 | .checksum_sha256(obj.oid.to_string()) | ||
103 | .content_length(obj.size) | ||
104 | .presigned(config) | ||
105 | .await | ||
106 | else { | ||
107 | return Some(BatchResponseObject::error( | ||
108 | obj, | ||
109 | StatusCode::INTERNAL_SERVER_ERROR, | ||
110 | "Failed to generate upload URL".to_string(), | ||
111 | )); | ||
112 | }; | ||
113 | Some(BatchResponseObject { | ||
114 | oid: obj.oid, | ||
115 | size: obj.size, | ||
116 | authenticated: Some(true), | ||
117 | actions: BatchResponseObjectActions { | ||
118 | upload: Some(BatchResponseObjectAction { | ||
119 | header: presigned | ||
120 | .headers() | ||
121 | .map(|(k, v)| (k.to_owned(), v.to_owned())) | ||
122 | .collect(), | ||
123 | expires_at, | ||
124 | href: presigned.uri().to_string(), | ||
125 | }), | ||
126 | ..Default::default() | ||
127 | }, | ||
128 | error: None, | ||
129 | }) | ||
130 | } | ||
131 | |||
132 | async fn handle_download_object( | 36 | async fn handle_download_object( |
133 | state: &AppState, | 37 | state: &AppState, |
134 | repo: &str, | 38 | repo: &str, |
@@ -152,24 +56,24 @@ async fn handle_download_object(
152 | println!("Failed to HeadObject (repo {repo}, OID {}): {e}", obj.oid); | 56 | println!("Failed to HeadObject (repo {repo}, OID {}): {e}", obj.oid); |
153 | return BatchResponseObject::error( | 57 | return BatchResponseObject::error( |
154 | obj, | 58 | obj, |
155 | StatusCode::INTERNAL_SERVER_ERROR, | 59 | http::StatusCode::INTERNAL_SERVER_ERROR, |
156 | "Failed to query object information".to_string(), | 60 | "Failed to query object information".to_string(), |
157 | ); | 61 | ); |
158 | } | 62 | } |
159 | }; | 63 | }; |
160 | 64 | ||
161 | // Scaleway actually doesn't provide SHA256 suport, but maybe in the future :) | 65 | // Scaleway actually doesn't provide SHA256 support, but maybe in the future :) |
162 | if !validate_checksum(obj.oid, &result) { | 66 | if !s3_validate_checksum(obj.oid, &result) { |
163 | return BatchResponseObject::error( | 67 | return BatchResponseObject::error( |
164 | obj, | 68 | obj, |
165 | StatusCode::UNPROCESSABLE_ENTITY, | 69 | http::StatusCode::UNPROCESSABLE_ENTITY, |
166 | "Object corrupted".to_string(), | 70 | "Object corrupted".to_string(), |
167 | ); | 71 | ); |
168 | } | 72 | } |
169 | if !validate_size(obj.size, &result) { | 73 | if !s3_validate_size(obj.size, &result) { |
170 | return BatchResponseObject::error( | 74 | return BatchResponseObject::error( |
171 | obj, | 75 | obj, |
172 | StatusCode::UNPROCESSABLE_ENTITY, | 76 | http::StatusCode::UNPROCESSABLE_ENTITY, |
173 | "Incorrect size specified (or object corrupted)".to_string(), | 77 | "Incorrect size specified (or object corrupted)".to_string(), |
174 | ); | 78 | ); |
175 | } | 79 | } |
@@ -181,7 +85,7 @@ async fn handle_download_object(
181 | let Ok(config) = aws_sdk_s3::presigning::PresigningConfig::expires_in(expires_in) else { | 85 | let Ok(config) = aws_sdk_s3::presigning::PresigningConfig::expires_in(expires_in) else { |
182 | return BatchResponseObject::error( | 86 | return BatchResponseObject::error( |
183 | obj, | 87 | obj, |
184 | StatusCode::INTERNAL_SERVER_ERROR, | 88 | http::StatusCode::INTERNAL_SERVER_ERROR, |
185 | "Failed to generate upload URL".to_string(), | 89 | "Failed to generate upload URL".to_string(), |
186 | ); | 90 | ); |
187 | }; | 91 | }; |
@@ -195,7 +99,7 @@ async fn handle_download_object(
195 | else { | 99 | else { |
196 | return BatchResponseObject::error( | 100 | return BatchResponseObject::error( |
197 | obj, | 101 | obj, |
198 | StatusCode::INTERNAL_SERVER_ERROR, | 102 | http::StatusCode::INTERNAL_SERVER_ERROR, |
199 | "Failed to generate upload URL".to_string(), | 103 | "Failed to generate upload URL".to_string(), |
200 | ); | 104 | ); |
201 | }; | 105 | }; |
@@ -231,7 +135,7 @@ async fn handle_download_object(
231 | Ok(false) => { | 135 | Ok(false) => { |
232 | return BatchResponseObject::error( | 136 | return BatchResponseObject::error( |
233 | obj, | 137 | obj, |
234 | StatusCode::SERVICE_UNAVAILABLE, | 138 | http::StatusCode::SERVICE_UNAVAILABLE, |
235 | "Public LFS downloads temporarily unavailable".to_string(), | 139 | "Public LFS downloads temporarily unavailable".to_string(), |
236 | ); | 140 | ); |
237 | } | 141 | } |
@@ -239,7 +143,7 @@ async fn handle_download_object(
239 | println!("Failed to request {content_length} bytes from download limiter: {e}"); | 143 | println!("Failed to request {content_length} bytes from download limiter: {e}"); |
240 | return BatchResponseObject::error( | 144 | return BatchResponseObject::error( |
241 | obj, | 145 | obj, |
242 | StatusCode::INTERNAL_SERVER_ERROR, | 146 | http::StatusCode::INTERNAL_SERVER_ERROR, |
243 | "Internal server error".to_string(), | 147 | "Internal server error".to_string(), |
244 | ); | 148 | ); |
245 | } | 149 | } |
@@ -257,7 +161,7 @@ async fn handle_download_object(
257 | ) else { | 161 | ) else { |
258 | return BatchResponseObject::error( | 162 | return BatchResponseObject::error( |
259 | obj, | 163 | obj, |
260 | StatusCode::INTERNAL_SERVER_ERROR, | 164 | http::StatusCode::INTERNAL_SERVER_ERROR, |
261 | "Internal server error".to_string(), | 165 | "Internal server error".to_string(), |
262 | ); | 166 | ); |
263 | }; | 167 | }; |
@@ -292,83 +196,6 @@ async fn handle_download_object(
292 | } | 196 | } |
293 | } | 197 | } |
294 | 198 | ||
295 | fn repo_exists(name: &str) -> bool { | ||
296 | let Ok(metadata) = std::fs::metadata(name) else { | ||
297 | return false; | ||
298 | }; | ||
299 | metadata.is_dir() | ||
300 | } | ||
301 | |||
302 | fn is_repo_public(name: &str) -> Option<bool> { | ||
303 | if !repo_exists(name) { | ||
304 | return None; | ||
305 | } | ||
306 | match std::fs::metadata(format!("{name}/git-daemon-export-ok")) { | ||
307 | Ok(metadata) if metadata.is_file() => Some(true), | ||
308 | Err(e) if e.kind() == std::io::ErrorKind::NotFound => Some(false), | ||
309 | _ => None, | ||
310 | } | ||
311 | } | ||
312 | |||
313 | pub async fn batch( | ||
314 | State(state): State<Arc<AppState>>, | ||
315 | headers: HeaderMap, | ||
316 | RepositoryName(repo): RepositoryName, | ||
317 | GitLfsJson(Json(payload)): GitLfsJson<BatchRequest>, | ||
318 | ) -> Response { | ||
319 | let Some(public) = is_repo_public(&repo) else { | ||
320 | return REPO_NOT_FOUND.into_response(); | ||
321 | }; | ||
322 | let Trusted(trusted) = match authorize_batch( | ||
323 | &state.authz_conf, | ||
324 | &repo, | ||
325 | public, | ||
326 | payload.operation, | ||
327 | &headers, | ||
328 | ) { | ||
329 | Ok(authn) => authn, | ||
330 | Err(e) => return e.into_response(), | ||
331 | }; | ||
332 | |||
333 | if !headers | ||
334 | .get_all("Accept") | ||
335 | .iter() | ||
336 | .filter_map(|v| v.to_str().ok()) | ||
337 | .any(is_git_lfs_json_mimetype) | ||
338 | { | ||
339 | let message = format!("Expected `{LFS_MIME}` in list of acceptable response media types"); | ||
340 | return make_error_resp(StatusCode::NOT_ACCEPTABLE, &message).into_response(); | ||
341 | } | ||
342 | |||
343 | if payload.hash_algo != HashAlgo::Sha256 { | ||
344 | let message = "Unsupported hashing algorithm specified"; | ||
345 | return make_error_resp(StatusCode::CONFLICT, message).into_response(); | ||
346 | } | ||
347 | if !payload.transfers.is_empty() && !payload.transfers.contains(&TransferAdapter::Basic) { | ||
348 | let message = "Unsupported transfer adapter specified (supported: basic)"; | ||
349 | return make_error_resp(StatusCode::CONFLICT, message).into_response(); | ||
350 | } | ||
351 | |||
352 | let mut resp = BatchResponse { | ||
353 | transfer: TransferAdapter::Basic, | ||
354 | objects: vec![], | ||
355 | hash_algo: HashAlgo::Sha256, | ||
356 | }; | ||
357 | for obj in payload.objects { | ||
358 | match payload.operation { | ||
359 | Operation::Download => resp | ||
360 | .objects | ||
361 | .push(handle_download_object(&state, &repo, &obj, trusted).await), | ||
362 | Operation::Upload => { | ||
363 | if let Some(obj_resp) = handle_upload_object(&state, &repo, &obj).await { | ||
364 | resp.objects.push(obj_resp); | ||
365 | } | ||
366 | } | ||
367 | }; | ||
368 | } | ||
369 | GitLfsJson(Json(resp)).into_response() | ||
370 | } | ||
371 | |||
372 | #[derive(Deserialize, Copy, Clone)] | 199 | #[derive(Deserialize, Copy, Clone)] |
373 | #[serde(remote = "Self")] | 200 | #[serde(remote = "Self")] |
374 | pub struct FileParams { | 201 | pub struct FileParams { |
@@ -382,11 +209,11 @@ impl<'de> Deserialize<'de> for FileParams {
382 | where | 209 | where |
383 | D: serde::Deserializer<'de>, | 210 | D: serde::Deserializer<'de>, |
384 | { | 211 | { |
385 | let unchecked @ FileParams { | 212 | let unchecked @ Self { |
386 | oid0: HexByte(oid0), | 213 | oid0: HexByte(oid0), |
387 | oid1: HexByte(oid1), | 214 | oid1: HexByte(oid1), |
388 | oid, | 215 | oid, |
389 | } = FileParams::deserialize(deserializer)?; | 216 | } = Self::deserialize(deserializer)?; |
390 | if oid0 != oid.as_bytes()[0] { | 217 | if oid0 != oid.as_bytes()[0] { |
391 | return Err(de::Error::custom( | 218 | return Err(de::Error::custom( |
392 | "first OID path part does not match first byte of full OID", | 219 | "first OID path part does not match first byte of full OID", |
@@ -401,9 +228,9 @@ impl<'de> Deserialize<'de> for FileParams {
401 | } | 228 | } |
402 | } | 229 | } |
403 | 230 | ||
404 | pub async fn obj_download( | 231 | pub async fn handle_obj_download( |
405 | State(state): State<Arc<AppState>>, | 232 | State(state): State<Arc<AppState>>, |
406 | headers: HeaderMap, | 233 | headers: http::HeaderMap, |
407 | RepositoryName(repo): RepositoryName, | 234 | RepositoryName(repo): RepositoryName, |
408 | Path(FileParams { oid0, oid1, oid }): Path<FileParams>, | 235 | Path(FileParams { oid0, oid1, oid }): Path<FileParams>, |
409 | ) -> Response { | 236 | ) -> Response { |
@@ -425,26 +252,26 @@ pub async fn obj_download(
425 | Err(e) => { | 252 | Err(e) => { |
426 | println!("Failed to GetObject (repo {repo}, OID {oid}): {e}"); | 253 | println!("Failed to GetObject (repo {repo}, OID {oid}): {e}"); |
427 | return ( | 254 | return ( |
428 | StatusCode::INTERNAL_SERVER_ERROR, | 255 | http::StatusCode::INTERNAL_SERVER_ERROR, |
429 | "Failed to query object information", | 256 | "Failed to query object information", |
430 | ) | 257 | ) |
431 | .into_response(); | 258 | .into_response(); |
432 | } | 259 | } |
433 | }; | 260 | }; |
434 | 261 | ||
435 | let mut headers = header::HeaderMap::new(); | 262 | let mut headers = http::header::HeaderMap::new(); |
436 | if let Some(content_type) = result.content_type { | 263 | if let Some(content_type) = result.content_type { |
437 | let Ok(header_value) = content_type.try_into() else { | 264 | let Ok(header_value) = content_type.try_into() else { |
438 | return ( | 265 | return ( |
439 | StatusCode::INTERNAL_SERVER_ERROR, | 266 | http::StatusCode::INTERNAL_SERVER_ERROR, |
440 | "Object has invalid content type", | 267 | "Object has invalid content type", |
441 | ) | 268 | ) |
442 | .into_response(); | 269 | .into_response(); |
443 | }; | 270 | }; |
444 | headers.insert(header::CONTENT_TYPE, header_value); | 271 | headers.insert(http::header::CONTENT_TYPE, header_value); |
445 | } | 272 | } |
446 | if let Some(content_length) = result.content_length { | 273 | if let Some(content_length) = result.content_length { |
447 | headers.insert(header::CONTENT_LENGTH, content_length.into()); | 274 | headers.insert(http::header::CONTENT_LENGTH, content_length.into()); |
448 | } | 275 | } |
449 | 276 | ||
450 | let async_read = result.body.into_async_read(); | 277 | let async_read = result.body.into_async_read(); |
@@ -453,3 +280,176 @@ pub async fn obj_download(
453 | 280 | ||
454 | (headers, body).into_response() | 281 | (headers, body).into_response() |
455 | } | 282 | } |
283 | |||
284 | async fn handle_upload_object( | ||
285 | state: &AppState, | ||
286 | repo: &str, | ||
287 | obj: &BatchRequestObject, | ||
288 | ) -> Option<BatchResponseObject> { | ||
289 | let (oid0, oid1) = (HexByte(obj.oid[0]), HexByte(obj.oid[1])); | ||
290 | let full_path = format!("{repo}/lfs/objects/{}/{}/{}", oid0, oid1, obj.oid); | ||
291 | |||
292 | match state | ||
293 | .s3_client | ||
294 | .head_object() | ||
295 | .bucket(&state.s3_bucket) | ||
296 | .key(full_path.clone()) | ||
297 | .checksum_mode(aws_sdk_s3::types::ChecksumMode::Enabled) | ||
298 | .send() | ||
299 | .await | ||
300 | { | ||
301 | Ok(result) => { | ||
302 | if s3_validate_size(obj.size, &result) && s3_validate_checksum(obj.oid, &result) { | ||
303 | return None; | ||
304 | } | ||
305 | } | ||
306 | Err(SdkError::ServiceError(e)) if e.err().is_not_found() => {} | ||
307 | Err(e) => { | ||
308 | println!("Failed to HeadObject (repo {repo}, OID {}): {e}", obj.oid); | ||
309 | return Some(BatchResponseObject::error( | ||
310 | obj, | ||
311 | http::StatusCode::INTERNAL_SERVER_ERROR, | ||
312 | "Failed to query object information".to_string(), | ||
313 | )); | ||
314 | } | ||
315 | }; | ||
316 | |||
317 | let expires_in = std::time::Duration::from_secs(5 * 60); | ||
318 | let expires_at = Utc::now() + expires_in; | ||
319 | |||
320 | let Ok(config) = aws_sdk_s3::presigning::PresigningConfig::expires_in(expires_in) else { | ||
321 | return Some(BatchResponseObject::error( | ||
322 | obj, | ||
323 | http::StatusCode::INTERNAL_SERVER_ERROR, | ||
324 | "Failed to generate upload URL".to_string(), | ||
325 | )); | ||
326 | }; | ||
327 | let Ok(presigned) = state | ||
328 | .s3_client | ||
329 | .put_object() | ||
330 | .bucket(&state.s3_bucket) | ||
331 | .key(full_path) | ||
332 | .checksum_sha256(obj.oid.to_string()) | ||
333 | .content_length(obj.size) | ||
334 | .presigned(config) | ||
335 | .await | ||
336 | else { | ||
337 | return Some(BatchResponseObject::error( | ||
338 | obj, | ||
339 | http::StatusCode::INTERNAL_SERVER_ERROR, | ||
340 | "Failed to generate upload URL".to_string(), | ||
341 | )); | ||
342 | }; | ||
343 | Some(BatchResponseObject { | ||
344 | oid: obj.oid, | ||
345 | size: obj.size, | ||
346 | authenticated: Some(true), | ||
347 | actions: BatchResponseObjectActions { | ||
348 | upload: Some(BatchResponseObjectAction { | ||
349 | header: presigned | ||
350 | .headers() | ||
351 | .map(|(k, v)| (k.to_owned(), v.to_owned())) | ||
352 | .collect(), | ||
353 | expires_at, | ||
354 | href: presigned.uri().to_string(), | ||
355 | }), | ||
356 | ..Default::default() | ||
357 | }, | ||
358 | error: None, | ||
359 | }) | ||
360 | } | ||
361 | |||
362 | pub async fn handle_batch( | ||
363 | State(state): State<Arc<AppState>>, | ||
364 | headers: http::HeaderMap, | ||
365 | RepositoryName(repo): RepositoryName, | ||
366 | GitLfsJson(Json(payload)): GitLfsJson<BatchRequest>, | ||
367 | ) -> Response { | ||
368 | let Some(public) = is_repo_public(&repo) else { | ||
369 | return REPO_NOT_FOUND.into_response(); | ||
370 | }; | ||
371 | let Trusted(trusted) = match authorize_batch( | ||
372 | &state.authz_conf, | ||
373 | &repo, | ||
374 | public, | ||
375 | payload.operation, | ||
376 | &headers, | ||
377 | ) { | ||
378 | Ok(authn) => authn, | ||
379 | Err(e) => return e.into_response(), | ||
380 | }; | ||
381 | |||
382 | if !headers | ||
383 | .get_all("Accept") | ||
384 | .iter() | ||
385 | .filter_map(|v| v.to_str().ok()) | ||
386 | .any(is_git_lfs_json_mimetype) | ||
387 | { | ||
388 | let message = format!("Expected `{LFS_MIME}` in list of acceptable response media types"); | ||
389 | return make_error_resp(http::StatusCode::NOT_ACCEPTABLE, &message).into_response(); | ||
390 | } | ||
391 | |||
392 | if payload.hash_algo != HashAlgo::Sha256 { | ||
393 | let message = "Unsupported hashing algorithm specified"; | ||
394 | return make_error_resp(http::StatusCode::CONFLICT, message).into_response(); | ||
395 | } | ||
396 | if !payload.transfers.is_empty() && !payload.transfers.contains(&TransferAdapter::Basic) { | ||
397 | let message = "Unsupported transfer adapter specified (supported: basic)"; | ||
398 | return make_error_resp(http::StatusCode::CONFLICT, message).into_response(); | ||
399 | } | ||
400 | |||
401 | let mut resp = BatchResponse { | ||
402 | transfer: TransferAdapter::Basic, | ||
403 | objects: vec![], | ||
404 | hash_algo: HashAlgo::Sha256, | ||
405 | }; | ||
406 | for obj in payload.objects { | ||
407 | match payload.operation { | ||
408 | Operation::Download => resp | ||
409 | .objects | ||
410 | .push(handle_download_object(&state, &repo, &obj, trusted).await), | ||
411 | Operation::Upload => { | ||
412 | if let Some(obj_resp) = handle_upload_object(&state, &repo, &obj).await { | ||
413 | resp.objects.push(obj_resp); | ||
414 | } | ||
415 | } | ||
416 | }; | ||
417 | } | ||
418 | GitLfsJson(Json(resp)).into_response() | ||
419 | } | ||
420 | |||
421 | fn s3_validate_checksum(oid: Oid, obj: &HeadObjectOutput) -> bool { | ||
422 | if let Some(checksum) = obj.checksum_sha256() { | ||
423 | if let Ok(checksum) = BASE64_STANDARD.decode(checksum) { | ||
424 | if let Ok(checksum32b) = TryInto::<[u8; 32]>::try_into(checksum) { | ||
425 | return Oid::from(checksum32b) == oid; | ||
426 | } | ||
427 | } | ||
428 | } | ||
429 | true | ||
430 | } | ||
431 | |||
432 | fn s3_validate_size(expected: i64, obj: &HeadObjectOutput) -> bool { | ||
433 | if let Some(length) = obj.content_length() { | ||
434 | return length == expected; | ||
435 | } | ||
436 | true | ||
437 | } | ||
438 | |||
439 | fn repo_exists(name: &str) -> bool { | ||
440 | let Ok(metadata) = std::fs::metadata(name) else { | ||
441 | return false; | ||
442 | }; | ||
443 | metadata.is_dir() | ||
444 | } | ||
445 | |||
446 | fn is_repo_public(name: &str) -> Option<bool> { | ||
447 | if !repo_exists(name) { | ||
448 | return None; | ||
449 | } | ||
450 | match std::fs::metadata(format!("{name}/git-daemon-export-ok")) { | ||
451 | Ok(metadata) if metadata.is_file() => Some(true), | ||
452 | Err(e) if e.kind() == std::io::ErrorKind::NotFound => Some(false), | ||
453 | _ => None, | ||
454 | } | ||
455 | } | ||
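The s3_* helpers moved to the bottom of handler.rs compare S3's HeadObject metadata against what the client claimed. S3 reports ChecksumSHA256 base64-encoded, while a Git LFS OID is the same SHA-256 digest in lowercase hex, which is why s3_validate_checksum decodes before comparing. A standalone sketch of that equivalence (the hex helper below is illustrative; the real code goes through the Oid type):

```rust
use base64::prelude::*; // provides BASE64_STANDARD, as used in handler.rs

// Sketch: an S3 ChecksumSHA256 value matches a Git LFS OID exactly when
// the base64-decoded bytes equal the hex-decoded OID.
fn checksum_matches_oid(checksum_b64: &str, oid_hex: &str) -> bool {
    let Ok(from_s3) = BASE64_STANDARD.decode(checksum_b64) else {
        return false;
    };
    let Some(from_oid) = decode_hex(oid_hex) else {
        return false;
    };
    from_s3 == from_oid
}

// Illustrative hex decoder; odd-length or non-hex input yields None.
fn decode_hex(s: &str) -> Option<Vec<u8>> {
    (0..s.len())
        .step_by(2)
        .map(|i| s.get(i..i + 2).and_then(|h| u8::from_str_radix(h, 16).ok()))
        .collect()
}
```

Note that both validators deliberately return true when S3 provides no checksum or no content length, so a provider without SHA-256 support (the Scaleway comment above) does not reject every object.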
diff --git a/gitolfs3-server/src/main.rs b/gitolfs3-server/src/main.rs
index c9911ed..46e840a 100644
--- a/gitolfs3-server/src/main.rs
+++ b/gitolfs3-server/src/main.rs
@@ -10,12 +10,13 @@ use dlimit::DownloadLimiter;
10 | 10 | ||
11 | use axum::{ | 11 | use axum::{ |
12 | extract::OriginalUri, | 12 | extract::OriginalUri, |
13 | http::{StatusCode, Uri}, | 13 | http::{self, Uri}, |
14 | routing::{get, post}, | 14 | routing::{get, post}, |
15 | Router, ServiceExt, | 15 | Router, ServiceExt, |
16 | }; | 16 | }; |
17 | use handler::AppState; | 17 | use handler::{handle_batch, handle_obj_download, AppState}; |
18 | use std::{process::ExitCode, sync::Arc}; | 18 | use std::{process::ExitCode, sync::Arc}; |
19 | use tokio::net::TcpListener; | ||
19 | use tower::Layer; | 20 | use tower::Layer; |
20 | 21 | ||
21 | #[tokio::main] | 22 | #[tokio::main] |
@@ -39,14 +40,14 @@ async fn main() -> ExitCode {
39 | dl_limiter, | 40 | dl_limiter, |
40 | }); | 41 | }); |
41 | let app = Router::new() | 42 | let app = Router::new() |
42 | .route("/batch", post(handler::batch)) | 43 | .route("/batch", post(handle_batch)) |
43 | .route("/:oid0/:oid1/:oid", get(handler::obj_download)) | 44 | .route("/:oid0/:oid1/:oid", get(handle_obj_download)) |
44 | .with_state(shared_state); | 45 | .with_state(shared_state); |
45 | 46 | ||
46 | let middleware = axum::middleware::map_request(rewrite_url); | 47 | let middleware = axum::middleware::map_request(rewrite_url); |
47 | let app_with_middleware = middleware.layer(app); | 48 | let app_with_middleware = middleware.layer(app); |
48 | 49 | ||
49 | let listener = match tokio::net::TcpListener::bind(conf.listen_addr).await { | 50 | let listener = match TcpListener::bind(conf.listen_addr).await { |
50 | Ok(listener) => listener, | 51 | Ok(listener) => listener, |
51 | Err(e) => { | 52 | Err(e) => { |
52 | println!("Failed to listen: {e}"); | 53 | println!("Failed to listen: {e}"); |
@@ -63,25 +64,23 @@ async fn main() -> ExitCode {
63 | } | 64 | } |
64 | } | 65 | } |
65 | 66 | ||
66 | async fn rewrite_url<B>( | 67 | async fn rewrite_url<B>(mut req: http::Request<B>) -> Result<http::Request<B>, http::StatusCode> { |
67 | mut req: axum::http::Request<B>, | ||
68 | ) -> Result<axum::http::Request<B>, StatusCode> { | ||
69 | let uri = req.uri(); | 68 | let uri = req.uri(); |
70 | let original_uri = OriginalUri(uri.clone()); | 69 | let original_uri = OriginalUri(uri.clone()); |
71 | 70 | ||
72 | let Some(path_and_query) = uri.path_and_query() else { | 71 | let Some(path_and_query) = uri.path_and_query() else { |
73 | // L @ no path & query | 72 | // L @ no path & query |
74 | return Err(StatusCode::BAD_REQUEST); | 73 | return Err(http::StatusCode::BAD_REQUEST); |
75 | }; | 74 | }; |
76 | let Some((repo, path)) = path_and_query.path().split_once("/info/lfs/objects") else { | 75 | let Some((repo, path)) = path_and_query.path().split_once("/info/lfs/objects") else { |
77 | return Err(StatusCode::NOT_FOUND); | 76 | return Err(http::StatusCode::NOT_FOUND); |
78 | }; | 77 | }; |
79 | let repo = repo | 78 | let repo = repo |
80 | .trim_start_matches('/') | 79 | .trim_start_matches('/') |
81 | .trim_end_matches('/') | 80 | .trim_end_matches('/') |
82 | .to_string(); | 81 | .to_string(); |
83 | if !path.starts_with('/') || !repo.ends_with(".git") { | 82 | if !path.starts_with('/') || !repo.ends_with(".git") { |
84 | return Err(StatusCode::NOT_FOUND); | 83 | return Err(http::StatusCode::NOT_FOUND); |
85 | } | 84 | } |
86 | 85 | ||
87 | let mut parts = uri.clone().into_parts(); | 86 | let mut parts = uri.clone().into_parts(); |
@@ -90,7 +89,7 @@ async fn rewrite_url<B>(
90 | Some(q) => format!("{path}?{q}").try_into().ok(), | 89 | Some(q) => format!("{path}?{q}").try_into().ok(), |
91 | }; | 90 | }; |
92 | let Ok(new_uri) = Uri::from_parts(parts) else { | 91 | let Ok(new_uri) = Uri::from_parts(parts) else { |
93 | return Err(StatusCode::INTERNAL_SERVER_ERROR); | 92 | return Err(http::StatusCode::INTERNAL_SERVER_ERROR); |
94 | }; | 93 | }; |
95 | 94 | ||
96 | *req.uri_mut() = new_uri; | 95 | *req.uri_mut() = new_uri; |
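Finally, main.rs imports the renamed handle_batch and handle_obj_download, and rewrite_url now fits on one signature line. That middleware is what lets the router stay flat: the <repo>.git/info/lfs/objects prefix is stripped before routing, after the original URI has been captured. A sketch of a test that could sit in main.rs to pin the mapping down (the repository name is made up):

```rust
#[tokio::test]
async fn rewrites_lfs_object_urls() {
    // A Git LFS batch request against a repository...
    let req = axum::http::Request::builder()
        .uri("/foo.git/info/lfs/objects/batch")
        .body(())
        .unwrap();
    // ...is rewritten so the router matches it against POST /batch.
    let rewritten = rewrite_url(req).await.unwrap();
    assert_eq!(rewritten.uri().path(), "/batch");

    // Paths outside <repo>.git/info/lfs/objects are rejected outright.
    let req = axum::http::Request::builder()
        .uri("/foo.git/clone")
        .body(())
        .unwrap();
    assert_eq!(
        rewrite_url(req).await.err(),
        Some(axum::http::StatusCode::NOT_FOUND)
    );
}
```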