path: root/gitolfs3-server/src/handler.rs
author    Rutger Broekhoff  2024-07-12 00:29:57 +0200
committer Rutger Broekhoff  2024-07-12 00:29:57 +0200
commit    bc709f0f23be345a1e2ccd06acd36bd5dac40bde (patch)
tree      4ffe66b1ac246e0a9eab4a2649a7db5bb3a1ff0a /gitolfs3-server/src/handler.rs
parent    3e67a3486eed22522f4352503ef7067ca81a8050 (diff)
Restructure server (HEAD, main)
Diffstat (limited to 'gitolfs3-server/src/handler.rs')
-rw-r--r--  gitolfs3-server/src/handler.rs  388
1 file changed, 194 insertions(+), 194 deletions(-)
diff --git a/gitolfs3-server/src/handler.rs b/gitolfs3-server/src/handler.rs
index 6516291..b9f9bcf 100644
--- a/gitolfs3-server/src/handler.rs
+++ b/gitolfs3-server/src/handler.rs
@@ -3,7 +3,7 @@ use std::{collections::HashMap, sync::Arc};
 use aws_sdk_s3::{error::SdkError, operation::head_object::HeadObjectOutput};
 use axum::{
     extract::{Path, State},
-    http::{header, HeaderMap, StatusCode},
+    http,
     response::{IntoResponse, Response},
     Json,
 };
@@ -33,102 +33,6 @@ pub struct AppState {
     pub dl_limiter: Arc<Mutex<DownloadLimiter>>,
 }
 
-fn validate_checksum(oid: Oid, obj: &HeadObjectOutput) -> bool {
-    if let Some(checksum) = obj.checksum_sha256() {
-        if let Ok(checksum) = BASE64_STANDARD.decode(checksum) {
-            if let Ok(checksum32b) = TryInto::<[u8; 32]>::try_into(checksum) {
-                return Oid::from(checksum32b) == oid;
-            }
-        }
-    }
-    true
-}
-
-fn validate_size(expected: i64, obj: &HeadObjectOutput) -> bool {
-    if let Some(length) = obj.content_length() {
-        return length == expected;
-    }
-    true
-}
-
-async fn handle_upload_object(
-    state: &AppState,
-    repo: &str,
-    obj: &BatchRequestObject,
-) -> Option<BatchResponseObject> {
-    let (oid0, oid1) = (HexByte(obj.oid[0]), HexByte(obj.oid[1]));
-    let full_path = format!("{repo}/lfs/objects/{}/{}/{}", oid0, oid1, obj.oid);
-
-    match state
-        .s3_client
-        .head_object()
-        .bucket(&state.s3_bucket)
-        .key(full_path.clone())
-        .checksum_mode(aws_sdk_s3::types::ChecksumMode::Enabled)
-        .send()
-        .await
-    {
-        Ok(result) => {
-            if validate_size(obj.size, &result) && validate_checksum(obj.oid, &result) {
-                return None;
-            }
-        }
-        Err(SdkError::ServiceError(e)) if e.err().is_not_found() => {}
-        Err(e) => {
-            println!("Failed to HeadObject (repo {repo}, OID {}): {e}", obj.oid);
-            return Some(BatchResponseObject::error(
-                obj,
-                StatusCode::INTERNAL_SERVER_ERROR,
-                "Failed to query object information".to_string(),
-            ));
-        }
-    };
-
-    let expires_in = std::time::Duration::from_secs(5 * 60);
-    let expires_at = Utc::now() + expires_in;
-
-    let Ok(config) = aws_sdk_s3::presigning::PresigningConfig::expires_in(expires_in) else {
-        return Some(BatchResponseObject::error(
-            obj,
-            StatusCode::INTERNAL_SERVER_ERROR,
-            "Failed to generate upload URL".to_string(),
-        ));
-    };
-    let Ok(presigned) = state
-        .s3_client
-        .put_object()
-        .bucket(&state.s3_bucket)
-        .key(full_path)
-        .checksum_sha256(obj.oid.to_string())
-        .content_length(obj.size)
-        .presigned(config)
-        .await
-    else {
-        return Some(BatchResponseObject::error(
-            obj,
-            StatusCode::INTERNAL_SERVER_ERROR,
-            "Failed to generate upload URL".to_string(),
-        ));
-    };
-    Some(BatchResponseObject {
-        oid: obj.oid,
-        size: obj.size,
-        authenticated: Some(true),
-        actions: BatchResponseObjectActions {
-            upload: Some(BatchResponseObjectAction {
-                header: presigned
-                    .headers()
-                    .map(|(k, v)| (k.to_owned(), v.to_owned()))
-                    .collect(),
-                expires_at,
-                href: presigned.uri().to_string(),
-            }),
-            ..Default::default()
-        },
-        error: None,
-    })
-}
-
 async fn handle_download_object(
     state: &AppState,
     repo: &str,
@@ -152,24 +56,24 @@ async fn handle_download_object(
             println!("Failed to HeadObject (repo {repo}, OID {}): {e}", obj.oid);
             return BatchResponseObject::error(
                 obj,
-                StatusCode::INTERNAL_SERVER_ERROR,
+                http::StatusCode::INTERNAL_SERVER_ERROR,
                 "Failed to query object information".to_string(),
             );
         }
     };
 
-    // Scaleway actually doesn't provide SHA256 suport, but maybe in the future :)
-    if !validate_checksum(obj.oid, &result) {
+    // Scaleway actually doesn't provide SHA256 support, but maybe in the future :)
+    if !s3_validate_checksum(obj.oid, &result) {
         return BatchResponseObject::error(
             obj,
-            StatusCode::UNPROCESSABLE_ENTITY,
+            http::StatusCode::UNPROCESSABLE_ENTITY,
             "Object corrupted".to_string(),
         );
     }
-    if !validate_size(obj.size, &result) {
+    if !s3_validate_size(obj.size, &result) {
         return BatchResponseObject::error(
             obj,
-            StatusCode::UNPROCESSABLE_ENTITY,
+            http::StatusCode::UNPROCESSABLE_ENTITY,
             "Incorrect size specified (or object corrupted)".to_string(),
         );
     }
@@ -181,7 +85,7 @@ async fn handle_download_object(
     let Ok(config) = aws_sdk_s3::presigning::PresigningConfig::expires_in(expires_in) else {
         return BatchResponseObject::error(
             obj,
-            StatusCode::INTERNAL_SERVER_ERROR,
+            http::StatusCode::INTERNAL_SERVER_ERROR,
             "Failed to generate upload URL".to_string(),
         );
     };
@@ -195,7 +99,7 @@ async fn handle_download_object(
     else {
         return BatchResponseObject::error(
             obj,
-            StatusCode::INTERNAL_SERVER_ERROR,
+            http::StatusCode::INTERNAL_SERVER_ERROR,
             "Failed to generate upload URL".to_string(),
         );
     };
@@ -231,7 +135,7 @@ async fn handle_download_object(
         Ok(false) => {
             return BatchResponseObject::error(
                 obj,
-                StatusCode::SERVICE_UNAVAILABLE,
+                http::StatusCode::SERVICE_UNAVAILABLE,
                 "Public LFS downloads temporarily unavailable".to_string(),
             );
         }
@@ -239,7 +143,7 @@ async fn handle_download_object(
             println!("Failed to request {content_length} bytes from download limiter: {e}");
             return BatchResponseObject::error(
                 obj,
-                StatusCode::INTERNAL_SERVER_ERROR,
+                http::StatusCode::INTERNAL_SERVER_ERROR,
                 "Internal server error".to_string(),
             );
         }
@@ -257,7 +161,7 @@ async fn handle_download_object(
     ) else {
         return BatchResponseObject::error(
             obj,
-            StatusCode::INTERNAL_SERVER_ERROR,
+            http::StatusCode::INTERNAL_SERVER_ERROR,
             "Internal server error".to_string(),
         );
     };
@@ -292,83 +196,6 @@ async fn handle_download_object(
     }
 }
 
-fn repo_exists(name: &str) -> bool {
-    let Ok(metadata) = std::fs::metadata(name) else {
-        return false;
-    };
-    metadata.is_dir()
-}
-
-fn is_repo_public(name: &str) -> Option<bool> {
-    if !repo_exists(name) {
-        return None;
-    }
-    match std::fs::metadata(format!("{name}/git-daemon-export-ok")) {
-        Ok(metadata) if metadata.is_file() => Some(true),
-        Err(e) if e.kind() == std::io::ErrorKind::NotFound => Some(false),
-        _ => None,
-    }
-}
-
-pub async fn batch(
-    State(state): State<Arc<AppState>>,
-    headers: HeaderMap,
-    RepositoryName(repo): RepositoryName,
-    GitLfsJson(Json(payload)): GitLfsJson<BatchRequest>,
-) -> Response {
-    let Some(public) = is_repo_public(&repo) else {
-        return REPO_NOT_FOUND.into_response();
-    };
-    let Trusted(trusted) = match authorize_batch(
-        &state.authz_conf,
-        &repo,
-        public,
-        payload.operation,
-        &headers,
-    ) {
-        Ok(authn) => authn,
-        Err(e) => return e.into_response(),
-    };
-
-    if !headers
-        .get_all("Accept")
-        .iter()
-        .filter_map(|v| v.to_str().ok())
-        .any(is_git_lfs_json_mimetype)
-    {
-        let message = format!("Expected `{LFS_MIME}` in list of acceptable response media types");
-        return make_error_resp(StatusCode::NOT_ACCEPTABLE, &message).into_response();
-    }
-
-    if payload.hash_algo != HashAlgo::Sha256 {
-        let message = "Unsupported hashing algorithm specified";
-        return make_error_resp(StatusCode::CONFLICT, message).into_response();
-    }
-    if !payload.transfers.is_empty() && !payload.transfers.contains(&TransferAdapter::Basic) {
-        let message = "Unsupported transfer adapter specified (supported: basic)";
-        return make_error_resp(StatusCode::CONFLICT, message).into_response();
-    }
-
-    let mut resp = BatchResponse {
-        transfer: TransferAdapter::Basic,
-        objects: vec![],
-        hash_algo: HashAlgo::Sha256,
-    };
-    for obj in payload.objects {
-        match payload.operation {
-            Operation::Download => resp
-                .objects
-                .push(handle_download_object(&state, &repo, &obj, trusted).await),
-            Operation::Upload => {
-                if let Some(obj_resp) = handle_upload_object(&state, &repo, &obj).await {
-                    resp.objects.push(obj_resp);
-                }
-            }
-        };
-    }
-    GitLfsJson(Json(resp)).into_response()
-}
-
 #[derive(Deserialize, Copy, Clone)]
 #[serde(remote = "Self")]
 pub struct FileParams {
@@ -382,11 +209,11 @@ impl<'de> Deserialize<'de> for FileParams {
     where
         D: serde::Deserializer<'de>,
     {
-        let unchecked @ FileParams {
+        let unchecked @ Self {
             oid0: HexByte(oid0),
             oid1: HexByte(oid1),
             oid,
-        } = FileParams::deserialize(deserializer)?;
+        } = Self::deserialize(deserializer)?;
         if oid0 != oid.as_bytes()[0] {
             return Err(de::Error::custom(
                 "first OID path part does not match first byte of full OID",
@@ -401,9 +228,9 @@ impl<'de> Deserialize<'de> for FileParams {
     }
 }
 
-pub async fn obj_download(
+pub async fn handle_obj_download(
     State(state): State<Arc<AppState>>,
-    headers: HeaderMap,
+    headers: http::HeaderMap,
     RepositoryName(repo): RepositoryName,
     Path(FileParams { oid0, oid1, oid }): Path<FileParams>,
 ) -> Response {
@@ -425,26 +252,26 @@ pub async fn obj_download(
         Err(e) => {
             println!("Failed to GetObject (repo {repo}, OID {oid}): {e}");
             return (
-                StatusCode::INTERNAL_SERVER_ERROR,
+                http::StatusCode::INTERNAL_SERVER_ERROR,
                 "Failed to query object information",
             )
                 .into_response();
         }
     };
 
-    let mut headers = header::HeaderMap::new();
+    let mut headers = http::header::HeaderMap::new();
     if let Some(content_type) = result.content_type {
         let Ok(header_value) = content_type.try_into() else {
             return (
-                StatusCode::INTERNAL_SERVER_ERROR,
+                http::StatusCode::INTERNAL_SERVER_ERROR,
                 "Object has invalid content type",
             )
                 .into_response();
         };
-        headers.insert(header::CONTENT_TYPE, header_value);
+        headers.insert(http::header::CONTENT_TYPE, header_value);
     }
     if let Some(content_length) = result.content_length {
-        headers.insert(header::CONTENT_LENGTH, content_length.into());
+        headers.insert(http::header::CONTENT_LENGTH, content_length.into());
     }
 
     let async_read = result.body.into_async_read();
@@ -453,3 +280,176 @@ pub async fn obj_download(
 
     (headers, body).into_response()
 }
+
+async fn handle_upload_object(
+    state: &AppState,
+    repo: &str,
+    obj: &BatchRequestObject,
+) -> Option<BatchResponseObject> {
+    let (oid0, oid1) = (HexByte(obj.oid[0]), HexByte(obj.oid[1]));
+    let full_path = format!("{repo}/lfs/objects/{}/{}/{}", oid0, oid1, obj.oid);
+
+    match state
+        .s3_client
+        .head_object()
+        .bucket(&state.s3_bucket)
+        .key(full_path.clone())
+        .checksum_mode(aws_sdk_s3::types::ChecksumMode::Enabled)
+        .send()
+        .await
+    {
+        Ok(result) => {
+            if s3_validate_size(obj.size, &result) && s3_validate_checksum(obj.oid, &result) {
+                return None;
+            }
+        }
+        Err(SdkError::ServiceError(e)) if e.err().is_not_found() => {}
+        Err(e) => {
+            println!("Failed to HeadObject (repo {repo}, OID {}): {e}", obj.oid);
+            return Some(BatchResponseObject::error(
+                obj,
+                http::StatusCode::INTERNAL_SERVER_ERROR,
+                "Failed to query object information".to_string(),
+            ));
+        }
+    };
+
+    let expires_in = std::time::Duration::from_secs(5 * 60);
+    let expires_at = Utc::now() + expires_in;
+
+    let Ok(config) = aws_sdk_s3::presigning::PresigningConfig::expires_in(expires_in) else {
+        return Some(BatchResponseObject::error(
+            obj,
+            http::StatusCode::INTERNAL_SERVER_ERROR,
+            "Failed to generate upload URL".to_string(),
+        ));
+    };
+    let Ok(presigned) = state
+        .s3_client
+        .put_object()
+        .bucket(&state.s3_bucket)
+        .key(full_path)
+        .checksum_sha256(obj.oid.to_string())
+        .content_length(obj.size)
+        .presigned(config)
+        .await
+    else {
+        return Some(BatchResponseObject::error(
+            obj,
+            http::StatusCode::INTERNAL_SERVER_ERROR,
+            "Failed to generate upload URL".to_string(),
+        ));
+    };
+    Some(BatchResponseObject {
+        oid: obj.oid,
+        size: obj.size,
+        authenticated: Some(true),
+        actions: BatchResponseObjectActions {
+            upload: Some(BatchResponseObjectAction {
+                header: presigned
+                    .headers()
+                    .map(|(k, v)| (k.to_owned(), v.to_owned()))
+                    .collect(),
+                expires_at,
+                href: presigned.uri().to_string(),
+            }),
+            ..Default::default()
+        },
+        error: None,
+    })
+}
+
+pub async fn handle_batch(
+    State(state): State<Arc<AppState>>,
+    headers: http::HeaderMap,
+    RepositoryName(repo): RepositoryName,
+    GitLfsJson(Json(payload)): GitLfsJson<BatchRequest>,
+) -> Response {
+    let Some(public) = is_repo_public(&repo) else {
+        return REPO_NOT_FOUND.into_response();
+    };
+    let Trusted(trusted) = match authorize_batch(
+        &state.authz_conf,
+        &repo,
+        public,
+        payload.operation,
+        &headers,
+    ) {
+        Ok(authn) => authn,
+        Err(e) => return e.into_response(),
+    };
+
+    if !headers
+        .get_all("Accept")
+        .iter()
+        .filter_map(|v| v.to_str().ok())
+        .any(is_git_lfs_json_mimetype)
+    {
+        let message = format!("Expected `{LFS_MIME}` in list of acceptable response media types");
+        return make_error_resp(http::StatusCode::NOT_ACCEPTABLE, &message).into_response();
+    }
+
+    if payload.hash_algo != HashAlgo::Sha256 {
+        let message = "Unsupported hashing algorithm specified";
+        return make_error_resp(http::StatusCode::CONFLICT, message).into_response();
+    }
+    if !payload.transfers.is_empty() && !payload.transfers.contains(&TransferAdapter::Basic) {
+        let message = "Unsupported transfer adapter specified (supported: basic)";
+        return make_error_resp(http::StatusCode::CONFLICT, message).into_response();
+    }
+
+    let mut resp = BatchResponse {
+        transfer: TransferAdapter::Basic,
+        objects: vec![],
+        hash_algo: HashAlgo::Sha256,
+    };
+    for obj in payload.objects {
+        match payload.operation {
+            Operation::Download => resp
+                .objects
+                .push(handle_download_object(&state, &repo, &obj, trusted).await),
+            Operation::Upload => {
+                if let Some(obj_resp) = handle_upload_object(&state, &repo, &obj).await {
+                    resp.objects.push(obj_resp);
+                }
+            }
+        };
+    }
+    GitLfsJson(Json(resp)).into_response()
+}
+
+fn s3_validate_checksum(oid: Oid, obj: &HeadObjectOutput) -> bool {
+    if let Some(checksum) = obj.checksum_sha256() {
+        if let Ok(checksum) = BASE64_STANDARD.decode(checksum) {
+            if let Ok(checksum32b) = TryInto::<[u8; 32]>::try_into(checksum) {
+                return Oid::from(checksum32b) == oid;
+            }
+        }
+    }
+    true
+}
+
+fn s3_validate_size(expected: i64, obj: &HeadObjectOutput) -> bool {
+    if let Some(length) = obj.content_length() {
+        return length == expected;
+    }
+    true
+}
+
+fn repo_exists(name: &str) -> bool {
+    let Ok(metadata) = std::fs::metadata(name) else {
+        return false;
+    };
+    metadata.is_dir()
+}
+
+fn is_repo_public(name: &str) -> Option<bool> {
+    if !repo_exists(name) {
+        return None;
+    }
+    match std::fs::metadata(format!("{name}/git-daemon-export-ok")) {
+        Ok(metadata) if metadata.is_file() => Some(true),
+        Err(e) if e.kind() == std::io::ErrorKind::NotFound => Some(false),
+        _ => None,
+    }
+}
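
After this restructuring, the module's public entry points are handle_batch and handle_obj_download. As a rough orientation aid only, the sketch below shows one way these handlers could be mounted on an axum Router. The route paths, the build_router name, and the axum 0.7-style :param captures are assumptions for illustration; no routing code appears in this diff, and the RepositoryName extractor presumably resolves the repository segment itself.

    // Hypothetical wiring; paths and names are assumptions, not taken from this diff.
    use std::sync::Arc;
    use axum::{
        routing::{get, post},
        Router,
    };

    fn build_router(state: Arc<AppState>) -> Router {
        Router::new()
            // Git LFS Batch API endpoint (assumed path).
            .route("/:repo/info/lfs/objects/batch", post(handle_batch))
            // Direct object download endpoint (assumed path; FileParams expects
            // the first two OID bytes as separate path segments).
            .route(
                "/:repo/info/lfs/objects/:oid0/:oid1/:oid",
                get(handle_obj_download),
            )
            .with_state(state)
    }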