diff options
author | Rutger Broekhoff | 2024-04-29 19:18:56 +0200 |
---|---|---|
committer | Rutger Broekhoff | 2024-04-29 19:18:56 +0200 |
commit | a7f9c8de31231b9fd9c67c57db659f7b01f1a3b0 (patch) | |
tree | 040c518d243769c1920b8201a07a626f4e5934cf /gitolfs3-server | |
parent | 80c4d49ad7f590f5af1304939fdaea7baf13142e (diff) | |
download | gitolfs3-a7f9c8de31231b9fd9c67c57db659f7b01f1a3b0.tar.gz gitolfs3-a7f9c8de31231b9fd9c67c57db659f7b01f1a3b0.zip |
Rename crates (and therefore commands)
Diffstat (limited to 'gitolfs3-server')
-rw-r--r-- | gitolfs3-server/Cargo.toml | 19 | ||||
-rw-r--r-- | gitolfs3-server/src/main.rs | 1154 |
2 files changed, 1173 insertions, 0 deletions
diff --git a/gitolfs3-server/Cargo.toml b/gitolfs3-server/Cargo.toml new file mode 100644 index 0000000..04edeea --- /dev/null +++ b/gitolfs3-server/Cargo.toml | |||
@@ -0,0 +1,19 @@ | |||
[package]
name = "gitolfs3-server"
version = "0.1.0"
edition = "2021"

[dependencies]
# S3 object storage backend for the LFS object payloads.
aws-config = { version = "1.1.2" }
aws-sdk-s3 = "1.12.0"
# HTTP server stack.
axum = "0.7"
base64 = "0.21"
chrono = { version = "0.4", features = ["serde"] }
# Shared helpers (keys, OIDs, claims) used across the gitolfs3 crates.
gitolfs3-common = { path = "../gitolfs3-common" }
mime = "0.3"
serde = { version = "1", features = ["derive"] }
serde_json = "1"
tokio = { version = "1.35", features = ["full"] }
tokio-util = "0.7"
tower = "0.4"
tracing-subscriber = { version = "0.3", features = ["env-filter"] }
diff --git a/gitolfs3-server/src/main.rs b/gitolfs3-server/src/main.rs new file mode 100644 index 0000000..b05a0c8 --- /dev/null +++ b/gitolfs3-server/src/main.rs | |||
@@ -0,0 +1,1154 @@ | |||
1 | use aws_sdk_s3::{error::SdkError, operation::head_object::HeadObjectOutput}; | ||
2 | use axum::{ | ||
3 | async_trait, | ||
4 | extract::{rejection, FromRequest, FromRequestParts, OriginalUri, Path, Request, State}, | ||
5 | http::{header, request::Parts, HeaderMap, HeaderValue, StatusCode, Uri}, | ||
6 | response::{IntoResponse, Response}, | ||
7 | routing::{get, post}, | ||
8 | Extension, Json, Router, ServiceExt, | ||
9 | }; | ||
10 | use base64::prelude::*; | ||
11 | use chrono::{DateTime, Utc}; | ||
12 | use gitolfs3_common::{ | ||
13 | generate_tag, load_key, Claims, Digest, HexByte, Key, Oid, Operation, SpecificClaims, | ||
14 | }; | ||
15 | use serde::{ | ||
16 | de::{self, DeserializeOwned}, | ||
17 | Deserialize, Serialize, | ||
18 | }; | ||
19 | use std::{ | ||
20 | collections::{HashMap, HashSet}, | ||
21 | process::ExitCode, | ||
22 | sync::Arc, | ||
23 | }; | ||
24 | use tokio::io::AsyncWriteExt; | ||
25 | use tower::Layer; | ||
26 | |||
/// Entry point: loads configuration, starts the hourly download-counter
/// resetter task, and serves the LFS batch/download API over HTTP.
///
/// Exit codes: 2 for configuration errors, 1 (FAILURE) for bind/serve
/// failures.
#[tokio::main]
async fn main() -> ExitCode {
    tracing_subscriber::fmt::init();

    let conf = match Config::load() {
        Ok(conf) => conf,
        Err(e) => {
            println!("Error: {e}");
            return ExitCode::from(2);
        }
    };

    // Shared download budget; the task spawned below resets it every hour.
    let dl_limiter = DownloadLimiter::new(conf.download_limit).await;
    let dl_limiter = Arc::new(tokio::sync::Mutex::new(dl_limiter));

    let resetter_dl_limiter = dl_limiter.clone();
    tokio::spawn(async move {
        loop {
            println!("Resetting download counter in one hour");
            tokio::time::sleep(std::time::Duration::from_secs(3600)).await;
            println!("Resetting download counter");
            resetter_dl_limiter.lock().await.reset().await;
        }
    });

    let shared_state = Arc::new(AppState {
        s3_client: conf.s3_client,
        s3_bucket: conf.s3_bucket,
        authz_conf: conf.authz_conf,
        base_url: conf.base_url,
        dl_limiter,
    });
    let app = Router::new()
        .route("/batch", post(batch))
        .route("/:oid0/:oid1/:oid", get(obj_download))
        .with_state(shared_state);

    // rewrite_url runs before routing: it strips the "<repo>/info/lfs/objects"
    // prefix and stashes the repository name as a request extension.
    let middleware = axum::middleware::map_request(rewrite_url);
    let app_with_middleware = middleware.layer(app);

    let listener = match tokio::net::TcpListener::bind(conf.listen_addr).await {
        Ok(listener) => listener,
        Err(e) => {
            println!("Failed to listen: {e}");
            return ExitCode::FAILURE;
        }
    };

    match axum::serve(listener, app_with_middleware.into_make_service()).await {
        Ok(_) => ExitCode::SUCCESS,
        Err(e) => {
            println!("Error serving: {e}");
            ExitCode::FAILURE
        }
    }
}
83 | |||
/// Repository path extracted from the request URL by `rewrite_url` and
/// carried along as a request extension.
#[derive(Clone)]
struct RepositoryName(String);

/// Rejection used when the `RepositoryName` extension is absent, i.e. the
/// URL-rewriting middleware did not run for this request.
struct RepositoryNameRejection;

impl IntoResponse for RepositoryNameRejection {
    fn into_response(self) -> Response {
        // A missing extension is a server wiring bug, not a client error.
        (StatusCode::INTERNAL_SERVER_ERROR, "Missing repository name").into_response()
    }
}
94 | |||
#[async_trait]
impl<S: Send + Sync> FromRequestParts<S> for RepositoryName {
    type Rejection = RepositoryNameRejection;

    // Pulls the RepositoryName extension inserted by `rewrite_url`; a miss
    // means the middleware did not run, which is a server-side error.
    async fn from_request_parts(parts: &mut Parts, state: &S) -> Result<Self, Self::Rejection> {
        let Ok(Extension(repo_name)) = Extension::<Self>::from_request_parts(parts, state).await
        else {
            return Err(RepositoryNameRejection);
        };
        Ok(repo_name)
    }
}
107 | |||
/// Middleware: rewrites `/<repo>.git/info/lfs/objects<rest>` to `<rest>`,
/// storing the original URI and the repository name as request extensions.
///
/// Returns 400 if the URI has no path-and-query, 404 if the path lacks the
/// `/info/lfs/objects` marker, the remainder is not a path, or the
/// repository part does not end in `.git`.
async fn rewrite_url<B>(
    mut req: axum::http::Request<B>,
) -> Result<axum::http::Request<B>, StatusCode> {
    let uri = req.uri();
    let original_uri = OriginalUri(uri.clone());

    let Some(path_and_query) = uri.path_and_query() else {
        // No path & query at all: malformed request.
        return Err(StatusCode::BAD_REQUEST);
    };
    let Some((repo, path)) = path_and_query.path().split_once("/info/lfs/objects") else {
        return Err(StatusCode::NOT_FOUND);
    };
    let repo = repo
        .trim_start_matches('/')
        .trim_end_matches('/')
        .to_string();
    // Only bare repositories (directories named *.git) are served, and the
    // remainder must itself start a new path segment.
    if !path.starts_with('/') || !repo.ends_with(".git") {
        return Err(StatusCode::NOT_FOUND);
    }

    // Re-assemble the URI with the stripped path, keeping the query intact.
    let mut parts = uri.clone().into_parts();
    parts.path_and_query = match path_and_query.query() {
        None => path.try_into().ok(),
        Some(q) => format!("{path}?{q}").try_into().ok(),
    };
    let Ok(new_uri) = Uri::from_parts(parts) else {
        return Err(StatusCode::INTERNAL_SERVER_ERROR);
    };

    *req.uri_mut() = new_uri;
    req.extensions_mut().insert(original_uri);
    req.extensions_mut().insert(RepositoryName(repo));

    Ok(req)
}
144 | |||
/// Shared application state handed to every request handler.
struct AppState {
    s3_client: aws_sdk_s3::Client,
    s3_bucket: String,
    authz_conf: AuthorizationConfig,
    // Should not end with a slash.
    base_url: String,
    // Byte budget for public downloads; reset hourly by a task in `main`.
    dl_limiter: Arc<tokio::sync::Mutex<DownloadLimiter>>,
}
153 | |||
/// Raw environment-variable values; parsing and validation happen in
/// `Config::load`.
struct Env {
    // NOTE: the two access-key fields hold file *paths* (from the
    // GITOLFS3_S3_*_FILE variables); the key material itself is read in
    // `get_s3_client`.
    s3_access_key_id: String,
    s3_secret_access_key: String,
    s3_bucket: String,
    s3_region: String,
    s3_endpoint: String,
    base_url: String,
    key_path: String,
    listen_host: String,
    listen_port: String,
    download_limit: String,
    trusted_forwarded_hosts: String,
}
167 | |||
/// Fetches a mandatory environment variable, mapping any lookup failure to
/// a human-readable error naming the variable.
fn require_env(name: &str) -> Result<String, String> {
    match std::env::var(name) {
        Ok(value) => Ok(value),
        Err(_) => Err(format!(
            "environment variable {name} should be defined and valid"
        )),
    }
}
172 | |||
impl Env {
    /// Reads all GITOLFS3_* environment variables. Only
    /// GITOLFS3_TRUSTED_FORWARDED_HOSTS is optional (defaults to empty).
    fn load() -> Result<Env, String> {
        Ok(Env {
            // The *_FILE variables name files holding the secrets; the
            // contents are read later in `get_s3_client`.
            s3_secret_access_key: require_env("GITOLFS3_S3_SECRET_ACCESS_KEY_FILE")?,
            s3_access_key_id: require_env("GITOLFS3_S3_ACCESS_KEY_ID_FILE")?,
            s3_region: require_env("GITOLFS3_S3_REGION")?,
            s3_endpoint: require_env("GITOLFS3_S3_ENDPOINT")?,
            s3_bucket: require_env("GITOLFS3_S3_BUCKET")?,
            base_url: require_env("GITOLFS3_BASE_URL")?,
            key_path: require_env("GITOLFS3_KEY_PATH")?,
            listen_host: require_env("GITOLFS3_LISTEN_HOST")?,
            listen_port: require_env("GITOLFS3_LISTEN_PORT")?,
            download_limit: require_env("GITOLFS3_DOWNLOAD_LIMIT")?,
            trusted_forwarded_hosts: std::env::var("GITOLFS3_TRUSTED_FORWARDED_HOSTS")
                .unwrap_or_default(),
        })
    }
}
191 | |||
/// Builds an S3 client for the region/endpoint in `env`, with credentials
/// read from the files named by `env.s3_access_key_id` /
/// `env.s3_secret_access_key`.
fn get_s3_client(env: &Env) -> Result<aws_sdk_s3::Client, std::io::Error> {
    // NOTE(review): the file contents are used verbatim — a trailing
    // newline in a key file would become part of the credential. Confirm
    // the deployment writes these files without one.
    let access_key_id = std::fs::read_to_string(&env.s3_access_key_id)?;
    let secret_access_key = std::fs::read_to_string(&env.s3_secret_access_key)?;

    let credentials = aws_sdk_s3::config::Credentials::new(
        access_key_id,
        secret_access_key,
        None,
        None,
        "gitolfs3-env",
    );
    let config = aws_config::SdkConfig::builder()
        .behavior_version(aws_config::BehaviorVersion::latest())
        .region(aws_config::Region::new(env.s3_region.clone()))
        .endpoint_url(&env.s3_endpoint)
        .credentials_provider(aws_sdk_s3::config::SharedCredentialsProvider::new(
            credentials,
        ))
        .build();
    Ok(aws_sdk_s3::Client::new(&config))
}
213 | |||
/// Fully-parsed runtime configuration derived from `Env`.
struct Config {
    // (host, port) pair passed to `TcpListener::bind`.
    listen_addr: (String, u16),
    // Public base URL of this server, without a trailing slash.
    base_url: String,
    authz_conf: AuthorizationConfig,
    s3_client: aws_sdk_s3::Client,
    s3_bucket: String,
    // Budget (in bytes) for public downloads between hourly resets.
    download_limit: u64,
}
222 | |||
impl Config {
    /// Assembles the runtime configuration: environment variables, the S3
    /// client (including secrets from files) and the HMAC signing key.
    /// Returns a human-readable error string on any failure.
    fn load() -> Result<Self, String> {
        let env = match Env::load() {
            Ok(env) => env,
            Err(e) => return Err(format!("failed to load configuration: {e}")),
        };

        let s3_client = match get_s3_client(&env) {
            Ok(s3_client) => s3_client,
            Err(e) => return Err(format!("failed to create S3 client: {e}")),
        };
        let key = match load_key(&env.key_path) {
            Ok(key) => key,
            Err(e) => return Err(format!("failed to load Gitolfs3 key: {e}")),
        };

        // Comma-separated host list; empty entries are dropped so an unset
        // variable or trailing comma yields no spurious trusted host.
        let trusted_forwarded_hosts: HashSet<String> = env
            .trusted_forwarded_hosts
            .split(',')
            .map(|s| s.to_owned())
            .filter(|s| !s.is_empty())
            .collect();
        // Normalized so hrefs can be built as "{base_url}/{path}".
        let base_url = env.base_url.trim_end_matches('/').to_string();

        let Ok(listen_port): Result<u16, _> = env.listen_port.parse() else {
            return Err("configured GITOLFS3_LISTEN_PORT is invalid".to_string());
        };
        let Ok(download_limit): Result<u64, _> = env.download_limit.parse() else {
            return Err("configured GITOLFS3_DOWNLOAD_LIMIT is invalid".to_string());
        };

        Ok(Self {
            listen_addr: (env.listen_host, listen_port),
            base_url,
            authz_conf: AuthorizationConfig {
                key,
                trusted_forwarded_hosts,
            },
            s3_client,
            s3_bucket: env.s3_bucket,
            download_limit,
        })
    }
}
267 | |||
/// Git LFS transfer adapters this server understands; anything else
/// deserializes to `Unknown` instead of failing the whole request.
#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, Clone, Copy)]
enum TransferAdapter {
    #[serde(rename = "basic")]
    Basic,
    #[serde(other)]
    Unknown,
}
275 | |||
276 | #[derive(Debug, Serialize, Deserialize, PartialEq, Eq, Clone, Copy)] | ||
277 | enum HashAlgo { | ||
278 | #[serde(rename = "sha256")] | ||
279 | Sha256, | ||
280 | #[serde(other)] | ||
281 | Unknown, | ||
282 | } | ||
283 | |||
284 | impl Default for HashAlgo { | ||
285 | fn default() -> Self { | ||
286 | Self::Sha256 | ||
287 | } | ||
288 | } | ||
289 | |||
/// One object (OID + size in bytes) named in a batch request.
#[derive(Debug, Deserialize, PartialEq, Eq, Clone)]
struct BatchRequestObject {
    oid: Oid,
    size: i64,
}

/// The optional `ref` object of a batch request (ref name only).
#[derive(Debug, Serialize, Deserialize, Clone)]
struct BatchRef {
    name: String,
}
300 | |||
// Per the LFS batch API, an omitted `transfers` field means the client
// supports (only) the basic adapter.
fn default_transfers() -> Vec<TransferAdapter> {
    vec![TransferAdapter::Basic]
}

/// Body of a Git LFS batch API request.
#[derive(Debug, Deserialize, PartialEq, Eq, Clone)]
struct BatchRequest {
    operation: Operation,
    #[serde(default = "default_transfers")]
    transfers: Vec<TransferAdapter>,
    objects: Vec<BatchRequestObject>,
    #[serde(default)]
    hash_algo: HashAlgo,
}
314 | |||
/// Wrapper around `axum::Json` that enforces the Git LFS media type when
/// extracting requests and sets it on responses.
#[derive(Debug, Clone)]
struct GitLfsJson<T>(Json<T>);

/// Media type mandated by the Git LFS batch API.
const LFS_MIME: &str = "application/vnd.git-lfs+json";

/// Rejection for `GitLfsJson` extraction: either the JSON body was invalid
/// or the request lacked the LFS content type.
enum GitLfsJsonRejection {
    Json(rejection::JsonRejection),
    MissingGitLfsJsonContentType,
}
324 | |||
impl IntoResponse for GitLfsJsonRejection {
    fn into_response(self) -> Response {
        match self {
            // Defer to axum's JSON rejection (carries its own status/detail).
            Self::Json(rej) => rej.into_response(),
            Self::MissingGitLfsJsonContentType => make_error_resp(
                StatusCode::UNSUPPORTED_MEDIA_TYPE,
                &format!("Expected request with `Content-Type: {LFS_MIME}`"),
            )
            .into_response(),
        }
    }
}
337 | |||
338 | fn is_git_lfs_json_mimetype(mimetype: &str) -> bool { | ||
339 | let Ok(mime) = mimetype.parse::<mime::Mime>() else { | ||
340 | return false; | ||
341 | }; | ||
342 | if mime.type_() != mime::APPLICATION | ||
343 | || mime.subtype() != "vnd.git-lfs" | ||
344 | || mime.suffix() != Some(mime::JSON) | ||
345 | { | ||
346 | return false; | ||
347 | } | ||
348 | match mime.get_param(mime::CHARSET) { | ||
349 | Some(mime::UTF_8) | None => true, | ||
350 | Some(_) => false, | ||
351 | } | ||
352 | } | ||
353 | |||
354 | fn has_git_lfs_json_content_type(req: &Request) -> bool { | ||
355 | let Some(content_type) = req.headers().get(header::CONTENT_TYPE) else { | ||
356 | return false; | ||
357 | }; | ||
358 | let Ok(content_type) = content_type.to_str() else { | ||
359 | return false; | ||
360 | }; | ||
361 | is_git_lfs_json_mimetype(content_type) | ||
362 | } | ||
363 | |||
#[async_trait]
impl<T, S> FromRequest<S> for GitLfsJson<T>
where
    T: DeserializeOwned,
    S: Send + Sync,
{
    type Rejection = GitLfsJsonRejection;

    // Rejects early unless the content type is the LFS JSON media type,
    // then defers to axum's JSON extractor for the actual parsing.
    async fn from_request(req: Request, state: &S) -> Result<Self, Self::Rejection> {
        if !has_git_lfs_json_content_type(&req) {
            return Err(GitLfsJsonRejection::MissingGitLfsJsonContentType);
        }
        Json::<T>::from_request(req, state)
            .await
            .map(GitLfsJson)
            .map_err(GitLfsJsonRejection::Json)
    }
}
382 | |||
impl<T: Serialize> IntoResponse for GitLfsJson<T> {
    // Serializes like axum's Json but overrides the content type with the
    // Git LFS media type expected by LFS clients.
    fn into_response(self) -> Response {
        let GitLfsJson(json) = self;
        let mut resp = json.into_response();
        resp.headers_mut().insert(
            header::CONTENT_TYPE,
            HeaderValue::from_static("application/vnd.git-lfs+json; charset=utf-8"),
        );
        resp
    }
}
394 | |||
/// Error payload in the shape mandated by the Git LFS API.
#[derive(Debug, Serialize)]
struct GitLfsErrorData<'a> {
    message: &'a str,
}

/// (status, body) pair convertible into an LFS-style error response.
type GitLfsErrorResponse<'a> = (StatusCode, GitLfsJson<GitLfsErrorData<'a>>);

// `const fn` so canned responses (e.g. REPO_NOT_FOUND) can live in consts.
const fn make_error_resp(code: StatusCode, message: &str) -> GitLfsErrorResponse {
    (code, GitLfsJson(Json(GitLfsErrorData { message })))
}
405 | |||
/// A single action (upload/download/verify) offered to the client: the
/// URL, any extra request headers, and when the grant expires.
#[derive(Debug, Serialize, Clone)]
struct BatchResponseObjectAction {
    href: String,
    #[serde(skip_serializing_if = "HashMap::is_empty")]
    header: HashMap<String, String>,
    expires_at: DateTime<Utc>,
}

/// Set of actions offered for one object; absent actions are omitted from
/// the serialized JSON.
#[derive(Default, Debug, Serialize, Clone)]
struct BatchResponseObjectActions {
    #[serde(skip_serializing_if = "Option::is_none")]
    upload: Option<BatchResponseObjectAction>,
    #[serde(skip_serializing_if = "Option::is_none")]
    download: Option<BatchResponseObjectAction>,
    #[serde(skip_serializing_if = "Option::is_none")]
    verify: Option<BatchResponseObjectAction>,
}

/// Per-object error in a batch response; `code` is an HTTP status code.
#[derive(Debug, Clone, Serialize)]
struct BatchResponseObjectError {
    code: u16,
    message: String,
}

/// One entry in the batch response, mirroring a requested object.
#[derive(Debug, Serialize, Clone)]
struct BatchResponseObject {
    oid: Oid,
    size: i64,
    #[serde(skip_serializing_if = "Option::is_none")]
    authenticated: Option<bool>,
    actions: BatchResponseObjectActions,
    #[serde(skip_serializing_if = "Option::is_none")]
    error: Option<BatchResponseObjectError>,
}
440 | |||
441 | impl BatchResponseObject { | ||
442 | fn error(obj: &BatchRequestObject, code: StatusCode, message: String) -> BatchResponseObject { | ||
443 | BatchResponseObject { | ||
444 | oid: obj.oid, | ||
445 | size: obj.size, | ||
446 | authenticated: None, | ||
447 | actions: Default::default(), | ||
448 | error: Some(BatchResponseObjectError { | ||
449 | code: code.as_u16(), | ||
450 | message, | ||
451 | }), | ||
452 | } | ||
453 | } | ||
454 | } | ||
455 | |||
/// Body of a Git LFS batch API response.
#[derive(Debug, Serialize, Clone)]
struct BatchResponse {
    transfer: TransferAdapter,
    objects: Vec<BatchResponseObject>,
    hash_algo: HashAlgo,
}
462 | |||
463 | fn validate_checksum(oid: Oid, obj: &HeadObjectOutput) -> bool { | ||
464 | if let Some(checksum) = obj.checksum_sha256() { | ||
465 | if let Ok(checksum) = BASE64_STANDARD.decode(checksum) { | ||
466 | if let Ok(checksum32b) = TryInto::<[u8; 32]>::try_into(checksum) { | ||
467 | return Oid::from(checksum32b) == oid; | ||
468 | } | ||
469 | } | ||
470 | } | ||
471 | true | ||
472 | } | ||
473 | |||
474 | fn validate_size(expected: i64, obj: &HeadObjectOutput) -> bool { | ||
475 | if let Some(length) = obj.content_length() { | ||
476 | return length == expected; | ||
477 | } | ||
478 | true | ||
479 | } | ||
480 | |||
/// Produces the batch-response entry for an upload request, or `None` when
/// the object already exists in S3 with the expected size and checksum (in
/// which case the client needs no upload action).
async fn handle_upload_object(
    state: &AppState,
    repo: &str,
    obj: &BatchRequestObject,
) -> Option<BatchResponseObject> {
    // Objects are fanned out as <repo>/lfs/objects/<b0>/<b1>/<full-oid>.
    let (oid0, oid1) = (HexByte(obj.oid[0]), HexByte(obj.oid[1]));
    let full_path = format!("{repo}/lfs/objects/{}/{}/{}", oid0, oid1, obj.oid);

    match state
        .s3_client
        .head_object()
        .bucket(&state.s3_bucket)
        .key(full_path.clone())
        .checksum_mode(aws_sdk_s3::types::ChecksumMode::Enabled)
        .send()
        .await
    {
        Ok(result) => {
            // Already stored and intact: no upload action needed.
            if validate_size(obj.size, &result) && validate_checksum(obj.oid, &result) {
                return None;
            }
        }
        // Not-found is the normal "needs uploading" case.
        Err(SdkError::ServiceError(e)) if e.err().is_not_found() => {}
        Err(e) => {
            println!("Failed to HeadObject (repo {repo}, OID {}): {e}", obj.oid);
            return Some(BatchResponseObject::error(
                obj,
                StatusCode::INTERNAL_SERVER_ERROR,
                "Failed to query object information".to_string(),
            ));
        }
    };

    let expires_in = std::time::Duration::from_secs(5 * 60);
    let expires_at = Utc::now() + expires_in;

    let Ok(config) = aws_sdk_s3::presigning::PresigningConfig::expires_in(expires_in) else {
        return Some(BatchResponseObject::error(
            obj,
            StatusCode::INTERNAL_SERVER_ERROR,
            "Failed to generate upload URL".to_string(),
        ));
    };
    // NOTE(review): the S3 API documents `checksum_sha256` as a
    // base64-encoded digest, but this passes the hex OID string — confirm
    // the target S3 implementation accepts or ignores this value.
    let Ok(presigned) = state
        .s3_client
        .put_object()
        .bucket(&state.s3_bucket)
        .key(full_path)
        .checksum_sha256(obj.oid.to_string())
        .content_length(obj.size)
        .presigned(config)
        .await
    else {
        return Some(BatchResponseObject::error(
            obj,
            StatusCode::INTERNAL_SERVER_ERROR,
            "Failed to generate upload URL".to_string(),
        ));
    };
    Some(BatchResponseObject {
        oid: obj.oid,
        size: obj.size,
        authenticated: Some(true),
        actions: BatchResponseObjectActions {
            upload: Some(BatchResponseObjectAction {
                // Presigning bakes required headers into the grant; the
                // client must echo them on the PUT.
                header: presigned
                    .headers()
                    .map(|(k, v)| (k.to_owned(), v.to_owned()))
                    .collect(),
                expires_at,
                href: presigned.uri().to_string(),
            }),
            ..Default::default()
        },
        error: None,
    })
}
558 | |||
559 | async fn handle_download_object( | ||
560 | state: &AppState, | ||
561 | repo: &str, | ||
562 | obj: &BatchRequestObject, | ||
563 | trusted: bool, | ||
564 | ) -> BatchResponseObject { | ||
565 | let (oid0, oid1) = (HexByte(obj.oid[0]), HexByte(obj.oid[1])); | ||
566 | let full_path = format!("{repo}/lfs/objects/{}/{}/{}", oid0, oid1, obj.oid); | ||
567 | |||
568 | let result = match state | ||
569 | .s3_client | ||
570 | .head_object() | ||
571 | .bucket(&state.s3_bucket) | ||
572 | .key(&full_path) | ||
573 | .checksum_mode(aws_sdk_s3::types::ChecksumMode::Enabled) | ||
574 | .send() | ||
575 | .await | ||
576 | { | ||
577 | Ok(result) => result, | ||
578 | Err(e) => { | ||
579 | println!("Failed to HeadObject (repo {repo}, OID {}): {e}", obj.oid); | ||
580 | return BatchResponseObject::error( | ||
581 | obj, | ||
582 | StatusCode::INTERNAL_SERVER_ERROR, | ||
583 | "Failed to query object information".to_string(), | ||
584 | ); | ||
585 | } | ||
586 | }; | ||
587 | |||
588 | // Scaleway actually doesn't provide SHA256 suport, but maybe in the future :) | ||
589 | if !validate_checksum(obj.oid, &result) { | ||
590 | return BatchResponseObject::error( | ||
591 | obj, | ||
592 | StatusCode::UNPROCESSABLE_ENTITY, | ||
593 | "Object corrupted".to_string(), | ||
594 | ); | ||
595 | } | ||
596 | if !validate_size(obj.size, &result) { | ||
597 | return BatchResponseObject::error( | ||
598 | obj, | ||
599 | StatusCode::UNPROCESSABLE_ENTITY, | ||
600 | "Incorrect size specified (or object corrupted)".to_string(), | ||
601 | ); | ||
602 | } | ||
603 | |||
604 | let expires_in = std::time::Duration::from_secs(5 * 60); | ||
605 | let expires_at = Utc::now() + expires_in; | ||
606 | |||
607 | if trusted { | ||
608 | let Ok(config) = aws_sdk_s3::presigning::PresigningConfig::expires_in(expires_in) else { | ||
609 | return BatchResponseObject::error( | ||
610 | obj, | ||
611 | StatusCode::INTERNAL_SERVER_ERROR, | ||
612 | "Failed to generate upload URL".to_string(), | ||
613 | ); | ||
614 | }; | ||
615 | let Ok(presigned) = state | ||
616 | .s3_client | ||
617 | .get_object() | ||
618 | .bucket(&state.s3_bucket) | ||
619 | .key(full_path) | ||
620 | .presigned(config) | ||
621 | .await | ||
622 | else { | ||
623 | return BatchResponseObject::error( | ||
624 | obj, | ||
625 | StatusCode::INTERNAL_SERVER_ERROR, | ||
626 | "Failed to generate upload URL".to_string(), | ||
627 | ); | ||
628 | }; | ||
629 | return BatchResponseObject { | ||
630 | oid: obj.oid, | ||
631 | size: obj.size, | ||
632 | authenticated: Some(true), | ||
633 | actions: BatchResponseObjectActions { | ||
634 | download: Some(BatchResponseObjectAction { | ||
635 | header: presigned | ||
636 | .headers() | ||
637 | .map(|(k, v)| (k.to_owned(), v.to_owned())) | ||
638 | .collect(), | ||
639 | expires_at, | ||
640 | href: presigned.uri().to_string(), | ||
641 | }), | ||
642 | ..Default::default() | ||
643 | }, | ||
644 | error: None, | ||
645 | }; | ||
646 | } | ||
647 | |||
648 | if let Some(content_length) = result.content_length() { | ||
649 | if content_length > 0 { | ||
650 | match state | ||
651 | .dl_limiter | ||
652 | .lock() | ||
653 | .await | ||
654 | .request(content_length as u64) | ||
655 | .await | ||
656 | { | ||
657 | Ok(true) => {} | ||
658 | Ok(false) => { | ||
659 | return BatchResponseObject::error( | ||
660 | obj, | ||
661 | StatusCode::SERVICE_UNAVAILABLE, | ||
662 | "Public LFS downloads temporarily unavailable".to_string(), | ||
663 | ); | ||
664 | } | ||
665 | Err(e) => { | ||
666 | println!("Failed to request {content_length} bytes from download limiter: {e}"); | ||
667 | return BatchResponseObject::error( | ||
668 | obj, | ||
669 | StatusCode::INTERNAL_SERVER_ERROR, | ||
670 | "Internal server error".to_string(), | ||
671 | ); | ||
672 | } | ||
673 | } | ||
674 | } | ||
675 | } | ||
676 | |||
677 | let Some(tag) = generate_tag( | ||
678 | Claims { | ||
679 | specific_claims: SpecificClaims::Download(obj.oid), | ||
680 | repo_path: repo, | ||
681 | expires_at, | ||
682 | }, | ||
683 | &state.authz_conf.key, | ||
684 | ) else { | ||
685 | return BatchResponseObject::error( | ||
686 | obj, | ||
687 | StatusCode::INTERNAL_SERVER_ERROR, | ||
688 | "Internal server error".to_string(), | ||
689 | ); | ||
690 | }; | ||
691 | |||
692 | let upload_path = format!( | ||
693 | "{repo}/info/lfs/objects/{}/{}/{}", | ||
694 | HexByte(obj.oid[0]), | ||
695 | HexByte(obj.oid[1]), | ||
696 | obj.oid, | ||
697 | ); | ||
698 | |||
699 | BatchResponseObject { | ||
700 | oid: obj.oid, | ||
701 | size: obj.size, | ||
702 | authenticated: Some(true), | ||
703 | actions: BatchResponseObjectActions { | ||
704 | download: Some(BatchResponseObjectAction { | ||
705 | header: { | ||
706 | let mut map = HashMap::new(); | ||
707 | map.insert( | ||
708 | "Authorization".to_string(), | ||
709 | format!("Gitolfs3-Hmac-Sha256 {tag} {}", expires_at.timestamp()), | ||
710 | ); | ||
711 | map | ||
712 | }, | ||
713 | expires_at, | ||
714 | href: format!("{}/{upload_path}", state.base_url), | ||
715 | }), | ||
716 | ..Default::default() | ||
717 | }, | ||
718 | error: None, | ||
719 | } | ||
720 | } | ||
721 | |||
/// Authorization inputs: the HMAC key used to verify claim tags, plus the
/// hosts whose `X-Forwarded-Host` value marks a request as trusted.
struct AuthorizationConfig {
    trusted_forwarded_hosts: HashSet<String>,
    key: Key,
}

/// Whether the (possibly unauthenticated) requester is considered trusted.
struct Trusted(bool);
728 | |||
729 | fn forwarded_from_trusted_host( | ||
730 | headers: &HeaderMap, | ||
731 | trusted: &HashSet<String>, | ||
732 | ) -> Result<bool, GitLfsErrorResponse<'static>> { | ||
733 | if let Some(forwarded_host) = headers.get("X-Forwarded-Host") { | ||
734 | if let Ok(forwarded_host) = forwarded_host.to_str() { | ||
735 | if trusted.contains(forwarded_host) { | ||
736 | return Ok(true); | ||
737 | } | ||
738 | } else { | ||
739 | return Err(make_error_resp( | ||
740 | StatusCode::NOT_FOUND, | ||
741 | "Invalid X-Forwarded-Host header", | ||
742 | )); | ||
743 | } | ||
744 | } | ||
745 | Ok(false) | ||
746 | } | ||
747 | |||
// Canonical 404 — also used for unauthorized access so probing cannot
// distinguish missing from forbidden repositories.
const REPO_NOT_FOUND: GitLfsErrorResponse =
    make_error_resp(StatusCode::NOT_FOUND, "Repository not found");
750 | |||
/// Authorizes a batch API call: valid HMAC claims for this repository and
/// operation grant trusted access; otherwise the unauthenticated rules in
/// `authorize_batch_unauthenticated` apply.
fn authorize_batch(
    conf: &AuthorizationConfig,
    repo_path: &str,
    public: bool,
    operation: Operation,
    headers: &HeaderMap,
) -> Result<Trusted, GitLfsErrorResponse<'static>> {
    // - No authentication required for downloading exported repos
    // - When authenticated:
    //   - Download / upload over presigned URLs
    // - When accessing over Tailscale:
    //   - No authentication required for downloading from any repo

    let claims = VerifyClaimsInput {
        specific_claims: SpecificClaims::BatchApi(operation),
        repo_path,
    };
    if !verify_claims(conf, &claims, headers)? {
        return authorize_batch_unauthenticated(conf, public, operation, headers);
    }
    Ok(Trusted(true))
}
773 | |||
/// Authorization rules for requests presenting no valid HMAC claims: the
/// outcome depends only on the operation, whether the request comes from a
/// trusted forwarded host, and whether the repository is public.
fn authorize_batch_unauthenticated(
    conf: &AuthorizationConfig,
    public: bool,
    operation: Operation,
    headers: &HeaderMap,
) -> Result<Trusted, GitLfsErrorResponse<'static>> {
    let trusted = forwarded_from_trusted_host(headers, &conf.trusted_forwarded_hosts)?;
    match operation {
        Operation::Upload => {
            // Trusted users can clone all repositories (by virtue of accessing the server via a
            // trusted network). However, they can not push without proper authentication. Untrusted
            // users who are also not authenticated should not need to know which repositories exists.
            // Therefore, we tell untrusted && unauthenticated users that the repo doesn't exist, but
            // tell trusted users that they need to authenticate.
            if !trusted {
                return Err(REPO_NOT_FOUND);
            }
            Err(make_error_resp(
                StatusCode::FORBIDDEN,
                "Authentication required to upload",
            ))
        }
        Operation::Download => {
            // Again, trusted users can see all repos. For untrusted users, we first need to check
            // whether the repo is public before we authorize. If the user is untrusted and the
            // repo isn't public, we just act like it doesn't even exist.
            if !trusted {
                if !public {
                    return Err(REPO_NOT_FOUND);
                }
                return Ok(Trusted(false));
            }
            Ok(Trusted(true))
        }
    }
}
810 | |||
/// A repository "exists" iff `name` is a directory on the filesystem.
fn repo_exists(name: &str) -> bool {
    std::fs::metadata(name)
        .map(|md| md.is_dir())
        .unwrap_or(false)
}
817 | |||
818 | fn is_repo_public(name: &str) -> Option<bool> { | ||
819 | if !repo_exists(name) { | ||
820 | return None; | ||
821 | } | ||
822 | match std::fs::metadata(format!("{name}/git-daemon-export-ok")) { | ||
823 | Ok(metadata) if metadata.is_file() => Some(true), | ||
824 | Err(e) if e.kind() == std::io::ErrorKind::NotFound => Some(false), | ||
825 | _ => None, | ||
826 | } | ||
827 | } | ||
828 | |||
/// Handler for the Git LFS batch API (`POST <repo>.git/info/lfs/objects/batch`).
///
/// Authorizes the request, validates the negotiated media type, hash
/// algorithm and transfer adapter, then builds one response entry per
/// requested object via the upload/download helpers.
async fn batch(
    State(state): State<Arc<AppState>>,
    headers: HeaderMap,
    RepositoryName(repo): RepositoryName,
    GitLfsJson(Json(payload)): GitLfsJson<BatchRequest>,
) -> Response {
    let Some(public) = is_repo_public(&repo) else {
        return REPO_NOT_FOUND.into_response();
    };
    let Trusted(trusted) = match authorize_batch(
        &state.authz_conf,
        &repo,
        public,
        payload.operation,
        &headers,
    ) {
        Ok(authn) => authn,
        Err(e) => return e.into_response(),
    };

    // The client must declare it accepts the Git LFS JSON media type.
    if !headers
        .get_all("Accept")
        .iter()
        .filter_map(|v| v.to_str().ok())
        .any(is_git_lfs_json_mimetype)
    {
        let message = format!("Expected `{LFS_MIME}` in list of acceptable response media types");
        return make_error_resp(StatusCode::NOT_ACCEPTABLE, &message).into_response();
    }

    if payload.hash_algo != HashAlgo::Sha256 {
        let message = "Unsupported hashing algorithm specified";
        return make_error_resp(StatusCode::CONFLICT, message).into_response();
    }
    // An empty `transfers` list implicitly means the basic adapter.
    if !payload.transfers.is_empty() && !payload.transfers.contains(&TransferAdapter::Basic) {
        let message = "Unsupported transfer adapter specified (supported: basic)";
        return make_error_resp(StatusCode::CONFLICT, message).into_response();
    }

    let mut resp = BatchResponse {
        transfer: TransferAdapter::Basic,
        objects: vec![],
        hash_algo: HashAlgo::Sha256,
    };
    for obj in payload.objects {
        match payload.operation {
            Operation::Download => resp
                .objects
                .push(handle_download_object(&state, &repo, &obj, trusted).await),
            Operation::Upload => {
                // `None` means the object is already stored: emit no entry.
                if let Some(obj_resp) = handle_upload_object(&state, &repo, &obj).await {
                    resp.objects.push(obj_resp);
                }
            }
        };
    }
    GitLfsJson(Json(resp)).into_response()
}
887 | |||
/// Path parameters of an object download URL of the form
/// `…/{oid0}/{oid1}/{oid}`.
///
/// `#[serde(remote = "Self")]` makes the derived `Deserialize` available
/// under `FileParams::deserialize` so the manual impl below can delegate to
/// it and then cross-validate the fields.
#[derive(Deserialize, Copy, Clone)]
#[serde(remote = "Self")]
struct FileParams {
    // First fan-out path component; must equal the first byte of `oid`.
    oid0: HexByte,
    // Second fan-out path component; must equal the second byte of `oid`.
    oid1: HexByte,
    // The full object ID (consistency with oid0/oid1 checked in Deserialize).
    oid: Oid,
}
895 | |||
896 | impl<'de> Deserialize<'de> for FileParams { | ||
897 | fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> | ||
898 | where | ||
899 | D: serde::Deserializer<'de>, | ||
900 | { | ||
901 | let unchecked @ FileParams { | ||
902 | oid0: HexByte(oid0), | ||
903 | oid1: HexByte(oid1), | ||
904 | oid, | ||
905 | } = FileParams::deserialize(deserializer)?; | ||
906 | if oid0 != oid.as_bytes()[0] { | ||
907 | return Err(de::Error::custom( | ||
908 | "first OID path part does not match first byte of full OID", | ||
909 | )); | ||
910 | } | ||
911 | if oid1 != oid.as_bytes()[1] { | ||
912 | return Err(de::Error::custom( | ||
913 | "second OID path part does not match first byte of full OID", | ||
914 | )); | ||
915 | } | ||
916 | Ok(unchecked) | ||
917 | } | ||
918 | } | ||
919 | |||
/// Input to [`verify_claims`]: the claims a request must prove.
///
/// The expiry time is not part of this struct because it is supplied by the
/// client inside the `Authorization` header and validated against the HMAC
/// tag.
pub struct VerifyClaimsInput<'a> {
    pub specific_claims: SpecificClaims,
    pub repo_path: &'a str,
}
924 | |||
/// Verifies a `Gitolfs3-Hmac-Sha256` authorization header against `claims`.
///
/// Returns `Ok(false)` when no `Authorization` header is present at all,
/// `Ok(true)` when the header carries a valid tag for the expected claims,
/// and `Err` (400 Bad Request) for any malformed or non-matching header.
/// The expiry timestamp is taken from the header itself; it is bound to the
/// tag via `generate_tag`, so a tampered timestamp fails the comparison.
fn verify_claims(
    conf: &AuthorizationConfig,
    claims: &VerifyClaimsInput,
    headers: &HeaderMap,
) -> Result<bool, GitLfsErrorResponse<'static>> {
    // One generic error for every malformed-header case, so responses do not
    // leak which part of the header failed validation.
    const INVALID_AUTHZ_HEADER: GitLfsErrorResponse =
        make_error_resp(StatusCode::BAD_REQUEST, "Invalid authorization header");

    let Some(authz) = headers.get(header::AUTHORIZATION) else {
        return Ok(false);
    };
    let authz = authz.to_str().map_err(|_| INVALID_AUTHZ_HEADER)?;
    // Expected shape: "Gitolfs3-Hmac-Sha256 <hex tag> <unix expiry>".
    let val = authz
        .strip_prefix("Gitolfs3-Hmac-Sha256 ")
        .ok_or(INVALID_AUTHZ_HEADER)?;
    let (tag, expires_at) = val.split_once(' ').ok_or(INVALID_AUTHZ_HEADER)?;
    let tag: Digest<32> = tag.parse().map_err(|_| INVALID_AUTHZ_HEADER)?;
    let expires_at: i64 = expires_at.parse().map_err(|_| INVALID_AUTHZ_HEADER)?;
    let expires_at = DateTime::<Utc>::from_timestamp(expires_at, 0).ok_or(INVALID_AUTHZ_HEADER)?;
    let expected_tag = generate_tag(
        Claims {
            specific_claims: claims.specific_claims,
            repo_path: claims.repo_path,
            expires_at,
        },
        &conf.key,
    )
    .ok_or_else(|| make_error_resp(StatusCode::INTERNAL_SERVER_ERROR, "Internal server error"))?;
    // NOTE(review): relies on Digest's PartialEq — presumably not a
    // constant-time comparison; confirm whether timing-safe equality is
    // needed here.
    if tag != expected_tag {
        return Err(INVALID_AUTHZ_HEADER);
    }

    Ok(true)
}
959 | |||
960 | fn authorize_get( | ||
961 | conf: &AuthorizationConfig, | ||
962 | repo_path: &str, | ||
963 | oid: Oid, | ||
964 | headers: &HeaderMap, | ||
965 | ) -> Result<(), GitLfsErrorResponse<'static>> { | ||
966 | let claims = VerifyClaimsInput { | ||
967 | specific_claims: SpecificClaims::Download(oid), | ||
968 | repo_path, | ||
969 | }; | ||
970 | if !verify_claims(conf, &claims, headers)? { | ||
971 | return Err(make_error_resp( | ||
972 | StatusCode::UNAUTHORIZED, | ||
973 | "Repository not found", | ||
974 | )); | ||
975 | } | ||
976 | Ok(()) | ||
977 | } | ||
978 | |||
979 | async fn obj_download( | ||
980 | State(state): State<Arc<AppState>>, | ||
981 | headers: HeaderMap, | ||
982 | RepositoryName(repo): RepositoryName, | ||
983 | Path(FileParams { oid0, oid1, oid }): Path<FileParams>, | ||
984 | ) -> Response { | ||
985 | if let Err(e) = authorize_get(&state.authz_conf, &repo, oid, &headers) { | ||
986 | return e.into_response(); | ||
987 | } | ||
988 | |||
989 | let full_path = format!("{repo}/lfs/objects/{}/{}/{}", oid0, oid1, oid); | ||
990 | let result = match state | ||
991 | .s3_client | ||
992 | .get_object() | ||
993 | .bucket(&state.s3_bucket) | ||
994 | .key(full_path) | ||
995 | .checksum_mode(aws_sdk_s3::types::ChecksumMode::Enabled) | ||
996 | .send() | ||
997 | .await | ||
998 | { | ||
999 | Ok(result) => result, | ||
1000 | Err(e) => { | ||
1001 | println!("Failed to GetObject (repo {repo}, OID {oid}): {e}"); | ||
1002 | return ( | ||
1003 | StatusCode::INTERNAL_SERVER_ERROR, | ||
1004 | "Failed to query object information", | ||
1005 | ) | ||
1006 | .into_response(); | ||
1007 | } | ||
1008 | }; | ||
1009 | |||
1010 | let mut headers = header::HeaderMap::new(); | ||
1011 | if let Some(content_type) = result.content_type { | ||
1012 | let Ok(header_value) = content_type.try_into() else { | ||
1013 | return ( | ||
1014 | StatusCode::INTERNAL_SERVER_ERROR, | ||
1015 | "Object has invalid content type", | ||
1016 | ) | ||
1017 | .into_response(); | ||
1018 | }; | ||
1019 | headers.insert(header::CONTENT_TYPE, header_value); | ||
1020 | } | ||
1021 | if let Some(content_length) = result.content_length { | ||
1022 | headers.insert(header::CONTENT_LENGTH, content_length.into()); | ||
1023 | } | ||
1024 | |||
1025 | let async_read = result.body.into_async_read(); | ||
1026 | let stream = tokio_util::io::ReaderStream::new(async_read); | ||
1027 | let body = axum::body::Body::from_stream(stream); | ||
1028 | |||
1029 | (headers, body).into_response() | ||
1030 | } | ||
1031 | |||
/// Tracks cumulative downloaded bytes against a fixed quota, persisted to the
/// `.gitolfs3-dlimit` file in the working directory (see the impl below).
struct DownloadLimiter {
    // Bytes consumed so far in the current accounting window.
    current: u64,
    // Maximum number of bytes allowed per window.
    limit: u64,
}
1036 | |||
1037 | impl DownloadLimiter { | ||
1038 | async fn new(limit: u64) -> DownloadLimiter { | ||
1039 | let dlimit_str = match tokio::fs::read_to_string(".gitolfs3-dlimit").await { | ||
1040 | Ok(dlimit_str) => dlimit_str, | ||
1041 | Err(e) => { | ||
1042 | println!("Failed to read download counter, assuming 0: {e}"); | ||
1043 | return DownloadLimiter { current: 0, limit }; | ||
1044 | } | ||
1045 | }; | ||
1046 | let current: u64 = match dlimit_str | ||
1047 | .parse() | ||
1048 | .map_err(|e| tokio::io::Error::new(tokio::io::ErrorKind::InvalidData, e)) | ||
1049 | { | ||
1050 | Ok(current) => current, | ||
1051 | Err(e) => { | ||
1052 | println!("Failed to read download counter, assuming 0: {e}"); | ||
1053 | return DownloadLimiter { current: 0, limit }; | ||
1054 | } | ||
1055 | }; | ||
1056 | DownloadLimiter { current, limit } | ||
1057 | } | ||
1058 | |||
1059 | async fn request(&mut self, n: u64) -> tokio::io::Result<bool> { | ||
1060 | if self.current + n > self.limit { | ||
1061 | return Ok(false); | ||
1062 | } | ||
1063 | self.current += n; | ||
1064 | self.write_new_count().await?; | ||
1065 | Ok(true) | ||
1066 | } | ||
1067 | |||
1068 | async fn reset(&mut self) { | ||
1069 | self.current = 0; | ||
1070 | if let Err(e) = self.write_new_count().await { | ||
1071 | println!("Failed to reset download counter: {e}"); | ||
1072 | } | ||
1073 | } | ||
1074 | |||
1075 | async fn write_new_count(&self) -> tokio::io::Result<()> { | ||
1076 | let cwd = tokio::fs::File::open(std::env::current_dir()?).await?; | ||
1077 | let mut file = tokio::fs::File::create(".gitolfs3-dlimit.tmp").await?; | ||
1078 | file.write_all(self.current.to_string().as_bytes()).await?; | ||
1079 | file.sync_all().await?; | ||
1080 | tokio::fs::rename(".gitolfs3-dlimit.tmp", ".gitolfs3-dlimit").await?; | ||
1081 | cwd.sync_all().await | ||
1082 | } | ||
1083 | } | ||
1084 | |||
/// Table-driven check of Git LFS JSON media-type detection, including
/// charset parameters (only UTF-8, case-insensitively, is acceptable).
#[test]
fn test_mimetype() {
    let cases = [
        ("application/vnd.git-lfs+json", true),
        ("application/vnd.git-lfs", false),
        ("application/json", false),
        ("application/vnd.git-lfs+json; charset=utf-8", true),
        ("application/vnd.git-lfs+json; charset=UTF-8", true),
        ("application/vnd.git-lfs+json; charset=ISO-8859-1", false),
    ];
    for (mimetype, expected) in cases {
        assert_eq!(
            is_git_lfs_json_mimetype(mimetype),
            expected,
            "mimetype: {mimetype}"
        );
    }
}
1100 | |||
/// Checks that a realistic batch request deserializes correctly, in
/// particular that unrecognized transfer adapters map to
/// `TransferAdapter::Unknown` instead of failing the whole request.
#[test]
fn test_deserialize() {
    let input = r#"{"operation":"upload","objects":[{"oid":"8f4123f9a7181f488c5e111d82cefd992e461ae5df01fd2254399e6e670b2d3c","size":170904}],
        "transfers":["lfs-standalone-file","basic","ssh"],"ref":{"name":"refs/heads/main"},"hash_algo":"sha256"}"#;
    let parsed: BatchRequest = serde_json::from_str(input).unwrap();

    let want_oid = "8f4123f9a7181f488c5e111d82cefd992e461ae5df01fd2254399e6e670b2d3c"
        .parse()
        .unwrap();
    let want = BatchRequest {
        operation: Operation::Upload,
        objects: vec![BatchRequestObject {
            oid: want_oid,
            size: 170904,
        }],
        transfers: vec![
            TransferAdapter::Unknown,
            TransferAdapter::Basic,
            TransferAdapter::Unknown,
        ],
        hash_algo: HashAlgo::Sha256,
    };
    assert_eq!(parsed, want);
}
1125 | |||
/// Round-trip test: a tag generated by `generate_tag` for a set of claims is
/// accepted by `verify_claims` when presented in the expected
/// `Gitolfs3-Hmac-Sha256 <tag> <expiry>` authorization header format.
#[test]
fn test_validate_claims() {
    let key: Key =
        "00232f7a019bd34e3921ee6c5f04caf48a4489d1be5d1999038950a7054e0bfea369ce2becc0f13fd3c69f8af2384a25b7ac2d52eb52c33722f3c00c50d4c9c2"
            .parse()
            .unwrap();

    let claims = Claims {
        expires_at: Utc::now() + std::time::Duration::from_secs(5 * 60),
        repo_path: "lfs-test.git",
        specific_claims: SpecificClaims::BatchApi(Operation::Download),
    };
    let tag = generate_tag(claims, &key).unwrap();
    let expiry = claims.expires_at.timestamp();
    let authz_value = format!("Gitolfs3-Hmac-Sha256 {tag} {expiry}");

    let mut headers = HeaderMap::new();
    headers.insert(header::AUTHORIZATION, authz_value.try_into().unwrap());

    let conf = AuthorizationConfig {
        key,
        trusted_forwarded_hosts: HashSet::new(),
    };
    let input = VerifyClaimsInput {
        repo_path: claims.repo_path,
        specific_claims: claims.specific_claims,
    };
    assert!(verify_claims(&conf, &input, &headers).unwrap());
}