Diffstat (limited to 'gitolfs3-server')
 gitolfs3-server/Cargo.toml     |  14
 gitolfs3-server/src/api.rs     | 220
 gitolfs3-server/src/authz.rs   |  84
 gitolfs3-server/src/config.rs  | 124
 gitolfs3-server/src/dlimit.rs  |   2
 gitolfs3-server/src/handler.rs | 468
 gitolfs3-server/src/main.rs    |  25
 7 files changed, 484 insertions(+), 453 deletions(-)
diff --git a/gitolfs3-server/Cargo.toml b/gitolfs3-server/Cargo.toml
index efea78b..5908770 100644
--- a/gitolfs3-server/Cargo.toml
+++ b/gitolfs3-server/Cargo.toml
@@ -1,20 +1,20 @@
 [package]
 name = "gitolfs3-server"
 version = "0.1.0"
-edition = "2021"
+edition = "2024"
 license = "MIT"
 
 [dependencies]
-aws-config = { version = "1.1.2" }
-aws-sdk-s3 = "1.12.0"
-axum = "0.7"
-base64 = "0.21"
+aws-config = "1.6"
+aws-sdk-s3 = "1.82"
+axum = "0.8"
+base64 = "0.22"
 chrono = { version = "0.4", features = ["serde"] }
 gitolfs3-common = { path = "../gitolfs3-common" }
 mime = "0.3"
 serde = { version = "1", features = ["derive"] }
 serde_json = "1"
-tokio = { version = "1.35", features = ["full"] }
+tokio = { version = "1.44", features = ["full"] }
 tokio-util = "0.7"
-tower = "0.4"
+tower = "0.5"
 tracing-subscriber = { version = "0.3", features = ["env-filter"] }
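
The `edition = "2024"` bump also switches rustfmt to the 2024 style edition, which appears to be what drives the pervasive `use`-list reshuffling in the hunks below: capitalized (type-like) names now sort before lowercase (function- and module-like) names, where the older style editions sorted roughly case-insensitively. A minimal before/after sketch, assuming plain `cargo fmt` against the manifest above:

```rust
// Pre-2024 style editions (roughly case-insensitive ordering):
// use serde::{de::DeserializeOwned, Deserialize, Serialize};

// Style edition 2024: capitalized names sort before lowercase ones.
use serde::{Deserialize, Serialize, de::DeserializeOwned};
```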
diff --git a/gitolfs3-server/src/api.rs b/gitolfs3-server/src/api.rs
index dba7ada..e1a2983 100644
--- a/gitolfs3-server/src/api.rs
+++ b/gitolfs3-server/src/api.rs
@@ -1,89 +1,30 @@
 use std::collections::HashMap;
 
 use axum::{
-    async_trait,
-    extract::{rejection, FromRequest, FromRequestParts, Request},
-    http::{header, request::Parts, HeaderValue, StatusCode},
-    response::{IntoResponse, Response},
     Extension, Json,
+    extract::{FromRequest, FromRequestParts, Request, rejection},
+    http,
+    response::{IntoResponse, Response},
 };
 use chrono::{DateTime, Utc};
 use gitolfs3_common::{Oid, Operation};
-use serde::{de::DeserializeOwned, Deserialize, Serialize};
-
-pub const REPO_NOT_FOUND: GitLfsErrorResponse =
-    make_error_resp(StatusCode::NOT_FOUND, "Repository not found");
-
-#[derive(Clone)]
-pub struct RepositoryName(pub String);
-
-pub struct RepositoryNameRejection;
-
-impl IntoResponse for RepositoryNameRejection {
-    fn into_response(self) -> Response {
-        (StatusCode::INTERNAL_SERVER_ERROR, "Missing repository name").into_response()
-    }
-}
-
-#[async_trait]
-impl<S: Send + Sync> FromRequestParts<S> for RepositoryName {
-    type Rejection = RepositoryNameRejection;
-
-    async fn from_request_parts(parts: &mut Parts, state: &S) -> Result<Self, Self::Rejection> {
-        let Ok(Extension(repo_name)) = Extension::<Self>::from_request_parts(parts, state).await
-        else {
-            return Err(RepositoryNameRejection);
-        };
-        Ok(repo_name)
-    }
-}
+use serde::{Deserialize, Serialize, de::DeserializeOwned};
 
-#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, Clone, Copy)]
-pub enum TransferAdapter {
-    #[serde(rename = "basic")]
-    Basic,
-    #[serde(other)]
-    Unknown,
-}
+// ----------------------- Generic facilities ----------------------
 
-#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, Clone, Copy)]
-pub enum HashAlgo {
-    #[serde(rename = "sha256")]
-    Sha256,
-    #[serde(other)]
-    Unknown,
-}
+pub type GitLfsErrorResponse<'a> = (http::StatusCode, GitLfsJson<GitLfsErrorData<'a>>);
 
-impl Default for HashAlgo {
-    fn default() -> Self {
-        Self::Sha256
-    }
-}
-
-#[derive(Debug, Deserialize, PartialEq, Eq, Clone)]
-pub struct BatchRequestObject {
-    pub oid: Oid,
-    pub size: i64,
-}
-
-#[derive(Debug, Serialize, Deserialize, Clone)]
-struct BatchRef {
-    name: String,
+#[derive(Debug, Serialize)]
+pub struct GitLfsErrorData<'a> {
+    pub message: &'a str,
 }
 
-fn default_transfers() -> Vec<TransferAdapter> {
-    vec![TransferAdapter::Basic]
+pub const fn make_error_resp(code: http::StatusCode, message: &str) -> GitLfsErrorResponse {
+    (code, GitLfsJson(Json(GitLfsErrorData { message })))
 }
 
-#[derive(Debug, Deserialize, PartialEq, Eq, Clone)]
-pub struct BatchRequest {
-    pub operation: Operation,
-    #[serde(default = "default_transfers")]
-    pub transfers: Vec<TransferAdapter>,
-    pub objects: Vec<BatchRequestObject>,
-    #[serde(default)]
-    pub hash_algo: HashAlgo,
-}
+pub const REPO_NOT_FOUND: GitLfsErrorResponse =
+    make_error_resp(http::StatusCode::NOT_FOUND, "Repository not found");
 
 #[derive(Debug, Clone)]
 pub struct GitLfsJson<T>(pub Json<T>);
@@ -100,7 +41,7 @@ impl IntoResponse for GitLfsJsonRejection {
         match self {
             Self::Json(rej) => rej.into_response(),
             Self::MissingGitLfsJsonContentType => make_error_resp(
-                StatusCode::UNSUPPORTED_MEDIA_TYPE,
+                http::StatusCode::UNSUPPORTED_MEDIA_TYPE,
                 &format!("Expected request with `Content-Type: {LFS_MIME}`"),
             )
             .into_response(),
@@ -125,7 +66,7 @@ pub fn is_git_lfs_json_mimetype(mimetype: &str) -> bool {
 }
 
 fn has_git_lfs_json_content_type(req: &Request) -> bool {
-    let Some(content_type) = req.headers().get(header::CONTENT_TYPE) else {
+    let Some(content_type) = req.headers().get(http::header::CONTENT_TYPE) else {
         return false;
     };
     let Ok(content_type) = content_type.to_str() else {
@@ -134,7 +75,6 @@ fn has_git_lfs_json_content_type(req: &Request) -> bool {
     is_git_lfs_json_mimetype(content_type)
 }
 
-#[async_trait]
 impl<T, S> FromRequest<S> for GitLfsJson<T>
 where
     T: DeserializeOwned,
@@ -158,46 +98,97 @@ impl<T: Serialize> IntoResponse for GitLfsJson<T> {
         let GitLfsJson(json) = self;
         let mut resp = json.into_response();
         resp.headers_mut().insert(
-            header::CONTENT_TYPE,
-            HeaderValue::from_static("application/vnd.git-lfs+json; charset=utf-8"),
+            http::header::CONTENT_TYPE,
+            http::HeaderValue::from_static("application/vnd.git-lfs+json; charset=utf-8"),
         );
         resp
     }
 }
 
-#[derive(Debug, Serialize)]
-pub struct GitLfsErrorData<'a> {
-    pub message: &'a str,
+#[derive(Clone)]
+pub struct RepositoryName(pub String);
+
+pub struct RepositoryNameRejection;
+
+impl IntoResponse for RepositoryNameRejection {
+    fn into_response(self) -> Response {
+        (
+            http::StatusCode::INTERNAL_SERVER_ERROR,
+            "Missing repository name",
+        )
+            .into_response()
+    }
 }
 
-pub type GitLfsErrorResponse<'a> = (StatusCode, GitLfsJson<GitLfsErrorData<'a>>);
+impl<S: Send + Sync> FromRequestParts<S> for RepositoryName {
+    type Rejection = RepositoryNameRejection;
 
-pub const fn make_error_resp(code: StatusCode, message: &str) -> GitLfsErrorResponse {
-    (code, GitLfsJson(Json(GitLfsErrorData { message })))
+    async fn from_request_parts(
+        parts: &mut http::request::Parts,
+        state: &S,
+    ) -> Result<Self, Self::Rejection> {
+        let Ok(Extension(repo_name)) = Extension::<Self>::from_request_parts(parts, state).await
+        else {
+            return Err(RepositoryNameRejection);
+        };
+        Ok(repo_name)
+    }
 }
 
-#[derive(Debug, Serialize, Clone)]
-pub struct BatchResponseObjectAction {
-    pub href: String,
-    #[serde(skip_serializing_if = "HashMap::is_empty")]
-    pub header: HashMap<String, String>,
-    pub expires_at: DateTime<Utc>,
+// ----------------------- Git LFS Batch API -----------------------
+
+#[derive(Debug, Deserialize, PartialEq, Eq, Clone)]
+pub struct BatchRequest {
+    pub operation: Operation,
+    #[serde(default = "default_transfers")]
+    pub transfers: Vec<TransferAdapter>,
+    pub objects: Vec<BatchRequestObject>,
+    #[serde(default)]
+    pub hash_algo: HashAlgo,
 }
 
-#[derive(Default, Debug, Serialize, Clone)]
-pub struct BatchResponseObjectActions {
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub upload: Option<BatchResponseObjectAction>,
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub download: Option<BatchResponseObjectAction>,
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub verify: Option<BatchResponseObjectAction>,
+#[derive(Debug, Deserialize, PartialEq, Eq, Clone)]
+pub struct BatchRequestObject {
+    pub oid: Oid,
+    pub size: i64,
 }
 
-#[derive(Debug, Clone, Serialize)]
-pub struct BatchResponseObjectError {
-    pub code: u16,
-    pub message: String,
+#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, Clone, Copy)]
+pub enum TransferAdapter {
+    #[serde(rename = "basic")]
+    Basic,
+    #[serde(other)]
+    Unknown,
+}
+
+fn default_transfers() -> Vec<TransferAdapter> {
+    vec![TransferAdapter::Basic]
+}
+
+#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, Clone, Copy)]
+pub enum HashAlgo {
+    #[serde(rename = "sha256")]
+    Sha256,
+    #[serde(other)]
+    Unknown,
+}
+
+impl Default for HashAlgo {
+    fn default() -> Self {
+        Self::Sha256
+    }
+}
+
+#[derive(Debug, Serialize, Deserialize, Clone)]
+struct BatchRef {
+    name: String,
+}
+
+#[derive(Debug, Serialize, Clone)]
+pub struct BatchResponse {
+    pub transfer: TransferAdapter,
+    pub objects: Vec<BatchResponseObject>,
+    pub hash_algo: HashAlgo,
 }
 
 #[derive(Debug, Serialize, Clone)]
@@ -211,10 +202,16 @@ pub struct BatchResponseObject {
     pub error: Option<BatchResponseObjectError>,
 }
 
+#[derive(Debug, Clone, Serialize)]
+pub struct BatchResponseObjectError {
+    pub code: u16,
+    pub message: String,
+}
+
 impl BatchResponseObject {
     pub fn error(
         obj: &BatchRequestObject,
-        code: StatusCode,
+        code: http::StatusCode,
         message: String,
     ) -> BatchResponseObject {
         BatchResponseObject {
@@ -231,10 +228,21 @@ impl BatchResponseObject {
 }
 
 #[derive(Debug, Serialize, Clone)]
-pub struct BatchResponse {
-    pub transfer: TransferAdapter,
-    pub objects: Vec<BatchResponseObject>,
-    pub hash_algo: HashAlgo,
+pub struct BatchResponseObjectAction {
+    pub href: String,
+    #[serde(skip_serializing_if = "HashMap::is_empty")]
+    pub header: HashMap<String, String>,
+    pub expires_at: DateTime<Utc>,
+}
+
+#[derive(Default, Debug, Serialize, Clone)]
+pub struct BatchResponseObjectActions {
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub upload: Option<BatchResponseObjectAction>,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub download: Option<BatchResponseObjectAction>,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub verify: Option<BatchResponseObjectAction>,
 }
 
 #[test]
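
The dropped `#[async_trait]` attributes above reflect axum 0.8: `FromRequest` and `FromRequestParts` now use native `async fn` in traits (stable since Rust 1.75), and the `axum::async_trait` re-export is gone. A minimal sketch of a custom extractor under axum 0.8; the `UserAgent` type here is illustrative, not part of this crate:

```rust
use axum::{extract::FromRequestParts, http};

struct UserAgent(Option<String>);

// No #[async_trait] needed on axum 0.8: async fn in traits is native.
impl<S: Send + Sync> FromRequestParts<S> for UserAgent {
    type Rejection = std::convert::Infallible;

    async fn from_request_parts(
        parts: &mut http::request::Parts,
        _state: &S,
    ) -> Result<Self, Self::Rejection> {
        let ua = parts
            .headers
            .get(http::header::USER_AGENT)
            .and_then(|v| v.to_str().ok())
            .map(str::to_owned);
        Ok(UserAgent(ua))
    }
}
```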
diff --git a/gitolfs3-server/src/authz.rs b/gitolfs3-server/src/authz.rs
index 0674cef..c4cb6df 100644
--- a/gitolfs3-server/src/authz.rs
+++ b/gitolfs3-server/src/authz.rs
@@ -1,41 +1,22 @@
 use std::collections::HashSet;
 
-use axum::http::{header, HeaderMap, StatusCode};
+use axum::http;
 use chrono::{DateTime, Utc};
-use gitolfs3_common::{generate_tag, Claims, Digest, Oid, Operation, SpecificClaims};
+use gitolfs3_common::{Claims, Digest, Oid, Operation, SpecificClaims, generate_tag};
 
 use crate::{
-    api::{make_error_resp, GitLfsErrorResponse, REPO_NOT_FOUND},
+    api::{GitLfsErrorResponse, REPO_NOT_FOUND, make_error_resp},
     config::AuthorizationConfig,
 };
 
 pub struct Trusted(pub bool);
 
-fn forwarded_from_trusted_host(
-    headers: &HeaderMap,
-    trusted: &HashSet<String>,
-) -> Result<bool, GitLfsErrorResponse<'static>> {
-    if let Some(forwarded_host) = headers.get("X-Forwarded-Host") {
-        if let Ok(forwarded_host) = forwarded_host.to_str() {
-            if trusted.contains(forwarded_host) {
-                return Ok(true);
-            }
-        } else {
-            return Err(make_error_resp(
-                StatusCode::NOT_FOUND,
-                "Invalid X-Forwarded-Host header",
-            ));
-        }
-    }
-    Ok(false)
-}
-
 pub fn authorize_batch(
     conf: &AuthorizationConfig,
     repo_path: &str,
     public: bool,
     operation: Operation,
-    headers: &HeaderMap,
+    headers: &http::HeaderMap,
 ) -> Result<Trusted, GitLfsErrorResponse<'static>> {
     // - No authentication required for downloading exported repos
     // - When authenticated:
@@ -57,7 +38,7 @@ fn authorize_batch_unauthenticated(
     conf: &AuthorizationConfig,
     public: bool,
     operation: Operation,
-    headers: &HeaderMap,
+    headers: &http::HeaderMap,
 ) -> Result<Trusted, GitLfsErrorResponse<'static>> {
     let trusted = forwarded_from_trusted_host(headers, &conf.trusted_forwarded_hosts)?;
     match operation {
@@ -71,7 +52,7 @@ fn authorize_batch_unauthenticated(
                 return Err(REPO_NOT_FOUND);
             }
             Err(make_error_resp(
-                StatusCode::FORBIDDEN,
+                http::StatusCode::FORBIDDEN,
                 "Authentication required to upload",
             ))
         }
@@ -94,7 +75,7 @@ pub fn authorize_get(
     conf: &AuthorizationConfig,
     repo_path: &str,
     oid: Oid,
-    headers: &HeaderMap,
+    headers: &http::HeaderMap,
 ) -> Result<(), GitLfsErrorResponse<'static>> {
     let claims = VerifyClaimsInput {
         specific_claims: SpecificClaims::Download(oid),
@@ -102,27 +83,48 @@ pub fn authorize_get(
     };
     if !verify_claims(conf, &claims, headers)? {
         return Err(make_error_resp(
-            StatusCode::UNAUTHORIZED,
+            http::StatusCode::UNAUTHORIZED,
             "Repository not found",
         ));
     }
     Ok(())
 }
 
-pub struct VerifyClaimsInput<'a> {
-    pub specific_claims: SpecificClaims,
-    pub repo_path: &'a str,
+fn forwarded_from_trusted_host(
+    headers: &http::HeaderMap,
+    trusted: &HashSet<String>,
+) -> Result<bool, GitLfsErrorResponse<'static>> {
+    if let Some(forwarded_host) = headers.get("X-Forwarded-Host") {
+        if let Ok(forwarded_host) = forwarded_host.to_str() {
+            if trusted.contains(forwarded_host) {
+                return Ok(true);
+            }
+        } else {
+            return Err(make_error_resp(
+                http::StatusCode::NOT_FOUND,
+                "Invalid X-Forwarded-Host header",
+            ));
+        }
+    }
+    Ok(false)
+}
+
+struct VerifyClaimsInput<'a> {
+    specific_claims: SpecificClaims,
+    repo_path: &'a str,
 }
 
 fn verify_claims(
     conf: &AuthorizationConfig,
     claims: &VerifyClaimsInput,
-    headers: &HeaderMap,
+    headers: &http::HeaderMap,
 ) -> Result<bool, GitLfsErrorResponse<'static>> {
-    const INVALID_AUTHZ_HEADER: GitLfsErrorResponse =
-        make_error_resp(StatusCode::BAD_REQUEST, "Invalid authorization header");
+    const INVALID_AUTHZ_HEADER: GitLfsErrorResponse = make_error_resp(
+        http::StatusCode::BAD_REQUEST,
+        "Invalid authorization header",
+    );
 
-    let Some(authz) = headers.get(header::AUTHORIZATION) else {
+    let Some(authz) = headers.get(http::header::AUTHORIZATION) else {
         return Ok(false);
     };
     let authz = authz.to_str().map_err(|_| INVALID_AUTHZ_HEADER)?;
@@ -141,7 +143,12 @@ fn verify_claims(
         },
         &conf.key,
     )
-    .ok_or_else(|| make_error_resp(StatusCode::INTERNAL_SERVER_ERROR, "Internal server error"))?;
+    .ok_or_else(|| {
+        make_error_resp(
+            http::StatusCode::INTERNAL_SERVER_ERROR,
+            "Internal server error",
+        )
+    })?;
     if tag != expected_tag {
         return Err(INVALID_AUTHZ_HEADER);
     }
@@ -175,8 +182,11 @@ fn test_validate_claims() {
         repo_path: claims.repo_path,
         specific_claims: claims.specific_claims,
     };
-    let mut headers = HeaderMap::new();
-    headers.insert(header::AUTHORIZATION, header_value.try_into().unwrap());
+    let mut headers = http::HeaderMap::new();
+    headers.insert(
+        http::header::AUTHORIZATION,
+        header_value.try_into().unwrap(),
+    );
 
     assert!(verify_claims(&conf, &verification_claims, &headers).unwrap());
 }
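
The only substance in this file is the move of `forwarded_from_trusted_host` below its callers and the dropped `pub` on `VerifyClaimsInput`; everything else is the same `http::` qualification. For orientation, a hypothetical unit test inside this module exercising the trust check might look like this (host name invented):

```rust
#[test]
fn test_forwarded_from_trusted_host() {
    // A reverse proxy for a trusted host is expected to set X-Forwarded-Host.
    let mut headers = http::HeaderMap::new();
    headers.insert("X-Forwarded-Host", "git.example.com".try_into().unwrap());

    let trusted: HashSet<String> = ["git.example.com".to_string()].into();
    assert!(forwarded_from_trusted_host(&headers, &trusted).unwrap());
}
```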
diff --git a/gitolfs3-server/src/config.rs b/gitolfs3-server/src/config.rs
index 75e84dc..7adc9f0 100644
--- a/gitolfs3-server/src/config.rs
+++ b/gitolfs3-server/src/config.rs
@@ -1,66 +1,6 @@
 use std::collections::HashSet;
 
-use gitolfs3_common::{load_key, Key};
-
-struct Env {
-    s3_access_key_id: String,
-    s3_secret_access_key: String,
-    s3_bucket: String,
-    s3_region: String,
-    s3_endpoint: String,
-    base_url: String,
-    key_path: String,
-    listen_host: String,
-    listen_port: String,
-    download_limit: String,
-    trusted_forwarded_hosts: String,
-}
-
-fn require_env(name: &str) -> Result<String, String> {
-    std::env::var(name)
-        .map_err(|_| format!("environment variable {name} should be defined and valid"))
-}
-
-impl Env {
-    fn load() -> Result<Env, String> {
-        Ok(Env {
-            s3_secret_access_key: require_env("GITOLFS3_S3_SECRET_ACCESS_KEY_FILE")?,
-            s3_access_key_id: require_env("GITOLFS3_S3_ACCESS_KEY_ID_FILE")?,
-            s3_region: require_env("GITOLFS3_S3_REGION")?,
-            s3_endpoint: require_env("GITOLFS3_S3_ENDPOINT")?,
-            s3_bucket: require_env("GITOLFS3_S3_BUCKET")?,
-            base_url: require_env("GITOLFS3_BASE_URL")?,
-            key_path: require_env("GITOLFS3_KEY_PATH")?,
-            listen_host: require_env("GITOLFS3_LISTEN_HOST")?,
-            listen_port: require_env("GITOLFS3_LISTEN_PORT")?,
-            download_limit: require_env("GITOLFS3_DOWNLOAD_LIMIT")?,
-            trusted_forwarded_hosts: std::env::var("GITOLFS3_TRUSTED_FORWARDED_HOSTS")
-                .unwrap_or_default(),
-        })
-    }
-}
-
-fn get_s3_client(env: &Env) -> Result<aws_sdk_s3::Client, std::io::Error> {
-    let access_key_id = std::fs::read_to_string(&env.s3_access_key_id)?;
-    let secret_access_key = std::fs::read_to_string(&env.s3_secret_access_key)?;
-
-    let credentials = aws_sdk_s3::config::Credentials::new(
-        access_key_id,
-        secret_access_key,
-        None,
-        None,
-        "gitolfs3-env",
-    );
-    let config = aws_config::SdkConfig::builder()
-        .behavior_version(aws_config::BehaviorVersion::latest())
-        .region(aws_config::Region::new(env.s3_region.clone()))
-        .endpoint_url(&env.s3_endpoint)
-        .credentials_provider(aws_sdk_s3::config::SharedCredentialsProvider::new(
-            credentials,
-        ))
-        .build();
-    Ok(aws_sdk_s3::Client::new(&config))
-}
+use gitolfs3_common::{Key, load_key};
 
 pub struct Config {
     pub listen_addr: (String, u16),
@@ -83,7 +23,7 @@ impl Config {
             Err(e) => return Err(format!("failed to load configuration: {e}")),
         };
 
-        let s3_client = match get_s3_client(&env) {
+        let s3_client = match create_s3_client(&env) {
             Ok(s3_client) => s3_client,
             Err(e) => return Err(format!("failed to create S3 client: {e}")),
         };
@@ -120,3 +60,63 @@ impl Config {
         })
     }
 }
+
+fn create_s3_client(env: &Env) -> Result<aws_sdk_s3::Client, std::io::Error> {
+    let access_key_id = std::fs::read_to_string(&env.s3_access_key_id)?;
+    let secret_access_key = std::fs::read_to_string(&env.s3_secret_access_key)?;
+
+    let credentials = aws_sdk_s3::config::Credentials::new(
+        access_key_id,
+        secret_access_key,
+        None,
+        None,
+        "gitolfs3-env",
+    );
+    let config = aws_config::SdkConfig::builder()
+        .behavior_version(aws_config::BehaviorVersion::latest())
+        .region(aws_config::Region::new(env.s3_region.clone()))
+        .endpoint_url(&env.s3_endpoint)
+        .credentials_provider(aws_sdk_s3::config::SharedCredentialsProvider::new(
+            credentials,
+        ))
+        .build();
+    Ok(aws_sdk_s3::Client::new(&config))
+}
+
+struct Env {
+    s3_access_key_id: String,
+    s3_secret_access_key: String,
+    s3_bucket: String,
+    s3_region: String,
+    s3_endpoint: String,
+    base_url: String,
+    key_path: String,
+    listen_host: String,
+    listen_port: String,
+    download_limit: String,
+    trusted_forwarded_hosts: String,
+}
+
+impl Env {
+    fn load() -> Result<Env, String> {
+        Ok(Env {
+            s3_secret_access_key: require_env("GITOLFS3_S3_SECRET_ACCESS_KEY_FILE")?,
+            s3_access_key_id: require_env("GITOLFS3_S3_ACCESS_KEY_ID_FILE")?,
+            s3_region: require_env("GITOLFS3_S3_REGION")?,
+            s3_endpoint: require_env("GITOLFS3_S3_ENDPOINT")?,
+            s3_bucket: require_env("GITOLFS3_S3_BUCKET")?,
+            base_url: require_env("GITOLFS3_BASE_URL")?,
+            key_path: require_env("GITOLFS3_KEY_PATH")?,
+            listen_host: require_env("GITOLFS3_LISTEN_HOST")?,
+            listen_port: require_env("GITOLFS3_LISTEN_PORT")?,
+            download_limit: require_env("GITOLFS3_DOWNLOAD_LIMIT")?,
+            trusted_forwarded_hosts: std::env::var("GITOLFS3_TRUSTED_FORWARDED_HOSTS")
+                .unwrap_or_default(),
+        })
+    }
+}
+
+fn require_env(name: &str) -> Result<String, String> {
+    std::env::var(name)
+        .map_err(|_| format!("environment variable {name} should be defined and valid"))
+}
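
config.rs is a pure reorder: `get_s3_client` becomes `create_s3_client` and the `Env` plumbing moves below the public `Config` type. A hedged sketch of how this configuration is consumed at startup (mirroring what main.rs below presumably does; the exact error handling is an assumption, not taken from this diff):

```rust
// Sketch: Config::load() reads the GITOLFS3_* variables listed above
// (the *_FILE ones name files that hold the S3 credentials) and
// returns a flat String error on any missing or invalid value.
let conf = match Config::load() {
    Ok(conf) => conf,
    Err(e) => {
        println!("Configuration error: {e}");
        return ExitCode::FAILURE;
    }
};
```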
diff --git a/gitolfs3-server/src/dlimit.rs b/gitolfs3-server/src/dlimit.rs
index f68bec1..7a82a18 100644
--- a/gitolfs3-server/src/dlimit.rs
+++ b/gitolfs3-server/src/dlimit.rs
@@ -55,7 +55,7 @@ impl DownloadLimiter {
         Ok(true)
     }
 
-    pub async fn reset(&mut self) {
+    async fn reset(&mut self) {
         self.current = 0;
         if let Err(e) = self.write_new_count().await {
             println!("Failed to reset download counter: {e}");
diff --git a/gitolfs3-server/src/handler.rs b/gitolfs3-server/src/handler.rs
index 6516291..be39721 100644
--- a/gitolfs3-server/src/handler.rs
+++ b/gitolfs3-server/src/handler.rs
@@ -2,24 +2,24 @@ use std::{collections::HashMap, sync::Arc};
 
 use aws_sdk_s3::{error::SdkError, operation::head_object::HeadObjectOutput};
 use axum::{
+    Json,
     extract::{Path, State},
-    http::{header, HeaderMap, StatusCode},
+    http,
     response::{IntoResponse, Response},
-    Json,
 };
-use base64::{prelude::BASE64_STANDARD, Engine};
+use base64::{Engine, prelude::BASE64_STANDARD};
 use chrono::Utc;
-use gitolfs3_common::{generate_tag, Claims, HexByte, Oid, Operation, SpecificClaims};
-use serde::{de, Deserialize};
+use gitolfs3_common::{Claims, HexByte, Oid, Operation, SpecificClaims, generate_tag};
+use serde::{Deserialize, de};
 use tokio::sync::Mutex;
 
 use crate::{
     api::{
-        is_git_lfs_json_mimetype, make_error_resp, BatchRequest, BatchRequestObject, BatchResponse,
-        BatchResponseObject, BatchResponseObjectAction, BatchResponseObjectActions, GitLfsJson,
-        HashAlgo, RepositoryName, TransferAdapter, LFS_MIME, REPO_NOT_FOUND,
+        BatchRequest, BatchRequestObject, BatchResponse, BatchResponseObject,
+        BatchResponseObjectAction, BatchResponseObjectActions, GitLfsJson, HashAlgo, LFS_MIME,
+        REPO_NOT_FOUND, RepositoryName, TransferAdapter, is_git_lfs_json_mimetype, make_error_resp,
     },
-    authz::{authorize_batch, authorize_get, Trusted},
+    authz::{Trusted, authorize_batch, authorize_get},
     config::AuthorizationConfig,
     dlimit::DownloadLimiter,
 };
@@ -33,100 +33,44 @@ pub struct AppState {
     pub dl_limiter: Arc<Mutex<DownloadLimiter>>,
 }
 
-fn validate_checksum(oid: Oid, obj: &HeadObjectOutput) -> bool {
-    if let Some(checksum) = obj.checksum_sha256() {
-        if let Ok(checksum) = BASE64_STANDARD.decode(checksum) {
-            if let Ok(checksum32b) = TryInto::<[u8; 32]>::try_into(checksum) {
-                return Oid::from(checksum32b) == oid;
-            }
-        }
-    }
-    true
+enum ObjectStatus {
+    ExistsOk { content_length: Option<i64> },
+    ExistsInconsistent,
+    DoesNotExist,
 }
 
-fn validate_size(expected: i64, obj: &HeadObjectOutput) -> bool {
-    if let Some(length) = obj.content_length() {
-        return length == expected;
-    }
-    true
-}
+impl AppState {
+    async fn check_object(&self, repo: &str, obj: &BatchRequestObject) -> Result<ObjectStatus, ()> {
+        let (oid0, oid1) = (HexByte(obj.oid[0]), HexByte(obj.oid[1]));
+        let full_path = format!("{repo}/lfs/objects/{}/{}/{}", oid0, oid1, obj.oid);
 
-async fn handle_upload_object(
-    state: &AppState,
-    repo: &str,
-    obj: &BatchRequestObject,
-) -> Option<BatchResponseObject> {
-    let (oid0, oid1) = (HexByte(obj.oid[0]), HexByte(obj.oid[1]));
-    let full_path = format!("{repo}/lfs/objects/{}/{}/{}", oid0, oid1, obj.oid);
-
-    match state
-        .s3_client
-        .head_object()
-        .bucket(&state.s3_bucket)
-        .key(full_path.clone())
-        .checksum_mode(aws_sdk_s3::types::ChecksumMode::Enabled)
-        .send()
-        .await
-    {
-        Ok(result) => {
-            if validate_size(obj.size, &result) && validate_checksum(obj.oid, &result) {
-                return None;
-            }
-        }
-        Err(SdkError::ServiceError(e)) if e.err().is_not_found() => {}
-        Err(e) => {
-            println!("Failed to HeadObject (repo {repo}, OID {}): {e}", obj.oid);
-            return Some(BatchResponseObject::error(
-                obj,
-                StatusCode::INTERNAL_SERVER_ERROR,
-                "Failed to query object information".to_string(),
-            ));
-        }
-    };
-
-    let expires_in = std::time::Duration::from_secs(5 * 60);
-    let expires_at = Utc::now() + expires_in;
+        let result = match self
+            .s3_client
+            .head_object()
+            .bucket(&self.s3_bucket)
+            .key(full_path)
+            .checksum_mode(aws_sdk_s3::types::ChecksumMode::Enabled)
+            .send()
+            .await
+        {
+            Ok(result) => result,
+            Err(SdkError::ServiceError(e)) if e.err().is_not_found() => {
+                return Ok(ObjectStatus::DoesNotExist);
+            }
+            Err(e) => {
+                println!("Failed to HeadObject (repo {repo}, OID {}): {e}", obj.oid);
+                return Err(());
+            }
+        };
 
-    let Ok(config) = aws_sdk_s3::presigning::PresigningConfig::expires_in(expires_in) else {
-        return Some(BatchResponseObject::error(
-            obj,
-            StatusCode::INTERNAL_SERVER_ERROR,
-            "Failed to generate upload URL".to_string(),
-        ));
-    };
-    let Ok(presigned) = state
-        .s3_client
-        .put_object()
-        .bucket(&state.s3_bucket)
-        .key(full_path)
-        .checksum_sha256(obj.oid.to_string())
-        .content_length(obj.size)
-        .presigned(config)
-        .await
-    else {
-        return Some(BatchResponseObject::error(
-            obj,
-            StatusCode::INTERNAL_SERVER_ERROR,
-            "Failed to generate upload URL".to_string(),
-        ));
-    };
-    Some(BatchResponseObject {
-        oid: obj.oid,
-        size: obj.size,
-        authenticated: Some(true),
-        actions: BatchResponseObjectActions {
-            upload: Some(BatchResponseObjectAction {
-                header: presigned
-                    .headers()
-                    .map(|(k, v)| (k.to_owned(), v.to_owned()))
-                    .collect(),
-                expires_at,
-                href: presigned.uri().to_string(),
-            }),
-            ..Default::default()
-        },
-        error: None,
-    })
+        // Scaleway actually doesn't provide SHA256 support, but maybe in the future :)
+        if !s3_validate_checksum(obj.oid, &result) || !s3_validate_size(obj.size, &result) {
+            return Ok(ObjectStatus::ExistsInconsistent);
+        }
+        Ok(ObjectStatus::ExistsOk {
+            content_length: result.content_length(),
+        })
+    }
 }
 
 async fn handle_download_object(
131 75
132async fn handle_download_object( 76async fn handle_download_object(
@@ -138,42 +82,24 @@ async fn handle_download_object(
     let (oid0, oid1) = (HexByte(obj.oid[0]), HexByte(obj.oid[1]));
     let full_path = format!("{repo}/lfs/objects/{}/{}/{}", oid0, oid1, obj.oid);
 
-    let result = match state
-        .s3_client
-        .head_object()
-        .bucket(&state.s3_bucket)
-        .key(&full_path)
-        .checksum_mode(aws_sdk_s3::types::ChecksumMode::Enabled)
-        .send()
-        .await
-    {
-        Ok(result) => result,
-        Err(e) => {
-            println!("Failed to HeadObject (repo {repo}, OID {}): {e}", obj.oid);
+    let content_length = match state.check_object(repo, obj).await {
+        Ok(ObjectStatus::ExistsOk { content_length }) => content_length,
+        Ok(_) => {
             return BatchResponseObject::error(
                 obj,
-                StatusCode::INTERNAL_SERVER_ERROR,
+                http::StatusCode::UNPROCESSABLE_ENTITY,
+                "Object corrupted".to_string(),
+            );
+        }
+        Err(_) => {
+            return BatchResponseObject::error(
+                obj,
+                http::StatusCode::INTERNAL_SERVER_ERROR,
                 "Failed to query object information".to_string(),
             );
         }
     };
 
-    // Scaleway actually doesn't provide SHA256 suport, but maybe in the future :)
-    if !validate_checksum(obj.oid, &result) {
-        return BatchResponseObject::error(
-            obj,
-            StatusCode::UNPROCESSABLE_ENTITY,
-            "Object corrupted".to_string(),
-        );
-    }
-    if !validate_size(obj.size, &result) {
-        return BatchResponseObject::error(
-            obj,
-            StatusCode::UNPROCESSABLE_ENTITY,
-            "Incorrect size specified (or object corrupted)".to_string(),
-        );
-    }
-
     let expires_in = std::time::Duration::from_secs(5 * 60);
     let expires_at = Utc::now() + expires_in;
 
@@ -181,7 +107,7 @@ async fn handle_download_object(
     let Ok(config) = aws_sdk_s3::presigning::PresigningConfig::expires_in(expires_in) else {
         return BatchResponseObject::error(
             obj,
-            StatusCode::INTERNAL_SERVER_ERROR,
+            http::StatusCode::INTERNAL_SERVER_ERROR,
             "Failed to generate upload URL".to_string(),
         );
     };
@@ -195,7 +121,7 @@ async fn handle_download_object(
     else {
         return BatchResponseObject::error(
             obj,
-            StatusCode::INTERNAL_SERVER_ERROR,
+            http::StatusCode::INTERNAL_SERVER_ERROR,
             "Failed to generate upload URL".to_string(),
         );
     };
@@ -218,7 +144,7 @@ async fn handle_download_object(
         };
     }
 
-    if let Some(content_length) = result.content_length() {
+    if let Some(content_length) = content_length {
         if content_length > 0 {
             match state
                 .dl_limiter
@@ -231,7 +157,7 @@ async fn handle_download_object(
                 Ok(false) => {
                     return BatchResponseObject::error(
                         obj,
-                        StatusCode::SERVICE_UNAVAILABLE,
+                        http::StatusCode::SERVICE_UNAVAILABLE,
                         "Public LFS downloads temporarily unavailable".to_string(),
                     );
                 }
@@ -239,7 +165,7 @@ async fn handle_download_object(
                     println!("Failed to request {content_length} bytes from download limiter: {e}");
                     return BatchResponseObject::error(
                         obj,
-                        StatusCode::INTERNAL_SERVER_ERROR,
+                        http::StatusCode::INTERNAL_SERVER_ERROR,
                         "Internal server error".to_string(),
                     );
                 }
@@ -257,18 +183,11 @@ async fn handle_download_object(
     ) else {
         return BatchResponseObject::error(
             obj,
-            StatusCode::INTERNAL_SERVER_ERROR,
+            http::StatusCode::INTERNAL_SERVER_ERROR,
             "Internal server error".to_string(),
         );
     };
 
-    let upload_path = format!(
-        "{repo}/info/lfs/objects/{}/{}/{}",
-        HexByte(obj.oid[0]),
-        HexByte(obj.oid[1]),
-        obj.oid,
-    );
-
     BatchResponseObject {
         oid: obj.oid,
         size: obj.size,
@@ -284,7 +203,13 @@ async fn handle_download_object(
                     map
                 },
                 expires_at,
-                href: format!("{}/{upload_path}", state.base_url),
+                href: format!(
+                    "{}/{repo}/info/lfs/objects/{}/{}/{}",
+                    state.base_url,
+                    HexByte(obj.oid[0]),
+                    HexByte(obj.oid[1]),
+                    obj.oid
+                ),
             }),
             ..Default::default()
         },
@@ -292,83 +217,6 @@ async fn handle_download_object(
     }
 }
 
-fn repo_exists(name: &str) -> bool {
-    let Ok(metadata) = std::fs::metadata(name) else {
-        return false;
-    };
-    metadata.is_dir()
-}
-
-fn is_repo_public(name: &str) -> Option<bool> {
-    if !repo_exists(name) {
-        return None;
-    }
-    match std::fs::metadata(format!("{name}/git-daemon-export-ok")) {
-        Ok(metadata) if metadata.is_file() => Some(true),
-        Err(e) if e.kind() == std::io::ErrorKind::NotFound => Some(false),
-        _ => None,
-    }
-}
-
-pub async fn batch(
-    State(state): State<Arc<AppState>>,
-    headers: HeaderMap,
-    RepositoryName(repo): RepositoryName,
-    GitLfsJson(Json(payload)): GitLfsJson<BatchRequest>,
-) -> Response {
-    let Some(public) = is_repo_public(&repo) else {
-        return REPO_NOT_FOUND.into_response();
-    };
-    let Trusted(trusted) = match authorize_batch(
-        &state.authz_conf,
-        &repo,
-        public,
-        payload.operation,
-        &headers,
-    ) {
-        Ok(authn) => authn,
-        Err(e) => return e.into_response(),
-    };
-
-    if !headers
-        .get_all("Accept")
-        .iter()
-        .filter_map(|v| v.to_str().ok())
-        .any(is_git_lfs_json_mimetype)
-    {
-        let message = format!("Expected `{LFS_MIME}` in list of acceptable response media types");
-        return make_error_resp(StatusCode::NOT_ACCEPTABLE, &message).into_response();
-    }
-
-    if payload.hash_algo != HashAlgo::Sha256 {
-        let message = "Unsupported hashing algorithm specified";
-        return make_error_resp(StatusCode::CONFLICT, message).into_response();
-    }
-    if !payload.transfers.is_empty() && !payload.transfers.contains(&TransferAdapter::Basic) {
-        let message = "Unsupported transfer adapter specified (supported: basic)";
-        return make_error_resp(StatusCode::CONFLICT, message).into_response();
-    }
-
-    let mut resp = BatchResponse {
-        transfer: TransferAdapter::Basic,
-        objects: vec![],
-        hash_algo: HashAlgo::Sha256,
-    };
-    for obj in payload.objects {
-        match payload.operation {
-            Operation::Download => resp
-                .objects
-                .push(handle_download_object(&state, &repo, &obj, trusted).await),
-            Operation::Upload => {
-                if let Some(obj_resp) = handle_upload_object(&state, &repo, &obj).await {
-                    resp.objects.push(obj_resp);
-                }
-            }
-        };
-    }
-    GitLfsJson(Json(resp)).into_response()
-}
-
 #[derive(Deserialize, Copy, Clone)]
 #[serde(remote = "Self")]
 pub struct FileParams {
@@ -382,11 +230,11 @@ impl<'de> Deserialize<'de> for FileParams {
     where
         D: serde::Deserializer<'de>,
     {
-        let unchecked @ FileParams {
+        let unchecked @ Self {
             oid0: HexByte(oid0),
             oid1: HexByte(oid1),
             oid,
-        } = FileParams::deserialize(deserializer)?;
+        } = Self::deserialize(deserializer)?;
         if oid0 != oid.as_bytes()[0] {
             return Err(de::Error::custom(
                 "first OID path part does not match first byte of full OID",
401 } 249 }
402} 250}
403 251
404pub async fn obj_download( 252pub async fn handle_obj_download(
405 State(state): State<Arc<AppState>>, 253 State(state): State<Arc<AppState>>,
406 headers: HeaderMap, 254 headers: http::HeaderMap,
407 RepositoryName(repo): RepositoryName, 255 RepositoryName(repo): RepositoryName,
408 Path(FileParams { oid0, oid1, oid }): Path<FileParams>, 256 Path(FileParams { oid0, oid1, oid }): Path<FileParams>,
409) -> Response { 257) -> Response {
@@ -425,26 +273,26 @@ pub async fn obj_download(
         Err(e) => {
             println!("Failed to GetObject (repo {repo}, OID {oid}): {e}");
             return (
-                StatusCode::INTERNAL_SERVER_ERROR,
+                http::StatusCode::INTERNAL_SERVER_ERROR,
                 "Failed to query object information",
             )
                 .into_response();
         }
     };
 
-    let mut headers = header::HeaderMap::new();
+    let mut headers = http::header::HeaderMap::new();
     if let Some(content_type) = result.content_type {
         let Ok(header_value) = content_type.try_into() else {
             return (
-                StatusCode::INTERNAL_SERVER_ERROR,
+                http::StatusCode::INTERNAL_SERVER_ERROR,
                 "Object has invalid content type",
             )
                 .into_response();
         };
-        headers.insert(header::CONTENT_TYPE, header_value);
+        headers.insert(http::header::CONTENT_TYPE, header_value);
     }
     if let Some(content_length) = result.content_length {
-        headers.insert(header::CONTENT_LENGTH, content_length.into());
+        headers.insert(http::header::CONTENT_LENGTH, content_length.into());
     }
 
     let async_read = result.body.into_async_read();
@@ -453,3 +301,169 @@ pub async fn obj_download(
 
     (headers, body).into_response()
 }
+
+async fn handle_upload_object(
+    state: &AppState,
+    repo: &str,
+    obj: &BatchRequestObject,
+) -> Option<BatchResponseObject> {
+    let (oid0, oid1) = (HexByte(obj.oid[0]), HexByte(obj.oid[1]));
+    let full_path = format!("{repo}/lfs/objects/{}/{}/{}", oid0, oid1, obj.oid);
+
+    match state.check_object(repo, obj).await {
+        Ok(ObjectStatus::ExistsOk { .. }) => {
+            return None;
+        }
+        Ok(_) => {}
+        Err(_) => {
+            return Some(BatchResponseObject::error(
+                obj,
+                http::StatusCode::INTERNAL_SERVER_ERROR,
+                "Failed to query object information".to_string(),
+            ));
+        }
+    };
+
+    let expires_in = std::time::Duration::from_secs(5 * 60);
+    let expires_at = Utc::now() + expires_in;
+
+    let Ok(config) = aws_sdk_s3::presigning::PresigningConfig::expires_in(expires_in) else {
+        return Some(BatchResponseObject::error(
+            obj,
+            http::StatusCode::INTERNAL_SERVER_ERROR,
+            "Failed to generate upload URL".to_string(),
+        ));
+    };
+    let Ok(presigned) = state
+        .s3_client
+        .put_object()
+        .bucket(&state.s3_bucket)
+        .key(full_path)
+        .checksum_sha256(s3_encode_checksum(obj.oid))
+        .content_length(obj.size)
+        .presigned(config)
+        .await
+    else {
+        return Some(BatchResponseObject::error(
+            obj,
+            http::StatusCode::INTERNAL_SERVER_ERROR,
+            "Failed to generate upload URL".to_string(),
+        ));
+    };
+    Some(BatchResponseObject {
+        oid: obj.oid,
+        size: obj.size,
+        authenticated: Some(true),
+        actions: BatchResponseObjectActions {
+            upload: Some(BatchResponseObjectAction {
+                header: presigned
+                    .headers()
+                    .map(|(k, v)| (k.to_owned(), v.to_owned()))
+                    .collect(),
+                expires_at,
+                href: presigned.uri().to_string(),
+            }),
+            ..Default::default()
+        },
+        error: None,
+    })
+}
+
+pub async fn handle_batch(
+    State(state): State<Arc<AppState>>,
+    headers: http::HeaderMap,
+    RepositoryName(repo): RepositoryName,
+    GitLfsJson(Json(payload)): GitLfsJson<BatchRequest>,
+) -> Response {
+    let Some(public) = is_repo_public(&repo) else {
+        return REPO_NOT_FOUND.into_response();
+    };
+    let Trusted(trusted) = match authorize_batch(
+        &state.authz_conf,
+        &repo,
+        public,
+        payload.operation,
+        &headers,
+    ) {
+        Ok(authn) => authn,
+        Err(e) => return e.into_response(),
+    };
+
+    if !headers
+        .get_all("Accept")
+        .iter()
+        .filter_map(|v| v.to_str().ok())
+        .any(is_git_lfs_json_mimetype)
+    {
+        let message = format!("Expected `{LFS_MIME}` in list of acceptable response media types");
+        return make_error_resp(http::StatusCode::NOT_ACCEPTABLE, &message).into_response();
+    }
+
+    if payload.hash_algo != HashAlgo::Sha256 {
+        let message = "Unsupported hashing algorithm specified";
+        return make_error_resp(http::StatusCode::CONFLICT, message).into_response();
+    }
+    if !payload.transfers.is_empty() && !payload.transfers.contains(&TransferAdapter::Basic) {
+        let message = "Unsupported transfer adapter specified (supported: basic)";
+        return make_error_resp(http::StatusCode::CONFLICT, message).into_response();
+    }
+
+    let mut resp = BatchResponse {
+        transfer: TransferAdapter::Basic,
+        objects: vec![],
+        hash_algo: HashAlgo::Sha256,
+    };
+    for obj in payload.objects {
+        match payload.operation {
+            Operation::Download => resp
+                .objects
+                .push(handle_download_object(&state, &repo, &obj, trusted).await),
+            Operation::Upload => {
+                if let Some(obj_resp) = handle_upload_object(&state, &repo, &obj).await {
+                    resp.objects.push(obj_resp);
+                }
+            }
+        };
+    }
+    GitLfsJson(Json(resp)).into_response()
+}
+
+fn s3_encode_checksum(oid: Oid) -> String {
+    BASE64_STANDARD.encode(oid.as_bytes())
+}
+
+fn s3_validate_checksum(oid: Oid, obj: &HeadObjectOutput) -> bool {
+    if let Some(checksum) = obj.checksum_sha256() {
+        if let Ok(checksum) = BASE64_STANDARD.decode(checksum) {
+            if let Ok(checksum32b) = TryInto::<[u8; 32]>::try_into(checksum) {
+                return Oid::from(checksum32b) == oid;
+            }
+        }
+    }
+    true
+}
+
+fn s3_validate_size(expected: i64, obj: &HeadObjectOutput) -> bool {
+    if let Some(length) = obj.content_length() {
+        return length == expected;
+    }
+    true
+}
+
+fn repo_exists(name: &str) -> bool {
+    let Ok(metadata) = std::fs::metadata(name) else {
+        return false;
+    };
+    metadata.is_dir()
+}
+
+fn is_repo_public(name: &str) -> Option<bool> {
+    if !repo_exists(name) {
+        return None;
+    }
+    match std::fs::metadata(format!("{name}/git-daemon-export-ok")) {
+        Ok(metadata) if metadata.is_file() => Some(true),
+        Err(e) if e.kind() == std::io::ErrorKind::NotFound => Some(false),
+        _ => None,
+    }
+}
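
Two changes in handler.rs go beyond moving code. First, upload and download now share `AppState::check_object`, so a single HeadObject call classifies an object as `ExistsOk`, `ExistsInconsistent`, or `DoesNotExist` for both paths. Second, the presigned PUT switches from `obj.oid.to_string()` to `s3_encode_checksum(obj.oid)`: S3's `x-amz-checksum-sha256` header takes the base64 encoding of the raw 32-byte digest, while `Oid`'s `Display` is, by all appearances here, lowercase hex (Git LFS OIDs are hex strings). A small sketch of the difference:

```rust
use base64::{Engine, prelude::BASE64_STANDARD};

fn main() {
    let digest: [u8; 32] = [0xab; 32]; // stand-in for a SHA-256 digest

    // What x-amz-checksum-sha256 expects: base64 of the raw digest bytes.
    let b64 = BASE64_STANDARD.encode(digest);

    // What a hex Display produces; S3 does not accept this form here.
    let hex: String = digest.iter().map(|b| format!("{b:02x}")).collect();

    assert_ne!(b64, hex);
    println!("base64: {b64}\nhex:    {hex}");
}
```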
diff --git a/gitolfs3-server/src/main.rs b/gitolfs3-server/src/main.rs
index c9911ed..c88de76 100644
--- a/gitolfs3-server/src/main.rs
+++ b/gitolfs3-server/src/main.rs
@@ -9,13 +9,14 @@ use config::Config;
 use dlimit::DownloadLimiter;
 
 use axum::{
+    Router, ServiceExt,
     extract::OriginalUri,
-    http::{StatusCode, Uri},
+    http::{self, Uri},
     routing::{get, post},
-    Router, ServiceExt,
 };
-use handler::AppState;
+use handler::{AppState, handle_batch, handle_obj_download};
 use std::{process::ExitCode, sync::Arc};
+use tokio::net::TcpListener;
 use tower::Layer;
 
 #[tokio::main]
@@ -39,14 +40,14 @@ async fn main() -> ExitCode {
         dl_limiter,
     });
     let app = Router::new()
-        .route("/batch", post(handler::batch))
-        .route("/:oid0/:oid1/:oid", get(handler::obj_download))
+        .route("/batch", post(handle_batch))
+        .route("/{oid0}/{oid1}/{oid}", get(handle_obj_download))
         .with_state(shared_state);
 
     let middleware = axum::middleware::map_request(rewrite_url);
     let app_with_middleware = middleware.layer(app);
 
-    let listener = match tokio::net::TcpListener::bind(conf.listen_addr).await {
+    let listener = match TcpListener::bind(conf.listen_addr).await {
         Ok(listener) => listener,
         Err(e) => {
             println!("Failed to listen: {e}");
@@ -63,25 +64,23 @@ async fn main() -> ExitCode {
     }
 }
 
-async fn rewrite_url<B>(
-    mut req: axum::http::Request<B>,
-) -> Result<axum::http::Request<B>, StatusCode> {
+async fn rewrite_url<B>(mut req: http::Request<B>) -> Result<http::Request<B>, http::StatusCode> {
     let uri = req.uri();
     let original_uri = OriginalUri(uri.clone());
 
     let Some(path_and_query) = uri.path_and_query() else {
         // L @ no path & query
-        return Err(StatusCode::BAD_REQUEST);
+        return Err(http::StatusCode::BAD_REQUEST);
     };
     let Some((repo, path)) = path_and_query.path().split_once("/info/lfs/objects") else {
-        return Err(StatusCode::NOT_FOUND);
+        return Err(http::StatusCode::NOT_FOUND);
     };
     let repo = repo
         .trim_start_matches('/')
         .trim_end_matches('/')
         .to_string();
     if !path.starts_with('/') || !repo.ends_with(".git") {
-        return Err(StatusCode::NOT_FOUND);
+        return Err(http::StatusCode::NOT_FOUND);
     }
 
     let mut parts = uri.clone().into_parts();
@@ -90,7 +89,7 @@ async fn rewrite_url<B>(
         Some(q) => format!("{path}?{q}").try_into().ok(),
     };
     let Ok(new_uri) = Uri::from_parts(parts) else {
-        return Err(StatusCode::INTERNAL_SERVER_ERROR);
+        return Err(http::StatusCode::INTERNAL_SERVER_ERROR);
     };
 
     *req.uri_mut() = new_uri;
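
The route table above uses axum 0.8's new capture syntax: path parameters are written `{param}` rather than the old `:param`, which 0.8 no longer accepts at router construction. In isolation:

```rust
use axum::{Router, routing::get};

async fn handler() -> &'static str {
    "ok"
}

fn main() {
    // axum 0.7: Router::new().route("/:oid0/:oid1/:oid", get(handler))
    // axum 0.8 uses braces for captures:
    let _app: Router = Router::new().route("/{oid0}/{oid1}/{oid}", get(handler));
}
```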