1use crate::{GitObject, ObjectId, ObjectType, RefStore, Reference, Result, StorageError};
4use bytes::Bytes;
5use flate2::read::ZlibDecoder;
6use flate2::write::ZlibEncoder;
7use flate2::Compression;
8use parking_lot::RwLock;
9use std::collections::HashMap;
10use std::io::{Read, Write};
11use std::sync::Arc;
12
/// In-memory store of git objects, keyed by their [`ObjectId`].
///
/// The map is wrapped in an `RwLock` so every method can take `&self`:
/// lookups take a shared read lock, `put` takes an exclusive write lock.
#[derive(Debug, Default)]
pub struct ObjectStore {
    // id -> object; `get` clones the stored object out under the read lock.
    objects: RwLock<HashMap<ObjectId, GitObject>>,
}
19
20impl ObjectStore {
21 pub fn new() -> Self {
23 Self::default()
24 }
25
26 pub fn put(&self, object: GitObject) -> ObjectId {
28 let id = object.id;
29 self.objects.write().insert(id, object);
30 id
31 }
32
33 pub fn get(&self, id: &ObjectId) -> Result<GitObject> {
35 self.objects
36 .read()
37 .get(id)
38 .cloned()
39 .ok_or_else(|| StorageError::ObjectNotFound(id.to_hex()))
40 }
41
42 pub fn contains(&self, id: &ObjectId) -> bool {
44 self.objects.read().contains_key(id)
45 }
46
47 pub fn len(&self) -> usize {
49 self.objects.read().len()
50 }
51
52 pub fn is_empty(&self) -> bool {
54 self.objects.read().is_empty()
55 }
56
57 pub fn list_objects(&self) -> Vec<ObjectId> {
59 self.objects.read().keys().copied().collect()
60 }
61
62 pub fn put_blob(&self, content: impl Into<Bytes>) -> ObjectId {
64 self.put(GitObject::blob(content))
65 }
66
67 pub fn compress(object: &GitObject) -> Result<Vec<u8>> {
69 let header = format!("{} {}\0", object.object_type.as_str(), object.data.len());
70 let mut encoder = ZlibEncoder::new(Vec::new(), Compression::default());
71 encoder
72 .write_all(header.as_bytes())
73 .map_err(|e| StorageError::Compression(e.to_string()))?;
74 encoder
75 .write_all(&object.data)
76 .map_err(|e| StorageError::Compression(e.to_string()))?;
77 encoder
78 .finish()
79 .map_err(|e| StorageError::Compression(e.to_string()))
80 }
81
82 pub fn decompress(compressed: &[u8]) -> Result<GitObject> {
84 let mut decoder = ZlibDecoder::new(compressed);
85 let mut decompressed = Vec::new();
86 decoder
87 .read_to_end(&mut decompressed)
88 .map_err(|e| StorageError::Compression(e.to_string()))?;
89
90 let null_pos = decompressed.iter().position(|&b| b == 0).ok_or_else(|| {
92 StorageError::InvalidObject("missing null byte in header".to_string())
93 })?;
94
95 let header = String::from_utf8_lossy(&decompressed[..null_pos]);
96 let parts: Vec<&str> = header.split(' ').collect();
97 if parts.len() != 2 {
98 return Err(StorageError::InvalidObject(format!(
99 "invalid header: {}",
100 header
101 )));
102 }
103
104 let object_type = ObjectType::parse(parts[0])?;
105 let _size: usize = parts[1]
106 .parse()
107 .map_err(|_| StorageError::InvalidObject("invalid size".to_string()))?;
108
109 let data = Bytes::from(decompressed[null_pos + 1..].to_vec());
110 Ok(GitObject::new(object_type, data))
111 }
112}
113
/// A named repository: an object store plus a ref store.
///
/// Both stores are behind `Arc`, so they can be shared with other
/// components independently of the `Repository` value itself.
#[derive(Debug)]
pub struct Repository {
    /// Repository name (the part after the slash in the "owner/name" key).
    pub name: String,
    /// Owner name (the part before the slash in the "owner/name" key).
    pub owner: String,
    /// Content-addressed object storage for this repository.
    pub objects: Arc<ObjectStore>,
    /// Reference storage (branches, HEAD, arbitrary refs).
    pub refs: Arc<RefStore>,
}
126
127impl Repository {
128 pub fn new(name: impl Into<String>, owner: impl Into<String>) -> Self {
130 let refs = Arc::new(RefStore::new());
131 refs.set_symbolic("HEAD", "refs/heads/main");
133
134 Self {
135 name: name.into(),
136 owner: owner.into(),
137 objects: Arc::new(ObjectStore::new()),
138 refs,
139 }
140 }
141
142 pub fn head(&self) -> Result<ObjectId> {
144 self.refs.resolve_head()
145 }
146
147 pub fn current_branch(&self) -> Option<String> {
149 self.refs.current_branch()
150 }
151
152 pub fn commit(
154 &self,
155 tree_id: &ObjectId,
156 message: &str,
157 author: &str,
158 committer: &str,
159 ) -> Result<ObjectId> {
160 let parents: Vec<ObjectId> = match self.head() {
162 Ok(head) => vec![head],
163 Err(_) => vec![], };
165
166 let commit = GitObject::commit(tree_id, &parents, author, committer, message);
168 let commit_id = self.objects.put(commit);
169
170 if let Some(branch) = self.current_branch() {
172 self.refs.set(&format!("refs/heads/{}", branch), commit_id);
173 } else {
174 self.refs.set("HEAD", commit_id);
176 }
177
178 Ok(commit_id)
179 }
180
181 pub fn update_ref(&self, name: &str, target: ObjectId) {
183 self.refs.set(name, target);
184 }
185
186 pub fn list_refs(&self) -> Vec<(String, Reference)> {
188 self.refs.list_all()
189 }
190}
191
/// Collection of repositories keyed by the string `"{owner}/{name}"`.
#[derive(Debug, Default)]
pub struct RepoStore {
    // RwLock lets lookups/listing proceed concurrently; `create` takes the
    // write lock so existence check + insert are atomic.
    repos: RwLock<HashMap<String, Arc<Repository>>>,
}
197
198impl RepoStore {
199 pub fn new() -> Self {
201 Self::default()
202 }
203
204 pub fn create(&self, name: &str, owner: &str) -> Result<Arc<Repository>> {
206 let mut repos = self.repos.write();
207 let key = format!("{}/{}", owner, name);
208
209 if repos.contains_key(&key) {
210 return Err(StorageError::RepoExists(key));
211 }
212
213 let repo = Arc::new(Repository::new(name, owner));
214 repos.insert(key, repo.clone());
215 Ok(repo)
216 }
217
218 pub fn get(&self, owner: &str, name: &str) -> Result<Arc<Repository>> {
220 let key = format!("{}/{}", owner, name);
221 self.repos
222 .read()
223 .get(&key)
224 .cloned()
225 .ok_or(StorageError::RepoNotFound(key))
226 }
227
228 pub fn list(&self) -> Vec<Arc<Repository>> {
230 self.repos.read().values().cloned().collect()
231 }
232
233 pub fn list_by_owner(&self, owner: &str) -> Vec<Arc<Repository>> {
235 let prefix = format!("{}/", owner);
236 self.repos
237 .read()
238 .iter()
239 .filter(|(key, _)| key.starts_with(&prefix))
240 .map(|(_, repo)| repo.clone())
241 .collect()
242 }
243}
244
#[cfg(test)]
mod tests {
    use super::*;

    // Stored blob comes back with its content intact.
    #[test]
    fn test_object_store_roundtrip() {
        let store = ObjectStore::new();
        let object = GitObject::blob(b"Hello, World!".to_vec());
        let object_id = object.id;
        store.put(object);

        let fetched = store.get(&object_id).unwrap();
        assert_eq!(fetched.data.as_ref(), b"Hello, World!");
    }

    // compress -> decompress preserves id, type, and payload.
    #[test]
    fn test_object_compression_roundtrip() {
        let source = GitObject::blob(b"Hello, World!".to_vec());
        let packed = ObjectStore::compress(&source).unwrap();
        let restored = ObjectStore::decompress(&packed).unwrap();

        assert_eq!(source.id, restored.id);
        assert_eq!(source.object_type, restored.object_type);
        assert_eq!(source.data, restored.data);
    }

    // A fresh repository starts on the "main" branch.
    #[test]
    fn test_repository_creation() {
        let store = RepoStore::new();
        let repo = store.create("test-repo", "alice").unwrap();

        assert_eq!(repo.name, "test-repo");
        assert_eq!(repo.owner, "alice");
        assert_eq!(repo.current_branch(), Some("main".to_string()));
    }

    // Committing advances HEAD to the new commit id.
    #[test]
    fn test_repository_commit() {
        let store = RepoStore::new();
        let repo = store.create("test-repo", "alice").unwrap();

        let blob_id = repo.objects.put_blob(b"file content".to_vec());

        let tree_data = format!("100644 file.txt\0{}", hex::encode(blob_id.as_bytes()));
        let tree_object = GitObject::new(ObjectType::Tree, tree_data.into_bytes());
        let tree_id = repo.objects.put(tree_object);

        let author = "Alice <alice@example.com> 1234567890 +0000";
        let commit_id = repo
            .commit(&tree_id, "Initial commit", author, author)
            .unwrap();

        assert_eq!(repo.head().unwrap(), commit_id);
    }
}