-
Notifications
You must be signed in to change notification settings - Fork 3
Expand file tree
/
Copy pathdb.rs
More file actions
315 lines (273 loc) · 9.47 KB
/
db.rs
File metadata and controls
315 lines (273 loc) · 9.47 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
use anyhow::Result;
use chrono::{DateTime, Utc};
use rusqlite::{params, Connection};
use std::path::{Path, PathBuf};
/// Thin wrapper around a single SQLite connection that stores indexed
/// files, their text chunks, and per-chunk embedding vectors.
pub struct Database {
    // Owned SQLite handle; every query in this module goes through it.
    conn: Connection,
}
/// One row of the `files` table.
#[derive(Debug, Clone)]
pub struct FileEntry {
    /// SQLite rowid (`files.id`).
    pub id: i64,
    /// Path the file was indexed under (stored as TEXT via `to_string_lossy`).
    pub path: PathBuf,
    /// Content hash recorded at index time (hash format chosen by the caller).
    pub hash: String,
    /// When the file was last indexed; persisted as RFC 3339 text.
    pub indexed_at: DateTime<Utc>,
}
/// One row of the `chunks` table, with the embedding blob decoded.
#[derive(Debug, Clone)]
pub struct ChunkEntry {
    /// SQLite rowid (`chunks.id`).
    pub id: i64,
    /// Owning row in `files` (`chunks.file_id`).
    pub file_id: i64,
    /// Position of this chunk within its file; unique per `file_id`.
    pub chunk_index: i32,
    /// Raw chunk text.
    pub content: String,
    /// First source line covered by the chunk.
    pub start_line: i32,
    /// Last source line covered by the chunk.
    pub end_line: i32,
    /// Embedding vector, decoded from the little-endian f32 BLOB column.
    pub embedding: Vec<f32>,
}
/// A scored chunk returned by `Database::search_similar`.
#[derive(Debug, Clone)]
pub struct SearchResult {
    /// `files.id` of the chunk's owning file.
    pub file_id: i64,
    /// `chunks.id` of the matched chunk.
    pub chunk_id: i64,
    /// Path of the owning file.
    pub path: PathBuf,
    /// Raw chunk text.
    pub content: String,
    /// First source line covered by the chunk.
    pub start_line: i32,
    /// Last source line covered by the chunk.
    pub end_line: i32,
    /// Cosine similarity between this chunk's embedding and the query.
    pub similarity: f32,
}
/// Summary counters returned by `Database::get_stats`.
#[derive(Debug, Default)]
pub struct DatabaseStats {
    /// Number of rows in `files`.
    pub file_count: usize,
    /// Number of rows in `chunks`.
    pub chunk_count: usize,
    /// Most recent `indexed_at` across all files; `None` when the table is
    /// empty or the stored timestamp fails to parse.
    pub last_indexed: Option<DateTime<Utc>>,
}
impl Database {
pub fn new(path: &Path) -> Result<Self> {
let conn = Connection::open(path)?;
let db = Self { conn };
db.init_schema()?;
Ok(db)
}
fn init_schema(&self) -> Result<()> {
self.conn.execute_batch(
r"
CREATE TABLE IF NOT EXISTS files (
id INTEGER PRIMARY KEY AUTOINCREMENT,
path TEXT UNIQUE NOT NULL,
hash TEXT NOT NULL,
indexed_at TEXT NOT NULL
);
CREATE TABLE IF NOT EXISTS chunks (
id INTEGER PRIMARY KEY AUTOINCREMENT,
file_id INTEGER NOT NULL,
chunk_index INTEGER NOT NULL,
content TEXT NOT NULL,
start_line INTEGER NOT NULL,
end_line INTEGER NOT NULL,
embedding BLOB NOT NULL,
FOREIGN KEY (file_id) REFERENCES files(id) ON DELETE CASCADE,
UNIQUE(file_id, chunk_index)
);
CREATE INDEX IF NOT EXISTS idx_files_path ON files(path);
CREATE INDEX IF NOT EXISTS idx_files_hash ON files(hash);
CREATE INDEX IF NOT EXISTS idx_chunks_file_id ON chunks(file_id);
",
)?;
Ok(())
}
pub fn get_file_by_path(&self, path: &Path) -> Result<Option<FileEntry>> {
let path_str = path.to_string_lossy();
let mut stmt = self
.conn
.prepare("SELECT id, path, hash, indexed_at FROM files WHERE path = ?")?;
let result = stmt.query_row([path_str.as_ref()], |row| {
Ok(FileEntry {
id: row.get(0)?,
path: PathBuf::from(row.get::<_, String>(1)?),
hash: row.get(2)?,
indexed_at: row.get::<_, String>(3)?.parse().unwrap_or_default(),
})
});
match result {
Ok(entry) => Ok(Some(entry)),
Err(rusqlite::Error::QueryReturnedNoRows) => Ok(None),
Err(e) => Err(e.into()),
}
}
pub fn insert_file(&self, path: &Path, hash: &str) -> Result<i64> {
let path_str = path.to_string_lossy();
let now = Utc::now().to_rfc3339();
// Use UPSERT syntax (INSERT ... ON CONFLICT) instead of INSERT OR REPLACE
// This preserves the ID if the row exists, only updating fields
self.conn.execute(
"INSERT INTO files (path, hash, indexed_at) VALUES (?, ?, ?)
ON CONFLICT(path) DO UPDATE SET
hash = excluded.hash,
indexed_at = excluded.indexed_at",
params![path_str.as_ref(), hash, now],
)?;
// If updated, last_insert_rowid might not point to the updated row in older sqlite versions
// or depending on driver behavior, so let's fetch the ID explicitly to be safe
let mut stmt = self.conn.prepare("SELECT id FROM files WHERE path = ?")?;
let id: i64 = stmt.query_row([path_str.as_ref()], |row| row.get(0))?;
Ok(id)
}
pub fn delete_file(&self, file_id: i64) -> Result<()> {
self.conn
.execute("DELETE FROM chunks WHERE file_id = ?", params![file_id])?;
self.conn
.execute("DELETE FROM files WHERE id = ?", params![file_id])?;
Ok(())
}
pub fn insert_chunk(
&self,
file_id: i64,
chunk_index: i32,
content: &str,
start_line: i32,
end_line: i32,
embedding: &[f32],
) -> Result<i64> {
let embedding_bytes = embedding_to_bytes(embedding);
self.conn.execute(
r"INSERT OR REPLACE INTO chunks
(file_id, chunk_index, content, start_line, end_line, embedding)
VALUES (?, ?, ?, ?, ?, ?)",
params![
file_id,
chunk_index,
content,
start_line,
end_line,
embedding_bytes
],
)?;
Ok(self.conn.last_insert_rowid())
}
pub fn search_similar(
&self,
query_embedding: &[f32],
path_prefix: &Path,
limit: usize,
) -> Result<Vec<SearchResult>> {
let path_prefix_str = path_prefix.to_string_lossy();
let like_pattern = format!("{}%", path_prefix_str);
let mut stmt = self.conn.prepare(
r"SELECT c.id, c.file_id, f.path, c.content, c.start_line, c.end_line, c.embedding
FROM chunks c
JOIN files f ON c.file_id = f.id
WHERE f.path LIKE ?",
)?;
let mut results: Vec<SearchResult> = stmt
.query_map([&like_pattern], |row| {
let embedding_blob: Vec<u8> = row.get(6)?;
let embedding = bytes_to_embedding(&embedding_blob);
let similarity = cosine_similarity(query_embedding, &embedding);
Ok(SearchResult {
chunk_id: row.get(0)?,
file_id: row.get(1)?,
path: PathBuf::from(row.get::<_, String>(2)?),
content: row.get(3)?,
start_line: row.get(4)?,
end_line: row.get(5)?,
similarity,
})
})?
.filter_map(Result::ok)
.collect();
// Sort by similarity (highest first)
results.sort_by(|a, b| {
b.similarity
.partial_cmp(&a.similarity)
.unwrap_or(std::cmp::Ordering::Equal)
});
results.truncate(limit * 3); // Get more for reranking
Ok(results)
}
pub fn get_all_chunks_for_file(&self, file_id: i64) -> Result<Vec<ChunkEntry>> {
let mut stmt = self.conn.prepare(
r"SELECT id, file_id, chunk_index, content, start_line, end_line, embedding
FROM chunks WHERE file_id = ? ORDER BY chunk_index",
)?;
let results = stmt
.query_map([file_id], |row| {
let embedding_blob: Vec<u8> = row.get(6)?;
Ok(ChunkEntry {
id: row.get(0)?,
file_id: row.get(1)?,
chunk_index: row.get(2)?,
content: row.get(3)?,
start_line: row.get(4)?,
end_line: row.get(5)?,
embedding: bytes_to_embedding(&embedding_blob),
})
})?
.filter_map(Result::ok)
.collect();
Ok(results)
}
pub fn get_stats(&self) -> Result<DatabaseStats> {
let file_count: usize = self
.conn
.query_row("SELECT COUNT(*) FROM files", [], |row| row.get(0))?;
let chunk_count: usize = self
.conn
.query_row("SELECT COUNT(*) FROM chunks", [], |row| row.get(0))?;
let last_indexed: Option<String> = self
.conn
.query_row(
"SELECT indexed_at FROM files ORDER BY indexed_at DESC LIMIT 1",
[],
|row| row.get(0),
)
.ok();
Ok(DatabaseStats {
file_count,
chunk_count,
last_indexed: last_indexed.and_then(|s| s.parse().ok()),
})
}
pub fn get_all_files(&self) -> Result<Vec<FileEntry>> {
let mut stmt = self
.conn
.prepare("SELECT id, path, hash, indexed_at FROM files")?;
let results = stmt
.query_map([], |row| {
Ok(FileEntry {
id: row.get(0)?,
path: PathBuf::from(row.get::<_, String>(1)?),
hash: row.get(2)?,
indexed_at: row.get::<_, String>(3)?.parse().unwrap_or_default(),
})
})?
.filter_map(Result::ok)
.collect();
Ok(results)
}
}
/// Serializes an `f32` embedding as a packed little-endian byte blob
/// (4 bytes per element) for storage in the `chunks.embedding` column.
fn embedding_to_bytes(embedding: &[f32]) -> Vec<u8> {
    // `flat_map(...).collect()` loses the iterator size hint (lower bound
    // becomes 0), forcing repeated reallocation on large embeddings.
    // Preallocate the exact output size and append in place instead.
    let mut bytes = Vec::with_capacity(embedding.len() * 4);
    for value in embedding {
        bytes.extend_from_slice(&value.to_le_bytes());
    }
    bytes
}
/// Decodes a packed little-endian byte blob back into `f32` values.
///
/// Trailing bytes that do not form a complete 4-byte group are ignored.
fn bytes_to_embedding(bytes: &[u8]) -> Vec<f32> {
    let mut values = Vec::with_capacity(bytes.len() / 4);
    for group in bytes.chunks_exact(4) {
        // `chunks_exact(4)` guarantees exactly four bytes per group.
        values.push(f32::from_le_bytes([group[0], group[1], group[2], group[3]]));
    }
    values
}
/// Cosine similarity of two vectors, accumulated in `f64` to reduce
/// rounding error before converting the final ratio back to `f32`.
///
/// Returns `0.0` for mismatched lengths, empty input, or when either
/// vector has zero magnitude.
fn cosine_similarity(a: &[f32], b: &[f32]) -> f32 {
    if a.len() != b.len() || a.is_empty() {
        return 0.0;
    }
    let mut dot = 0.0f64;
    let mut norm_a = 0.0f64;
    let mut norm_b = 0.0f64;
    for (&x, &y) in a.iter().zip(b.iter()) {
        let (x, y) = (f64::from(x), f64::from(y));
        dot += x * y;
        norm_a += x * x;
        norm_b += y * y;
    }
    if norm_a == 0.0 || norm_b == 0.0 {
        return 0.0;
    }
    (dot / (norm_a.sqrt() * norm_b.sqrt())) as f32
}