ankurah_storage_postgres/
lib.rs

use std::{
    collections::BTreeMap,
    sync::{Arc, RwLock},
};

use ankurah_core::{
    error::{MutationError, RetrievalError, StateError},
    property::backend::backend_from_string,
    storage::{StorageCollection, StorageEngine},
};
use ankurah_proto::{Attestation, AttestationSet, Attested, EntityState, EventId, OperationSet, State, StateBuffers};

use futures_util::{pin_mut, TryStreamExt};

pub mod sql_builder;
pub mod value;

use value::PGValue;

use ankurah_proto::{Clock, CollectionId, EntityId, Event};
use async_trait::async_trait;
use bb8_postgres::{tokio_postgres::NoTls, PostgresConnectionManager};
use tokio_postgres::{error::SqlState, types::ToSql};
use tracing::{debug, error, info, warn};

pub struct Postgres {
    pool: bb8::Pool<PostgresConnectionManager<NoTls>>,
}

impl Postgres {
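    /// Construct the storage engine from an existing bb8 connection pool.
    ///
    /// A minimal construction sketch (illustrative only; the connection string,
    /// pool settings, and crate path `ankurah_storage_postgres` are assumptions):
    ///
    /// ```no_run
    /// # async fn example() -> anyhow::Result<()> {
    /// use ankurah_storage_postgres::Postgres;
    /// use bb8_postgres::{tokio_postgres::NoTls, PostgresConnectionManager};
    ///
    /// let manager = PostgresConnectionManager::new_from_stringlike("host=localhost user=postgres dbname=ankurah", NoTls)?;
    /// let pool = bb8::Pool::builder().build(manager).await?;
    /// let storage = Postgres::new(pool)?;
    /// # Ok(())
    /// # }
    /// ```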
    pub fn new(pool: bb8::Pool<PostgresConnectionManager<NoTls>>) -> anyhow::Result<Self> { Ok(Self { pool }) }

    // TODO: newtype this to `BucketName(&str)` with a constructor that
    // only accepts a subset of characters.
    pub fn sane_name(collection: &str) -> bool {
        collection.chars().all(|char| char.is_alphanumeric() || matches!(char, '_' | '.' | ':'))
    }
}

#[async_trait]
impl StorageEngine for Postgres {
    type Value = PGValue;

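    // Opening a collection lazily creates its state and event tables and primes
    // the per-bucket column cache.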
    async fn collection(&self, collection_id: &CollectionId) -> Result<std::sync::Arc<dyn StorageCollection>, RetrievalError> {
        if !Postgres::sane_name(collection_id.as_str()) {
            return Err(RetrievalError::InvalidBucketName);
        }

        let mut client = self.pool.get().await.map_err(RetrievalError::storage)?;

        // look up the current database name; it doubles as the `table_catalog`
        // used when querying information_schema in rebuild_columns_cache
        let schema = client.query_one("SELECT current_database()", &[]).await.map_err(RetrievalError::storage)?;
        let schema = schema.get("current_database");

        let bucket = PostgresBucket {
            pool: self.pool.clone(),
            schema,
            collection_id: collection_id.clone(),
            columns: Arc::new(RwLock::new(Vec::new())),
        };

        // Create tables if they don't exist
        bucket.create_state_table(&mut client).await?;
        bucket.create_event_table(&mut client).await?;
        bucket.rebuild_columns_cache(&mut client).await?;

        Ok(Arc::new(bucket))
    }

    async fn delete_all_collections(&self) -> Result<bool, MutationError> {
        let mut client = self.pool.get().await.map_err(|err| MutationError::General(Box::new(err)))?;

        // Get all tables in the public schema
        let query = r#"
            SELECT table_name
            FROM information_schema.tables
            WHERE table_schema = 'public'
        "#;

        let rows = client.query(query, &[]).await.map_err(|err| MutationError::General(Box::new(err)))?;
        if rows.is_empty() {
            return Ok(false);
        }

        // Start a transaction to drop all tables atomically
        let transaction = client.transaction().await.map_err(|err| MutationError::General(Box::new(err)))?;

        // Drop each table
        for row in rows {
            let table_name: String = row.get("table_name");
            let drop_query = format!(r#"DROP TABLE IF EXISTS "{}""#, table_name);
            transaction.execute(&drop_query, &[]).await.map_err(|err| MutationError::General(Box::new(err)))?;
        }

        // Commit the transaction
        transaction.commit().await.map_err(|err| MutationError::General(Box::new(err)))?;

        Ok(true)
    }
}

#[derive(Clone, Debug)]
pub struct PostgresColumn {
    pub name: String,
    pub is_nullable: bool,
    pub data_type: String,
}

pub struct PostgresBucket {
    pool: bb8::Pool<PostgresConnectionManager<NoTls>>,
    collection_id: CollectionId,
    schema: String,
    columns: Arc<RwLock<Vec<PostgresColumn>>>,
}

impl PostgresBucket {
    fn state_table(&self) -> String { self.collection_id.as_str().to_string() }

    pub fn event_table(&self) -> String { format!("{}_event", self.collection_id.as_str()) }

    /// Rebuild the cache of columns in the table.
    pub async fn rebuild_columns_cache(&self, client: &mut tokio_postgres::Client) -> Result<(), StateError> {
        debug!("PostgresBucket({}).rebuild_columns_cache", self.collection_id);
        let column_query =
            r#"SELECT column_name, is_nullable, data_type FROM information_schema.columns WHERE table_catalog = $1 AND table_name = $2;"#
                .to_string();
        let mut new_columns = Vec::new();
        debug!("Querying existing columns: {:?}, [{:?}, {:?}]", column_query, &self.schema, &self.collection_id.as_str());
        let rows = client
            .query(&column_query, &[&self.schema, &self.collection_id.as_str()])
            .await
            .map_err(|err| StateError::DDLError(Box::new(err)))?;
        for row in rows {
            let is_nullable: String = row.get("is_nullable");
            new_columns.push(PostgresColumn {
                name: row.get("column_name"),
                is_nullable: is_nullable.eq("YES"),
                data_type: row.get("data_type"),
            })
        }

        let mut columns = self.columns.write().unwrap();
        *columns = new_columns;
        drop(columns);

        Ok(())
    }

    pub fn existing_columns(&self) -> Vec<String> {
        let columns = self.columns.read().unwrap();
        columns.iter().map(|column| column.name.clone()).collect()
    }

    pub fn column(&self, column_name: &String) -> Option<PostgresColumn> {
        let columns = self.columns.read().unwrap();
        columns.iter().find(|column| column.name == *column_name).cloned()
    }

    pub fn has_column(&self, column_name: &String) -> bool { self.column(column_name).is_some() }

    pub async fn create_event_table(&self, client: &mut tokio_postgres::Client) -> Result<(), StateError> {
        let create_query = format!(
            r#"CREATE TABLE IF NOT EXISTS "{}"(
                "id" character(43) PRIMARY KEY,
                "entity_id" character(22),
                "operations" bytea,
                "parent" character(43)[],
                "attestations" bytea
            )"#,
            self.event_table()
        );

        debug!("{create_query}");
        client.execute(&create_query, &[]).await.map_err(|e| StateError::DDLError(Box::new(e)))?;
        Ok(())
    }

    pub async fn create_state_table(&self, client: &mut tokio_postgres::Client) -> Result<(), StateError> {
        let create_query = format!(
            r#"CREATE TABLE IF NOT EXISTS "{}"(
                "id" character(22) PRIMARY KEY,
                "state_buffer" BYTEA,
                "head" character(43)[],
                "attestations" BYTEA[]
            )"#,
            self.state_table()
        );

        debug!("{create_query}");
        match client.execute(&create_query, &[]).await {
            Ok(_) => Ok(()),
            Err(err) => {
                error!("PostgresBucket({}).create_state_table: {}", self.collection_id, err);
                Err(StateError::DDLError(Box::new(err)))
            }
        }
    }

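    /// Add any columns that are not yet present on the state table, then rebuild
    /// the column cache. The table schema grows on demand as new property types
    /// are materialized.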
    pub async fn add_missing_columns(
        &self,
        client: &mut tokio_postgres::Client,
        missing: Vec<(String, &'static str)>, // column name, datatype
    ) -> Result<(), StateError> {
        for (column, datatype) in missing {
            if Postgres::sane_name(&column) {
                let alter_query = format!(r#"ALTER TABLE "{}" ADD COLUMN "{}" {}"#, self.state_table(), column, datatype,);
                info!("PostgresBucket({}).add_missing_columns: {}", self.collection_id, alter_query);
                match client.execute(&alter_query, &[]).await {
                    Ok(_) => {}
                    Err(err) => {
                        warn!("Error adding column: {} to table: {} - rebuilding columns cache", err, self.state_table());
                        self.rebuild_columns_cache(client).await?;
                        return Err(StateError::DDLError(Box::new(err)));
                    }
                }
            }
        }

        self.rebuild_columns_cache(client).await?;
        Ok(())
    }
}

#[async_trait]
impl StorageCollection for PostgresBucket {
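    // Upsert the entity's state row, materializing individual property values
    // into their own columns alongside the serialized state buffer. Returns true
    // when the stored head actually changed (or the entity is new).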
    async fn set_state(&self, state: Attested<EntityState>) -> Result<bool, MutationError> {
        let state_buffers = bincode::serialize(&state.payload.state.state_buffers)?;
        let attestations: Vec<Vec<u8>> = state.attestations.iter().map(bincode::serialize).collect::<Result<Vec<_>, _>>()?;
        let id = state.payload.entity_id;

        // New records are expected to have a non-empty head; warn if it is missing
        if state.payload.state.head.is_empty() {
            warn!("Empty head detected for entity {}", id);
        }

        let mut client = self.pool.get().await.map_err(|err| MutationError::General(err.into()))?;

        let mut columns: Vec<String> = vec!["id".to_owned(), "state_buffer".to_owned(), "head".to_owned(), "attestations".to_owned()];
        let mut params: Vec<&(dyn ToSql + Sync)> = Vec::new();
        params.push(&id);
        params.push(&state_buffers);
        params.push(&state.payload.state.head);
        params.push(&attestations);

        let mut materialized: Vec<(String, Option<PGValue>)> = Vec::new();
        let mut seen_properties = std::collections::HashSet::new();

        // Process property values directly from state buffers
        for (name, state_buffer) in state.payload.state.state_buffers.iter() {
            let backend = backend_from_string(name, Some(state_buffer))?;
            for (column, value) in backend.property_values() {
                if !seen_properties.insert(column.clone()) {
                    // Skip if property already seen in another backend
                    // TODO: this should cause all (or subsequent?) fields with the same name
                    // to be suffixed with the property id when we have property ids
                    // requires some thought (and field metadata) on how to do this right
                    continue;
                }

                let pg_value: Option<PGValue> = value.map(|value| value.into());
                if !self.has_column(&column) {
                    // We don't have the column yet and we know the type.
                    if let Some(ref pg_value) = pg_value {
                        self.add_missing_columns(&mut client, vec![(column.clone(), pg_value.postgres_type())]).await?;
                    } else {
                        // The column doesn't exist yet and we don't have a value.
                        // This means the entire column is already null/none so we
                        // don't need to set anything.
                        continue;
                    }
                }

                materialized.push((column.clone(), pg_value));
            }
        }

        for (name, parameter) in &materialized {
            columns.push(name.clone());

            match &parameter {
                Some(value) => match value {
                    PGValue::CharacterVarying(string) => params.push(string),
                    PGValue::SmallInt(number) => params.push(number),
                    PGValue::Integer(number) => params.push(number),
                    PGValue::BigInt(number) => params.push(number),
                    PGValue::DoublePrecision(float) => params.push(float),
                    PGValue::Bytea(bytes) => params.push(bytes),
                    PGValue::Boolean(bool) => params.push(bool),
                },
                None => params.push(&UntypedNull),
            }
        }

        let columns_str = columns.iter().map(|name| format!("\"{}\"", name)).collect::<Vec<String>>().join(", ");
        let values_str = params.iter().enumerate().map(|(index, _)| format!("${}", index + 1)).collect::<Vec<String>>().join(", ");
        let columns_update_str = columns
            .iter()
            .enumerate()
            .skip(1) // Skip "id"
            .map(|(index, name)| format!("\"{}\" = ${}", name, index + 1))
            .collect::<Vec<String>>()
            .join(", ");

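        // Upsert the new state; the `old_state` CTE captures the previous head so
        // the RETURNING clause can report whether anything actually changed.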
        // be careful with sql injection via bucket name
        let query = format!(
            r#"WITH old_state AS (
                SELECT "head" FROM "{0}" WHERE "id" = $1
            )
            INSERT INTO "{0}"({1}) VALUES({2})
            ON CONFLICT("id") DO UPDATE SET {3}
            RETURNING (SELECT "head" FROM old_state) as old_head"#,
            self.state_table(),
            columns_str,
            values_str,
            columns_update_str
        );

        debug!("PostgresBucket({}).set_state: {}", self.collection_id, query);
        let row = match client.query_one(&query, params.as_slice()).await {
            Ok(row) => row,
            Err(err) => {
                let kind = error_kind(&err);
                if let ErrorKind::UndefinedTable { table } = kind {
                    if table == self.state_table() {
                        self.create_state_table(&mut client).await?;
                        return self.set_state(state).await; // retry
                    }
                }

                return Err(StateError::DDLError(Box::new(err)).into());
            }
        };

        // If this is a new entity (no old_head), or if the heads are different, return true
        let old_head: Option<Clock> = row.get("old_head");
        let changed = match old_head {
            None => true, // New entity
            Some(old_head) => old_head != state.payload.state.head,
        };

        debug!("PostgresBucket({}).set_state: Changed: {}", self.collection_id, changed);
        Ok(changed)
    }

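    // Load a single entity's state row. Returns EntityNotFound if the row is
    // missing or the backing table has not been created yet.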
    async fn get_state(&self, id: EntityId) -> Result<Attested<EntityState>, RetrievalError> {
        // be careful with sql injection via bucket name
        let query = format!(r#"SELECT "id", "state_buffer", "head", "attestations" FROM "{}" WHERE "id" = $1"#, self.state_table());

        let mut client = match self.pool.get().await {
            Ok(client) => client,
            Err(err) => {
                return Err(RetrievalError::StorageError(err.into()));
            }
        };

        debug!("PostgresBucket({}).get_state: {}", self.collection_id, query);
        let row = match client.query_one(&query, &[&id]).await {
            Ok(row) => row,
            Err(err) => {
                let kind = error_kind(&err);
                match kind {
                    ErrorKind::RowCount => {
                        return Err(RetrievalError::EntityNotFound(id));
                    }
                    ErrorKind::UndefinedTable { table } => {
                        if table == self.state_table() {
                            self.create_state_table(&mut client).await.map_err(|e| RetrievalError::StorageError(e.into()))?;
                            return Err(RetrievalError::EntityNotFound(id));
                        }
                    }
                    _ => {}
                }

                return Err(RetrievalError::StorageError(err.into()));
            }
        };

        debug!("PostgresBucket({}).get_state: Row: {:?}", self.collection_id, row);
        let row_id: EntityId = row.try_get("id").map_err(RetrievalError::storage)?;
        assert_eq!(row_id, id);

        let serialized_buffers: Vec<u8> = row.try_get("state_buffer").map_err(RetrievalError::storage)?;
        let state_buffers: BTreeMap<String, Vec<u8>> = bincode::deserialize(&serialized_buffers).map_err(RetrievalError::storage)?;
        let head: Clock = row.try_get("head").map_err(RetrievalError::storage)?;
        let attestation_bytes: Vec<Vec<u8>> = row.try_get("attestations").map_err(RetrievalError::storage)?;
        let attestations = attestation_bytes
            .into_iter()
            .map(|bytes| bincode::deserialize(&bytes))
            .collect::<Result<Vec<Attestation>, _>>()
            .map_err(RetrievalError::storage)?;

        Ok(Attested {
            payload: EntityState {
                entity_id: id,
                collection: self.collection_id.clone(),
                state: State { state_buffers: StateBuffers(state_buffers), head },
            },
            attestations: AttestationSet(attestations),
        })
    }

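    // Fetch all entity states matching the selection. A missing table yields an
    // empty result, and a missing column is retried with that column treated as
    // NULL, since a column that was never created means no row has a value for it.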
    async fn fetch_states(&self, selection: &ankql::ast::Selection) -> Result<Vec<Attested<EntityState>>, RetrievalError> {
        debug!("fetch_states: {:?}", selection);
        let client = self.pool.get().await.map_err(|err| RetrievalError::StorageError(Box::new(err)))?;

        let mut results = Vec::new();
        let mut builder = SqlBuilder::with_fields(vec!["id", "state_buffer", "head", "attestations"]);
        builder.table_name(self.state_table());
        builder.selection(selection)?;

        let (sql, args) = builder.build()?;
        debug!("PostgresBucket({}).fetch_states: SQL: {} with args: {:?}", self.collection_id, sql, args);

        let stream = match client.query_raw(&sql, args).await {
            Ok(stream) => stream,
            Err(err) => {
                let kind = error_kind(&err);
                match kind {
                    ErrorKind::UndefinedTable { table } => {
                        if table == self.state_table() {
                            // Table doesn't exist yet, return empty results
                            return Ok(Vec::new());
                        }
                    }
                    ErrorKind::UndefinedColumn { table, column } => {
                        // this means we didn't write the column yet, which suggests that all values are null
                        // So we can recompute the predicate to treat this column as always NULL and retry
                        debug!("Undefined column: {} in table: {:?}, {}", column, table, self.state_table());
                        let new_selection = selection.assume_null(&[column]);
                        return self.fetch_states(&new_selection).await;
                    }
                    _ => {}
                }

                return Err(RetrievalError::StorageError(err.into()));
            }
        };
        pin_mut!(stream);

        while let Some(row) = stream.try_next().await.map_err(RetrievalError::storage)? {
            let id: EntityId = row.try_get(0).map_err(RetrievalError::storage)?;
            let state_buffer: Vec<u8> = row.try_get(1).map_err(RetrievalError::storage)?;
            let state_buffers: BTreeMap<String, Vec<u8>> = bincode::deserialize(&state_buffer).map_err(RetrievalError::storage)?;
            let head: Clock = row.try_get("head").map_err(RetrievalError::storage)?;
            let attestation_bytes: Vec<Vec<u8>> = row.try_get("attestations").map_err(RetrievalError::storage)?;
            let attestations = attestation_bytes
                .into_iter()
                .map(|bytes| bincode::deserialize(&bytes))
                .collect::<Result<Vec<Attestation>, _>>()
                .map_err(RetrievalError::storage)?;

            results.push(Attested {
                payload: EntityState {
                    entity_id: id,
                    collection: self.collection_id.clone(),
                    state: State { state_buffers: StateBuffers(state_buffers), head },
                },
                attestations: AttestationSet(attestations),
            });
        }

        Ok(results)
    }

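    // Append an event row, creating the event table on first use. Returns true
    // if a row was inserted.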
    async fn add_event(&self, entity_event: &Attested<Event>) -> Result<bool, MutationError> {
        let operations = bincode::serialize(&entity_event.payload.operations)?;
        let attestations = bincode::serialize(&entity_event.attestations)?;

        let query = format!(
            r#"INSERT INTO "{0}"("id", "entity_id", "operations", "parent", "attestations") VALUES($1, $2, $3, $4, $5)"#,
            self.event_table(),
        );

        let mut client = self.pool.get().await.map_err(|err| MutationError::General(err.into()))?;
        debug!("PostgresBucket({}).add_event: {}", self.collection_id, query);
        let affected = match client
            .execute(
                &query,
                &[&entity_event.payload.id(), &entity_event.payload.entity_id, &operations, &entity_event.payload.parent, &attestations],
            )
            .await
        {
            Ok(affected) => affected,
            Err(err) => {
                let kind = error_kind(&err);
                match kind {
                    ErrorKind::UndefinedTable { table } => {
                        if table == self.event_table() {
                            self.create_event_table(&mut client).await?;
                            return self.add_event(entity_event).await; // retry
                        }
                    }
                    _ => {
                        error!("PostgresBucket({}).add_event: Error: {:?}", self.collection_id, err);
                    }
                }

                return Err(StateError::DMLError(Box::new(err)).into());
            }
        };

        Ok(affected > 0)
    }

    async fn get_events(&self, event_ids: Vec<EventId>) -> Result<Vec<Attested<Event>>, RetrievalError> {
        if event_ids.is_empty() {
            return Ok(Vec::new());
        }

        let query = format!(
            r#"SELECT "id", "entity_id", "operations", "parent", "attestations" FROM "{0}" WHERE "id" = ANY($1)"#,
            self.event_table(),
        );

        let client = self.pool.get().await.map_err(RetrievalError::storage)?;
        let rows = match client.query(&query, &[&event_ids]).await {
            Ok(rows) => rows,
            Err(err) => {
                let kind = error_kind(&err);
                match kind {
                    ErrorKind::UndefinedTable { table } if table == self.event_table() => return Ok(Vec::new()),
                    _ => return Err(RetrievalError::storage(err)),
                }
            }
        };

        let mut events = Vec::new();
        for row in rows {
            let entity_id: EntityId = row.try_get("entity_id").map_err(RetrievalError::storage)?;
            let operations: OperationSet = row.try_get("operations").map_err(RetrievalError::storage)?;
            let parent: Clock = row.try_get("parent").map_err(RetrievalError::storage)?;
            let attestations_binary: Vec<u8> = row.try_get("attestations").map_err(RetrievalError::storage)?;
            let attestations: Vec<Attestation> = bincode::deserialize(&attestations_binary).map_err(RetrievalError::storage)?;

            let event = Attested {
                payload: Event { collection: self.collection_id.clone(), entity_id, operations, parent },
                attestations: AttestationSet(attestations),
            };
            events.push(event);
        }
        Ok(events)
    }

    async fn dump_entity_events(&self, entity_id: EntityId) -> Result<Vec<Attested<Event>>, ankurah_core::error::RetrievalError> {
        let query =
            format!(r#"SELECT "id", "operations", "parent", "attestations" FROM "{0}" WHERE "entity_id" = $1"#, self.event_table(),);

        let client = self.pool.get().await.map_err(RetrievalError::storage)?;
        debug!("PostgresBucket({}).dump_entity_events: {}", self.collection_id, query);
        let rows = match client.query(&query, &[&entity_id]).await {
            Ok(rows) => rows,
            Err(err) => {
                let kind = error_kind(&err);
                if let ErrorKind::UndefinedTable { table } = kind {
                    if table == self.event_table() {
                        return Ok(Vec::new());
                    }
                }

                return Err(RetrievalError::storage(err));
            }
        };

        let mut events = Vec::new();
        for row in rows {
            // let event_id: EventId = row.try_get("id").map_err(|err| RetrievalError::storage(err))?;
            let operations_binary: Vec<u8> = row.try_get("operations").map_err(RetrievalError::storage)?;
            let operations = bincode::deserialize(&operations_binary).map_err(RetrievalError::storage)?;
            let parent: Clock = row.try_get("parent").map_err(RetrievalError::storage)?;
            let attestations_binary: Vec<u8> = row.try_get("attestations").map_err(RetrievalError::storage)?;
            let attestations: Vec<Attestation> = bincode::deserialize(&attestations_binary).map_err(RetrievalError::storage)?;

            events.push(Attested {
                payload: Event { collection: self.collection_id.clone(), entity_id, operations, parent },
                attestations: AttestationSet(attestations),
            });
        }

        Ok(events)
    }
}

// Some hacky shit because rust-postgres doesn't let us ask for the error kind
// TODO: remove this when https://github.com/sfackler/rust-postgres/pull/1185
//       gets merged
#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub enum ErrorKind {
    RowCount,
    UndefinedTable { table: String },
    UndefinedColumn { table: Option<String>, column: String },
    Unknown,
}

pub fn error_kind(err: &tokio_postgres::Error) -> ErrorKind {
    let string = err.to_string().trim().to_owned();
    let _db_error = err.as_db_error();
    let sql_code = err.code().cloned();

    if string == "query returned an unexpected number of rows" {
        return ErrorKind::RowCount;
    }

    // Useful for adding new errors
    // error!("postgres error: {:?}", err);
    // error!("db_err: {:?}", err.as_db_error());
    // error!("sql_code: {:?}", err.code());
    // error!("err: {:?}", err);
    // error!("err: {:?}", err.to_string());

    let quote_indices = |s: &str| {
        let mut quotes = Vec::new();
        for (index, char) in s.char_indices() {
            if char == '"' {
                quotes.push(index)
            }
        }
        quotes
    };

    match sql_code {
        Some(SqlState::UNDEFINED_TABLE) => {
            // relation "album" does not exist
            let quotes = quote_indices(&string);
            let table = &string[quotes[0] + 1..quotes[1]];
            ErrorKind::UndefinedTable { table: table.to_owned() }
        }
        Some(SqlState::UNDEFINED_COLUMN) => {
            // Handle both formats:
            // "column "name" of relation "album" does not exist"
            // "column "status" does not exist"
            let quotes = quote_indices(&string);
            let column = string[quotes[0] + 1..quotes[1]].to_owned();

            let table = if quotes.len() >= 4 {
                // Full format with table name
                Some(string[quotes[2] + 1..quotes[3]].to_owned())
            } else {
                // Short format without a table name
                None
            };

            ErrorKind::UndefinedColumn { table, column }
        }
        _ => ErrorKind::Unknown,
    }
}

#[allow(unused)]
pub struct MissingMaterialized {
    pub name: String,
}

use bytes::BytesMut;
use tokio_postgres::types::{to_sql_checked, IsNull, Type};

use crate::sql_builder::SqlBuilder;

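/// A parameter that binds as SQL NULL regardless of the target column type.
/// Used in `set_state` when a materialized property has no value, since the
/// concrete Postgres type is not known at bind time.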
#[derive(Debug)]
struct UntypedNull;

impl ToSql for UntypedNull {
    fn to_sql(&self, _ty: &Type, _out: &mut BytesMut) -> Result<IsNull, Box<dyn std::error::Error + Sync + Send>> { Ok(IsNull::Yes) }

    fn accepts(_ty: &Type) -> bool {
        true // Accept all types
    }

    to_sql_checked!();
}
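
#[cfg(test)]
mod tests {
    use super::*;

    // Minimal sanity checks (illustrative only) for the identifier filter that
    // guards table and column names against SQL injection.
    #[test]
    fn sane_name_accepts_simple_identifiers() {
        assert!(Postgres::sane_name("album"));
        assert!(Postgres::sane_name("album_2024"));
        assert!(Postgres::sane_name("my.collection:v1"));
    }

    #[test]
    fn sane_name_rejects_quotes_and_whitespace() {
        assert!(!Postgres::sane_name("album\"; DROP TABLE album"));
        assert!(!Postgres::sane_name("album name"));
    }
}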