use serde::{Deserialize, Serialize};
use crate::error::{QueryError, QueryResult};
use crate::sql::DatabaseType;
/// A LATERAL join: a correlated subquery evaluated once per outer row.
/// Rendered as `JOIN LATERAL` on PostgreSQL/MySQL and as
/// `CROSS APPLY`/`OUTER APPLY` on SQL Server.
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
pub struct LateralJoin {
/// Raw SQL text of the correlated subquery (rendered inside parentheses).
pub subquery: String,
/// Alias the subquery's result set is exposed under.
pub alias: String,
/// CROSS vs LEFT lateral join.
pub join_type: LateralJoinType,
/// Optional ON condition; only rendered for LEFT joins (falls back to TRUE).
pub condition: Option<String>,
}
/// Join flavor for a lateral subquery.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
pub enum LateralJoinType {
/// Inner-style join; outer rows with an empty subquery result are dropped.
Cross,
/// Outer-style join; outer rows are kept even when the subquery is empty.
Left,
}
impl LateralJoin {
    /// Starts building a LATERAL join against `subquery`, exposed as `alias`.
    /// Defaults to a CROSS lateral join; use the builder to switch to LEFT
    /// or attach an ON condition.
    pub fn new(subquery: impl Into<String>, alias: impl Into<String>) -> LateralJoinBuilder {
        LateralJoinBuilder::new(subquery, alias)
    }

    /// Renders the join for PostgreSQL (`CROSS/LEFT JOIN LATERAL`).
    ///
    /// For LEFT joins a missing condition falls back to `ON TRUE`, the
    /// conventional way to keep every outer row. A condition set on a CROSS
    /// join is ignored (CROSS JOIN takes no ON clause).
    pub fn to_postgres_sql(&self) -> String {
        match self.join_type {
            LateralJoinType::Cross => {
                format!("CROSS JOIN LATERAL ({}) AS {}", self.subquery, self.alias)
            }
            LateralJoinType::Left => {
                let cond = self.condition.as_deref().unwrap_or("TRUE");
                format!(
                    "LEFT JOIN LATERAL ({}) AS {} ON {}",
                    self.subquery, self.alias, cond
                )
            }
        }
    }

    /// Renders the join for MySQL (LATERAL is available from 8.0.14).
    ///
    /// MySQL uses the same `JOIN LATERAL` grammar as PostgreSQL, so this
    /// delegates instead of duplicating the formatting logic (the two
    /// bodies were previously identical copies).
    pub fn to_mysql_sql(&self) -> String {
        self.to_postgres_sql()
    }

    /// Renders the join for SQL Server, which spells lateral joins as
    /// `CROSS APPLY` (inner) / `OUTER APPLY` (left). APPLY takes no ON
    /// clause, so any correlation must live inside the subquery itself.
    pub fn to_mssql_sql(&self) -> String {
        match self.join_type {
            LateralJoinType::Cross => {
                format!("CROSS APPLY ({}) AS {}", self.subquery, self.alias)
            }
            LateralJoinType::Left => {
                format!("OUTER APPLY ({}) AS {}", self.subquery, self.alias)
            }
        }
    }

    /// Renders the join for `db_type`.
    ///
    /// # Errors
    /// Returns an unsupported-feature error for SQLite, which has no
    /// LATERAL support.
    pub fn to_sql(&self, db_type: DatabaseType) -> QueryResult<String> {
        match db_type {
            DatabaseType::PostgreSQL => Ok(self.to_postgres_sql()),
            DatabaseType::MySQL => Ok(self.to_mysql_sql()),
            DatabaseType::MSSQL => Ok(self.to_mssql_sql()),
            DatabaseType::SQLite => Err(QueryError::unsupported(
                "LATERAL joins are not supported in SQLite",
            )),
        }
    }
}
/// Builder returned by [`LateralJoin::new`]; configures the join type and
/// ON condition before [`LateralJoinBuilder::build`] produces the value.
#[derive(Debug, Clone)]
pub struct LateralJoinBuilder {
subquery: String,
alias: String,
join_type: LateralJoinType,
condition: Option<String>,
}
impl LateralJoinBuilder {
    /// Creates a builder over `subquery` aliased as `alias`, starting out
    /// as a CROSS lateral join with no condition.
    pub fn new(subquery: impl Into<String>, alias: impl Into<String>) -> Self {
        Self {
            subquery: subquery.into(),
            alias: alias.into(),
            join_type: LateralJoinType::Cross,
            condition: None,
        }
    }

    /// Switches the join to LEFT JOIN LATERAL.
    pub fn left(self) -> Self {
        Self {
            join_type: LateralJoinType::Left,
            ..self
        }
    }

    /// Switches the join (back) to CROSS JOIN LATERAL.
    pub fn cross(self) -> Self {
        Self {
            join_type: LateralJoinType::Cross,
            ..self
        }
    }

    /// Sets the ON condition (only rendered for LEFT lateral joins).
    pub fn on(self, condition: impl Into<String>) -> Self {
        Self {
            condition: Some(condition.into()),
            ..self
        }
    }

    /// Finalizes the builder into an immutable [`LateralJoin`].
    pub fn build(self) -> LateralJoin {
        let Self {
            subquery,
            alias,
            join_type,
            condition,
        } = self;
        LateralJoin {
            subquery,
            alias,
            join_type,
            condition,
        }
    }
}
/// PostgreSQL `DISTINCT ON (...)`: keeps the first row per distinct
/// combination of the listed columns (order determined by ORDER BY).
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
pub struct DistinctOn {
/// Columns whose combination determines row uniqueness.
pub columns: Vec<String>,
}
impl DistinctOn {
    /// Builds a DISTINCT ON clause over the given column names.
    pub fn new<I, S>(columns: I) -> Self
    where
        I: IntoIterator<Item = S>,
        S: Into<String>,
    {
        let columns = columns.into_iter().map(|c| c.into()).collect();
        Self { columns }
    }

    /// Renders the PostgreSQL-only `DISTINCT ON (...)` clause.
    pub fn to_postgres_sql(&self) -> String {
        let cols = self.columns.join(", ");
        format!("DISTINCT ON ({})", cols)
    }

    /// MySQL has no DISTINCT ON; returns a SQL comment describing the usual
    /// GROUP BY workaround rather than executable SQL.
    pub fn to_mysql_workaround(&self) -> String {
        let cols = self.columns.join(", ");
        format!("-- MySQL workaround: Use GROUP BY {} with appropriate aggregates", cols)
    }
}
pub mod mongodb_distinct {
    use serde_json::Value as JsonValue;

    /// Builds a `$group` aggregation stage emulating SQL's DISTINCT ON:
    /// documents are grouped by `group_fields`, and for each group the
    /// `$first` value of every field in `first_fields` is kept.
    pub fn distinct_on_stage(group_fields: &[&str], first_fields: &[&str]) -> JsonValue {
        // `_id` is a document mapping each grouping field to "$field".
        let group_id: serde_json::Map<String, JsonValue> = group_fields
            .iter()
            .map(|f| (f.to_string(), serde_json::json!(format!("${}", f))))
            .collect();

        let mut group_spec = serde_json::Map::new();
        group_spec.insert("_id".to_string(), serde_json::json!(group_id));
        for f in first_fields {
            group_spec.insert(
                f.to_string(),
                serde_json::json!({ "$first": format!("${}", f) }),
            );
        }
        serde_json::json!({ "$group": group_spec })
    }
}
/// A RETURNING/OUTPUT clause describing which columns of the affected rows
/// a write statement should hand back to the client.
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
pub struct Returning {
/// Columns/expressions to return.
pub columns: Vec<ReturningColumn>,
/// The statement kind; picks INSERTED vs DELETED on SQL Server.
pub operation: ReturnOperation,
}
/// One entry of a RETURNING/OUTPUT column list.
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
pub enum ReturningColumn {
/// All columns (`*`, or `INSERTED.*`/`DELETED.*` on SQL Server).
All,
/// A single named column.
Column(String),
/// An arbitrary SQL expression with an alias.
Expression { expr: String, alias: String },
/// Explicit read from the INSERTED pseudo-table (SQL Server OUTPUT only).
Inserted(String),
/// Explicit read from the DELETED pseudo-table (SQL Server OUTPUT only).
Deleted(String),
}
/// The kind of write statement the RETURNING clause is attached to.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
pub enum ReturnOperation {
Insert,
Update,
Delete,
}
impl Returning {
    /// Returns every column of the affected rows
    /// (`RETURNING *` / `OUTPUT INSERTED.*`).
    pub fn all(operation: ReturnOperation) -> Self {
        Self {
            columns: vec![ReturningColumn::All],
            operation,
        }
    }

    /// Returns the named columns of the affected rows.
    pub fn columns<I, S>(operation: ReturnOperation, columns: I) -> Self
    where
        I: IntoIterator<Item = S>,
        S: Into<String>,
    {
        Self {
            columns: columns
                .into_iter()
                .map(|c| ReturningColumn::Column(c.into()))
                .collect(),
            operation,
        }
    }

    /// PostgreSQL `RETURNING ...` clause.
    pub fn to_postgres_sql(&self) -> String {
        format!("RETURNING {}", self.format_columns(DatabaseType::PostgreSQL))
    }

    /// SQLite (3.35+) `RETURNING ...` clause.
    pub fn to_sqlite_sql(&self) -> String {
        format!("RETURNING {}", self.format_columns(DatabaseType::SQLite))
    }

    /// SQL Server `OUTPUT ...` clause.
    pub fn to_mssql_sql(&self) -> String {
        format!("OUTPUT {}", self.format_columns(DatabaseType::MSSQL))
    }

    /// Pseudo-table prefix SQL Server requires on OUTPUT columns: DELETE
    /// exposes the old row image (`DELETED`); INSERT and UPDATE expose the
    /// new one (`INSERTED`).
    fn mssql_prefix(&self) -> &'static str {
        match self.operation {
            ReturnOperation::Delete => "DELETED",
            ReturnOperation::Insert | ReturnOperation::Update => "INSERTED",
        }
    }

    /// Renders the comma-separated column list. MSSQL columns are qualified
    /// with the INSERTED/DELETED pseudo-table; other databases use plain
    /// names. The prefix selection lives in `mssql_prefix` so the
    /// operation-to-pseudo-table mapping exists in exactly one place.
    fn format_columns(&self, db_type: DatabaseType) -> String {
        let mssql = db_type == DatabaseType::MSSQL;
        self.columns
            .iter()
            .map(|col| match col {
                ReturningColumn::All => {
                    if mssql {
                        format!("{}.*", self.mssql_prefix())
                    } else {
                        "*".to_string()
                    }
                }
                ReturningColumn::Column(name) => {
                    if mssql {
                        format!("{}.{}", self.mssql_prefix(), name)
                    } else {
                        name.clone()
                    }
                }
                ReturningColumn::Expression { expr, alias } => format!("{} AS {}", expr, alias),
                // Explicit pseudo-table picks are rendered as-is; they are
                // only meaningful inside SQL Server OUTPUT clauses.
                ReturningColumn::Inserted(name) => format!("INSERTED.{}", name),
                ReturningColumn::Deleted(name) => format!("DELETED.{}", name),
            })
            .collect::<Vec<_>>()
            .join(", ")
    }

    /// Renders for `db_type`.
    ///
    /// # Errors
    /// MySQL has no RETURNING clause; an unsupported error is returned with
    /// a hint about `LAST_INSERT_ID()`.
    pub fn to_sql(&self, db_type: DatabaseType) -> QueryResult<String> {
        match db_type {
            DatabaseType::PostgreSQL => Ok(self.to_postgres_sql()),
            DatabaseType::SQLite => Ok(self.to_sqlite_sql()),
            DatabaseType::MSSQL => Ok(self.to_mssql_sql()),
            DatabaseType::MySQL => Err(QueryError::unsupported(
                "RETURNING clause is not supported in MySQL. Consider using LAST_INSERT_ID() or separate SELECT.",
            )),
        }
    }
}
/// A row-level locking clause (`FOR UPDATE` and friends) appended to a
/// SELECT, or rendered as table hints on SQL Server.
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
pub struct RowLock {
/// How strong a lock to take.
pub strength: LockStrength,
/// Optional `OF table, ...` list restricting which tables are locked.
pub of_tables: Vec<String>,
/// Behavior when a candidate row is already locked elsewhere.
pub wait: LockWait,
}
/// Lock strength. `NoKeyUpdate` and `KeyShare` are PostgreSQL-specific and
/// are mapped to the nearest equivalent on other databases.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
pub enum LockStrength {
Update,
NoKeyUpdate,
Share,
KeyShare,
}
/// What to do when a candidate row is already locked.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
pub enum LockWait {
/// Block until the competing lock is released (the default).
Wait,
/// Error out immediately (`NOWAIT`).
NoWait,
/// Silently skip locked rows (`SKIP LOCKED` / `READPAST`).
SkipLocked,
}
impl RowLock {
pub fn for_update() -> RowLockBuilder {
RowLockBuilder::new(LockStrength::Update)
}
pub fn for_share() -> RowLockBuilder {
RowLockBuilder::new(LockStrength::Share)
}
pub fn for_no_key_update() -> RowLockBuilder {
RowLockBuilder::new(LockStrength::NoKeyUpdate)
}
pub fn for_key_share() -> RowLockBuilder {
RowLockBuilder::new(LockStrength::KeyShare)
}
pub fn to_postgres_sql(&self) -> String {
let strength = match self.strength {
LockStrength::Update => "FOR UPDATE",
LockStrength::NoKeyUpdate => "FOR NO KEY UPDATE",
LockStrength::Share => "FOR SHARE",
LockStrength::KeyShare => "FOR KEY SHARE",
};
let mut sql = strength.to_string();
if !self.of_tables.is_empty() {
sql.push_str(&format!(" OF {}", self.of_tables.join(", ")));
}
match self.wait {
LockWait::Wait => {}
LockWait::NoWait => sql.push_str(" NOWAIT"),
LockWait::SkipLocked => sql.push_str(" SKIP LOCKED"),
}
sql
}
pub fn to_mysql_sql(&self) -> String {
let strength = match self.strength {
LockStrength::Update | LockStrength::NoKeyUpdate => "FOR UPDATE",
LockStrength::Share | LockStrength::KeyShare => "FOR SHARE",
};
let mut sql = strength.to_string();
if !self.of_tables.is_empty() {
sql.push_str(&format!(" OF {}", self.of_tables.join(", ")));
}
match self.wait {
LockWait::Wait => {}
LockWait::NoWait => sql.push_str(" NOWAIT"),
LockWait::SkipLocked => sql.push_str(" SKIP LOCKED"),
}
sql
}
pub fn to_mssql_hint(&self) -> String {
let hint = match self.strength {
LockStrength::Update | LockStrength::NoKeyUpdate => "UPDLOCK, ROWLOCK",
LockStrength::Share | LockStrength::KeyShare => "HOLDLOCK, ROWLOCK",
};
let wait_hint = match self.wait {
LockWait::Wait => "",
LockWait::NoWait => ", NOWAIT",
LockWait::SkipLocked => ", READPAST",
};
format!("WITH ({}{})", hint, wait_hint)
}
pub fn to_sql(&self, db_type: DatabaseType) -> QueryResult<String> {
match db_type {
DatabaseType::PostgreSQL => Ok(self.to_postgres_sql()),
DatabaseType::MySQL => Ok(self.to_mysql_sql()),
DatabaseType::MSSQL => Ok(self.to_mssql_hint()),
DatabaseType::SQLite => Err(QueryError::unsupported(
"Row locking is not supported in SQLite",
)),
}
}
}
/// Builder for [`RowLock`]; created via the `RowLock::for_*` constructors.
#[derive(Debug, Clone)]
pub struct RowLockBuilder {
strength: LockStrength,
of_tables: Vec<String>,
wait: LockWait,
}
impl RowLockBuilder {
    /// Starts a builder with the given strength, an empty OF list, and the
    /// default blocking wait behavior.
    pub fn new(strength: LockStrength) -> Self {
        Self {
            strength,
            of_tables: Vec::new(),
            wait: LockWait::Wait,
        }
    }

    /// Restricts the lock to the named tables (`OF table, ...`).
    pub fn of<I, S>(mut self, tables: I) -> Self
    where
        I: IntoIterator<Item = S>,
        S: Into<String>,
    {
        self.of_tables = tables.into_iter().map(|t| t.into()).collect();
        self
    }

    /// Fail immediately instead of waiting for competing locks.
    pub fn nowait(self) -> Self {
        Self {
            wait: LockWait::NoWait,
            ..self
        }
    }

    /// Silently skip rows that are already locked.
    pub fn skip_locked(self) -> Self {
        Self {
            wait: LockWait::SkipLocked,
            ..self
        }
    }

    /// Finalizes into a [`RowLock`].
    pub fn build(self) -> RowLock {
        let Self {
            strength,
            of_tables,
            wait,
        } = self;
        RowLock {
            strength,
            of_tables,
            wait,
        }
    }
}
/// A TABLESAMPLE clause for reading a random subset of a table.
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct TableSample {
/// Sampling algorithm (used by the PostgreSQL rendering; SQL Server's
/// TABLESAMPLE does not take a method).
pub method: SampleMethod,
/// Sample size, as a percentage or an approximate row count.
pub size: SampleSize,
/// Optional seed for repeatable sampling (`REPEATABLE (seed)`).
pub seed: Option<i64>,
}
/// PostgreSQL sampling algorithm.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
pub enum SampleMethod {
/// Row-level sampling: keeps each row with the given probability.
/// Statistically better but scans more data.
Bernoulli,
/// Page-level sampling: keeps whole pages. Faster but clustered.
System,
}
/// How the sample size is expressed.
#[derive(Debug, Clone, Copy, PartialEq, Serialize, Deserialize)]
pub enum SampleSize {
/// Percentage of the table (expected range 0.0..=100.0).
Percent(f64),
/// Approximate number of rows.
Rows(usize),
}
impl TableSample {
    /// Starts a percentage-based sample (BERNOULLI row sampling by default).
    pub fn percent(percent: f64) -> TableSampleBuilder {
        TableSampleBuilder::new(SampleMethod::Bernoulli, SampleSize::Percent(percent))
    }

    /// Starts a row-count-based sample (SYSTEM page sampling by default).
    pub fn rows(count: usize) -> TableSampleBuilder {
        TableSampleBuilder::new(SampleMethod::System, SampleSize::Rows(count))
    }

    /// Renders the PostgreSQL `TABLESAMPLE` clause.
    ///
    /// Percentage sizes use the built-in BERNOULLI/SYSTEM methods. Row-count
    /// sizes render as `SYSTEM_ROWS (n)`, which requires the
    /// `tsm_system_rows` extension (`CREATE EXTENSION tsm_system_rows`);
    /// previously the requested row count was silently discarded and
    /// replaced by a hard-coded `10`. `REPEATABLE (seed)` is only emitted
    /// for percentage sampling, because `tsm_system_rows` does not accept
    /// the REPEATABLE clause.
    pub fn to_postgres_sql(&self) -> String {
        let (method, size) = match self.size {
            SampleSize::Percent(p) => {
                let m = match self.method {
                    SampleMethod::Bernoulli => "BERNOULLI",
                    SampleMethod::System => "SYSTEM",
                };
                (m, format!("{}", p))
            }
            SampleSize::Rows(n) => ("SYSTEM_ROWS", format!("{}", n)),
        };
        let mut sql = format!("TABLESAMPLE {} ({})", method, size);
        if let (Some(seed), SampleSize::Percent(_)) = (self.seed, self.size) {
            sql.push_str(&format!(" REPEATABLE ({})", seed));
        }
        sql
    }

    /// Renders the SQL Server `TABLESAMPLE` clause, which natively accepts
    /// either `n PERCENT` or `n ROWS` (row counts are approximate).
    pub fn to_mssql_sql(&self) -> String {
        let size_clause = match self.size {
            SampleSize::Percent(p) => format!("{} PERCENT", p),
            SampleSize::Rows(n) => format!("{} ROWS", n),
        };
        let mut sql = format!("TABLESAMPLE ({})", size_clause);
        if let Some(seed) = self.seed {
            sql.push_str(&format!(" REPEATABLE ({})", seed));
        }
        sql
    }

    /// Renders for `db_type`.
    ///
    /// # Errors
    /// MySQL and SQLite have no TABLESAMPLE; an unsupported error suggests
    /// the ORDER BY RANDOM() fallback (see the `random_sample` module).
    pub fn to_sql(&self, db_type: DatabaseType) -> QueryResult<String> {
        match db_type {
            DatabaseType::PostgreSQL => Ok(self.to_postgres_sql()),
            DatabaseType::MSSQL => Ok(self.to_mssql_sql()),
            DatabaseType::MySQL | DatabaseType::SQLite => Err(QueryError::unsupported(
                "TABLESAMPLE is not supported in this database. Use ORDER BY RANDOM() LIMIT instead.",
            )),
        }
    }
}
/// Builder for [`TableSample`]; created via `TableSample::percent`/`rows`.
#[derive(Debug, Clone)]
pub struct TableSampleBuilder {
method: SampleMethod,
size: SampleSize,
seed: Option<i64>,
}
impl TableSampleBuilder {
    /// Starts a builder with the given method and size and no seed.
    pub fn new(method: SampleMethod, size: SampleSize) -> Self {
        Self {
            method,
            size,
            seed: None,
        }
    }

    /// Use row-level (BERNOULLI) sampling.
    pub fn bernoulli(self) -> Self {
        Self {
            method: SampleMethod::Bernoulli,
            ..self
        }
    }

    /// Use page-level (SYSTEM) sampling.
    pub fn system(self) -> Self {
        Self {
            method: SampleMethod::System,
            ..self
        }
    }

    /// Makes the sample repeatable with the given seed.
    pub fn seed(self, seed: i64) -> Self {
        Self {
            seed: Some(seed),
            ..self
        }
    }

    /// Finalizes into a [`TableSample`].
    pub fn build(self) -> TableSample {
        let Self { method, size, seed } = self;
        TableSample { method, size, seed }
    }
}
pub mod random_sample {
    use super::*;

    /// `ORDER BY <random>` plus a row-limit clause, for sampling on
    /// databases without TABLESAMPLE support.
    ///
    /// SQL Server does not understand `LIMIT` (the previous rendering
    /// produced invalid T-SQL), so it gets the standard
    /// `OFFSET 0 ROWS FETCH NEXT n ROWS ONLY` form instead, which is valid
    /// here because an ORDER BY is always present.
    pub fn order_by_random_sql(limit: usize, db_type: DatabaseType) -> String {
        match db_type {
            DatabaseType::PostgreSQL | DatabaseType::SQLite => {
                format!("ORDER BY RANDOM() LIMIT {}", limit)
            }
            DatabaseType::MySQL => format!("ORDER BY RAND() LIMIT {}", limit),
            DatabaseType::MSSQL => format!(
                "ORDER BY NEWID() OFFSET 0 ROWS FETCH NEXT {} ROWS ONLY",
                limit
            ),
        }
    }

    /// Bernoulli-style WHERE filter keeping each row with probability
    /// `threshold` (expected in 0.0..=1.0). On SQL Server the threshold is
    /// scaled to a whole percentage and truncated.
    pub fn where_random_sql(threshold: f64, db_type: DatabaseType) -> String {
        match db_type {
            DatabaseType::PostgreSQL | DatabaseType::SQLite => {
                format!("WHERE RANDOM() < {}", threshold)
            }
            DatabaseType::MySQL => format!("WHERE RAND() < {}", threshold),
            DatabaseType::MSSQL => {
                format!(
                    "WHERE ABS(CHECKSUM(NEWID())) % 100 < {}",
                    (threshold * 100.0) as i32
                )
            }
        }
    }
}
/// Splits a list of items into batches for bulk database writes.
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
pub struct BulkOperation<T> {
/// The items to write.
pub items: Vec<T>,
/// Maximum number of items per batch (defaults to 1000).
pub batch_size: usize,
/// Whether batches must be applied in order.
pub ordered: bool,
}
impl<T> BulkOperation<T> {
    /// Wraps `items` with the defaults: batches of 1000, ordered execution.
    pub fn new(items: Vec<T>) -> Self {
        Self {
            items,
            batch_size: 1000,
            ordered: true,
        }
    }

    /// Sets the maximum batch size. A size of 0 would make `batches()`
    /// panic (`slice::chunks` rejects 0) and `batch_count()` divide by
    /// zero, so it is clamped to 1.
    pub fn batch_size(mut self, size: usize) -> Self {
        self.batch_size = size.max(1);
        self
    }

    /// Marks the operation as unordered (batches may be applied in any
    /// order or in parallel by the executor).
    pub fn unordered(mut self) -> Self {
        self.ordered = false;
        self
    }

    /// Iterates over the items in slices of at most `batch_size` elements.
    pub fn batches(&self) -> impl Iterator<Item = &[T]> {
        // Guard again here: `batch_size` is a public field and can be set
        // to 0 directly, bypassing the clamping setter.
        self.items.chunks(self.batch_size.max(1))
    }

    /// Number of batches `batches()` will yield (ceiling division; 0 for
    /// an empty item list).
    pub fn batch_count(&self) -> usize {
        let size = self.batch_size.max(1);
        (self.items.len() + size - 1) / size
    }
}
pub mod mongodb {
use serde::{Deserialize, Serialize};
use serde_json::Value as JsonValue;
/// One operation inside a MongoDB `bulkWrite` command.
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub enum BulkWriteOp {
/// Insert a single document.
InsertOne { document: JsonValue },
/// Apply an update document to the first match of `filter`.
UpdateOne {
filter: JsonValue,
update: JsonValue,
/// Insert when nothing matches.
upsert: bool,
},
/// Apply an update document to every match of `filter`.
UpdateMany {
filter: JsonValue,
update: JsonValue,
/// Insert when nothing matches.
upsert: bool,
},
/// Replace the first match of `filter` wholesale.
ReplaceOne {
filter: JsonValue,
replacement: JsonValue,
/// Insert when nothing matches.
upsert: bool,
},
/// Delete the first match of `filter`.
DeleteOne { filter: JsonValue },
/// Delete every match of `filter`.
DeleteMany { filter: JsonValue },
}
impl BulkWriteOp {
    /// Convenience constructor for an `insertOne` operation.
    pub fn insert_one(document: JsonValue) -> Self {
        Self::InsertOne { document }
    }

    /// `updateOne` without upsert.
    pub fn update_one(filter: JsonValue, update: JsonValue) -> Self {
        Self::UpdateOne {
            filter,
            update,
            upsert: false,
        }
    }

    /// `updateOne` with upsert enabled.
    pub fn upsert_one(filter: JsonValue, update: JsonValue) -> Self {
        Self::UpdateOne {
            filter,
            update,
            upsert: true,
        }
    }

    /// `deleteOne` matching `filter`.
    pub fn delete_one(filter: JsonValue) -> Self {
        Self::DeleteOne { filter }
    }

    /// Serializes this operation into the document shape expected inside a
    /// `bulkWrite` command's operations array.
    pub fn to_command(&self) -> JsonValue {
        match self {
            Self::InsertOne { document } => {
                serde_json::json!({ "insertOne": { "document": document } })
            }
            Self::UpdateOne {
                filter,
                update,
                upsert,
            } => serde_json::json!({
                "updateOne": {
                    "filter": filter,
                    "update": update,
                    "upsert": upsert
                }
            }),
            Self::UpdateMany {
                filter,
                update,
                upsert,
            } => serde_json::json!({
                "updateMany": {
                    "filter": filter,
                    "update": update,
                    "upsert": upsert
                }
            }),
            Self::ReplaceOne {
                filter,
                replacement,
                upsert,
            } => serde_json::json!({
                "replaceOne": {
                    "filter": filter,
                    "replacement": replacement,
                    "upsert": upsert
                }
            }),
            Self::DeleteOne { filter } => {
                serde_json::json!({ "deleteOne": { "filter": filter } })
            }
            Self::DeleteMany { filter } => {
                serde_json::json!({ "deleteMany": { "filter": filter } })
            }
        }
    }
}
/// Fluent builder for a MongoDB `bulkWrite` command document.
#[derive(Debug, Clone)]
pub struct BulkWriteBuilder {
    operations: Vec<BulkWriteOp>,
    ordered: bool,
    bypass_validation: bool,
}

// A derived `Default` would yield `ordered: false`, disagreeing with both
// `new()` and MongoDB's own default (bulkWrite executes ordered unless told
// otherwise), so `Default` delegates to `new()` instead.
impl Default for BulkWriteBuilder {
    fn default() -> Self {
        Self::new()
    }
}

impl BulkWriteBuilder {
    /// Creates an empty, ordered builder with document validation enabled.
    pub fn new() -> Self {
        Self {
            operations: Vec::new(),
            ordered: true,
            bypass_validation: false,
        }
    }

    /// Appends a single operation.
    pub fn add(mut self, op: BulkWriteOp) -> Self {
        self.operations.push(op);
        self
    }

    /// Appends every operation yielded by `ops`.
    pub fn add_many<I>(mut self, ops: I) -> Self
    where
        I: IntoIterator<Item = BulkWriteOp>,
    {
        self.operations.extend(ops);
        self
    }

    /// Shorthand for `add(BulkWriteOp::insert_one(document))`.
    pub fn insert_one(self, document: JsonValue) -> Self {
        self.add(BulkWriteOp::insert_one(document))
    }

    /// Shorthand for `add(BulkWriteOp::update_one(filter, update))`.
    pub fn update_one(self, filter: JsonValue, update: JsonValue) -> Self {
        self.add(BulkWriteOp::update_one(filter, update))
    }

    /// Shorthand for `add(BulkWriteOp::upsert_one(filter, update))`.
    pub fn upsert_one(self, filter: JsonValue, update: JsonValue) -> Self {
        self.add(BulkWriteOp::upsert_one(filter, update))
    }

    /// Shorthand for `add(BulkWriteOp::delete_one(filter))`.
    pub fn delete_one(self, filter: JsonValue) -> Self {
        self.add(BulkWriteOp::delete_one(filter))
    }

    /// Lets the server continue past individual operation failures.
    pub fn unordered(mut self) -> Self {
        self.ordered = false;
        self
    }

    /// Skips server-side document validation for these writes.
    pub fn bypass_validation(mut self) -> Self {
        self.bypass_validation = true;
        self
    }

    /// Assembles the final `bulkWrite` command document for `collection`.
    pub fn build(&self, collection: &str) -> JsonValue {
        let ops: Vec<JsonValue> = self.operations.iter().map(|op| op.to_command()).collect();
        serde_json::json!({
            "bulkWrite": collection,
            "operations": ops,
            "ordered": self.ordered,
            "bypassDocumentValidation": self.bypass_validation
        })
    }
}
/// MongoDB `$sample` aggregation stage: picks `size` random documents.
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct Sample {
/// Number of documents to sample.
pub size: usize,
}
impl Sample {
pub fn new(size: usize) -> Self {
Self { size }
}
pub fn to_stage(&self) -> JsonValue {
serde_json::json!({ "$sample": { "size": self.size } })
}
}
}
#[cfg(test)]
mod tests {
use super::*;
// LateralJoin: the builder defaults to a CROSS JOIN LATERAL rendering.
#[test]
fn test_lateral_join_postgres() {
let lateral = LateralJoin::new(
"SELECT * FROM orders WHERE orders.user_id = users.id LIMIT 3",
"recent_orders",
)
.build();
let sql = lateral.to_postgres_sql();
assert!(sql.contains("CROSS JOIN LATERAL"));
assert!(sql.contains("AS recent_orders"));
}
// LateralJoin: LEFT lateral joins render as OUTER APPLY on SQL Server.
#[test]
fn test_lateral_join_mssql() {
let lateral = LateralJoin::new(
"SELECT TOP 3 * FROM orders WHERE orders.user_id = users.id",
"recent_orders",
)
.left()
.build();
let sql = lateral.to_mssql_sql();
assert!(sql.contains("OUTER APPLY"));
}
// DistinctOn: column list is rendered comma-separated inside parentheses.
#[test]
fn test_distinct_on() {
let distinct = DistinctOn::new(["department", "date"]);
let sql = distinct.to_postgres_sql();
assert!(sql.contains("DISTINCT ON (department, date)"));
}
// Returning: All renders as a bare `*` on PostgreSQL.
#[test]
fn test_returning_postgres() {
let ret = Returning::all(ReturnOperation::Insert);
let sql = ret.to_postgres_sql();
assert_eq!(sql, "RETURNING *");
}
// Returning: SQL Server qualifies columns with the INSERTED pseudo-table
// for INSERT operations.
#[test]
fn test_returning_mssql() {
let ret = Returning::columns(ReturnOperation::Insert, ["id", "name"]);
let sql = ret.to_mssql_sql();
assert!(sql.contains("OUTPUT INSERTED.id, INSERTED.name"));
}
// RowLock: FOR UPDATE with the NOWAIT wait policy.
#[test]
fn test_for_update() {
let lock = RowLock::for_update().nowait().build();
let sql = lock.to_postgres_sql();
assert!(sql.contains("FOR UPDATE"));
assert!(sql.contains("NOWAIT"));
}
// RowLock: FOR SHARE with SKIP LOCKED.
#[test]
fn test_for_share_skip_locked() {
let lock = RowLock::for_share().skip_locked().build();
let sql = lock.to_postgres_sql();
assert!(sql.contains("FOR SHARE"));
assert!(sql.contains("SKIP LOCKED"));
}
// RowLock: SQL Server table-hint rendering (UPDLOCK + NOWAIT).
#[test]
fn test_row_lock_mssql() {
let lock = RowLock::for_update().nowait().build();
let sql = lock.to_mssql_hint();
assert!(sql.contains("UPDLOCK"));
assert!(sql.contains("NOWAIT"));
}
// TableSample: percentage sampling with a repeatable seed.
#[test]
fn test_tablesample_postgres() {
let sample = TableSample::percent(10.0).seed(42).build();
let sql = sample.to_postgres_sql();
assert!(sql.contains("TABLESAMPLE BERNOULLI (10)"));
assert!(sql.contains("REPEATABLE (42)"));
}
// TableSample: SQL Server accepts a native ROWS size.
#[test]
fn test_tablesample_mssql() {
let sample = TableSample::rows(1000).build();
let sql = sample.to_mssql_sql();
assert!(sql.contains("TABLESAMPLE (1000 ROWS)"));
}
// BulkOperation: 5 items at batch size 2 -> [1,2], [3,4], [5].
#[test]
fn test_bulk_operation_batches() {
let bulk: BulkOperation<i32> = BulkOperation::new(vec![1, 2, 3, 4, 5]).batch_size(2);
assert_eq!(bulk.batch_count(), 3);
let batches: Vec<_> = bulk.batches().collect();
assert_eq!(batches.len(), 3);
assert_eq!(batches[0], &[1, 2]);
assert_eq!(batches[1], &[3, 4]);
assert_eq!(batches[2], &[5]);
}
// MongoDB bulk-write helper tests.
mod mongodb_tests {
use super::super::mongodb::*;
// The builder assembles the full bulkWrite command document.
#[test]
fn test_bulk_write_builder() {
let bulk = BulkWriteBuilder::new()
.insert_one(serde_json::json!({ "name": "Alice" }))
.update_one(
serde_json::json!({ "_id": 1 }),
serde_json::json!({ "$set": { "status": "active" } }),
)
.delete_one(serde_json::json!({ "_id": 2 }))
.unordered()
.build("users");
assert_eq!(bulk["bulkWrite"], "users");
assert_eq!(bulk["ordered"], false);
assert!(bulk["operations"].is_array());
assert_eq!(bulk["operations"].as_array().unwrap().len(), 3);
}
// $sample stage shape.
#[test]
fn test_sample_stage() {
let sample = Sample::new(100);
let stage = sample.to_stage();
assert_eq!(stage["$sample"]["size"], 100);
}
// upsert_one must set the upsert flag on the updateOne command.
#[test]
fn test_bulk_write_upsert() {
let op = BulkWriteOp::upsert_one(
serde_json::json!({ "email": "test@example.com" }),
serde_json::json!({ "$set": { "name": "Test" } }),
);
let cmd = op.to_command();
assert!(cmd["updateOne"]["upsert"].as_bool().unwrap());
}
}
// DISTINCT ON emulation via $group/$first for MongoDB.
mod distinct_on_tests {
use super::super::mongodb_distinct::*;
#[test]
fn test_distinct_on_stage() {
let stage = distinct_on_stage(&["department"], &["name", "salary"]);
assert!(stage["$group"]["_id"]["department"].is_string());
assert!(stage["$group"]["name"]["$first"].is_string());
assert!(stage["$group"]["salary"]["$first"].is_string());
}
}
}