use super::super::accounts::DEFAULT_ACCOUNT_ID;
use super::super::DbPool;
use crate::error::StorageError;
/// A single piece of posted content (a reply or an original tweet)
/// together with the engagement metrics recorded for it.
#[derive(Debug, Clone, serde::Serialize)]
pub struct PerformanceItem {
    /// Either "reply" or "tweet", matching the UNION branches of the query.
    pub content_type: String,
    /// First 120 characters of the posted text (SUBSTR in the query).
    pub content_preview: String,
    pub likes: i64,
    pub replies_received: i64,
    /// Always 0 for replies; only original tweets track retweets.
    pub retweets: i64,
    pub impressions: i64,
    pub performance_score: f64,
    /// Creation timestamp as stored in the database (string form).
    pub posted_at: String,
}
/// Raw row shape produced by the recent-performance query, in column order:
/// (content_type, content_preview, likes, replies_received, retweets,
/// impressions, performance_score, posted_at).
type PerformanceRow = (String, String, i64, i64, i64, i64, f64, String);
/// Fetch the most recent performance items (replies and original tweets)
/// for `account_id`, newest first, capped at `limit` rows.
///
/// # Errors
/// Returns [`StorageError::Query`] if the underlying query fails.
pub async fn get_recent_performance_items_for(
    pool: &DbPool,
    account_id: &str,
    limit: u32,
) -> Result<Vec<PerformanceItem>, StorageError> {
    // Both content types are pulled in a single UNION ALL so that the
    // ORDER BY / LIMIT apply across the combined result set.
    const SQL: &str = "SELECT 'reply' as content_type, \
        SUBSTR(rs.reply_content, 1, 120) as content_preview, \
        rp.likes_received, rp.replies_received, 0 as retweets, \
        rp.impressions, rp.performance_score, rs.created_at as posted_at \
        FROM reply_performance rp \
        JOIN replies_sent rs ON rs.reply_tweet_id = rp.reply_id \
        WHERE rp.account_id = ? \
        UNION ALL \
        SELECT 'tweet' as content_type, \
        SUBSTR(ot.content, 1, 120) as content_preview, \
        tp.likes_received, tp.replies_received, tp.retweets_received, \
        tp.impressions, tp.performance_score, ot.created_at as posted_at \
        FROM tweet_performance tp \
        JOIN original_tweets ot ON ot.tweet_id = tp.tweet_id \
        WHERE tp.account_id = ? \
        ORDER BY posted_at DESC \
        LIMIT ?";

    let rows: Vec<PerformanceRow> = sqlx::query_as(SQL)
        .bind(account_id)
        .bind(account_id)
        .bind(limit)
        .fetch_all(pool)
        .await
        .map_err(|e| StorageError::Query { source: e })?;

    let items = rows
        .into_iter()
        .map(
            |(
                content_type,
                content_preview,
                likes,
                replies_received,
                retweets,
                impressions,
                performance_score,
                posted_at,
            )| PerformanceItem {
                content_type,
                content_preview,
                likes,
                replies_received,
                retweets,
                impressions,
                performance_score,
                posted_at,
            },
        )
        .collect();
    Ok(items)
}
/// Convenience wrapper: recent performance items for the default account.
/// See [`get_recent_performance_items_for`].
pub async fn get_recent_performance_items(
    pool: &DbPool,
    limit: u32,
) -> Result<Vec<PerformanceItem>, StorageError> {
    get_recent_performance_items_for(pool, DEFAULT_ACCOUNT_ID, limit).await
}
/// Per-content-type aggregate ("reply" or "tweet"): number of rows,
/// average performance score, and summed impressions.
#[derive(Debug, Clone, serde::Serialize)]
pub struct ContentBreakdown {
    pub content_type: String,
    pub count: i64,
    /// AVG(performance_score); 0.0 when there are no rows (COALESCE).
    pub avg_performance: f64,
    /// SUM(impressions); 0 when there are no rows (COALESCE).
    pub total_impressions: i64,
}
/// Aggregate count, average performance score, and total impressions per
/// content type ("reply" vs "tweet") for `account_id`. Content types with
/// zero rows are omitted from the result.
///
/// # Errors
/// Returns [`StorageError::Query`] if the underlying query fails.
pub async fn get_content_breakdown_for(
    pool: &DbPool,
    account_id: &str,
) -> Result<Vec<ContentBreakdown>, StorageError> {
    let rows: Vec<(String, i64, f64, i64)> = sqlx::query_as(
        "SELECT 'reply' as content_type, \
        COUNT(*) as count, \
        COALESCE(AVG(performance_score), 0.0) as avg_performance, \
        COALESCE(SUM(impressions), 0) as total_impressions \
        FROM reply_performance \
        WHERE account_id = ? \
        UNION ALL \
        SELECT 'tweet' as content_type, \
        COUNT(*) as count, \
        COALESCE(AVG(performance_score), 0.0) as avg_performance, \
        COALESCE(SUM(impressions), 0) as total_impressions \
        FROM tweet_performance \
        WHERE account_id = ? \
        ORDER BY count DESC",
    )
    .bind(account_id)
    .bind(account_id)
    .fetch_all(pool)
    .await
    .map_err(|e| StorageError::Query { source: e })?;

    // At most two rows come back (one per UNION branch); drop any branch
    // that matched no rows at all.
    let mut breakdown = Vec::with_capacity(rows.len());
    for (content_type, count, avg_performance, total_impressions) in rows {
        if count > 0 {
            breakdown.push(ContentBreakdown {
                content_type,
                count,
                avg_performance,
                total_impressions,
            });
        }
    }
    Ok(breakdown)
}
/// Convenience wrapper: content breakdown for the default account.
/// See [`get_content_breakdown_for`].
pub async fn get_content_breakdown(pool: &DbPool) -> Result<Vec<ContentBreakdown>, StorageError> {
    get_content_breakdown_for(pool, DEFAULT_ACCOUNT_ID).await
}
/// Engagement aggregated by the hour of day at which tweets were posted.
#[derive(Debug, Clone, serde::Serialize)]
pub struct HourlyPerformance {
    /// Hour of day, 0-23, extracted via strftime('%H') on created_at.
    pub hour: i64,
    /// AVG(performance_score); 0.0 when no performance rows exist (COALESCE).
    pub avg_engagement: f64,
    /// Number of sent tweets posted in this hour.
    pub post_count: i64,
}
/// Rank hours of the day by average engagement of tweets posted in that
/// hour for `account_id`, best-performing hours first.
///
/// Tweets with no performance row still count toward `post_count`
/// (LEFT JOIN); AVG skips their NULL scores, and COALESCE maps an
/// all-NULL hour to 0.0.
///
/// # Errors
/// Returns [`StorageError::Query`] if the underlying query fails.
pub async fn get_optimal_posting_times_for(
    pool: &DbPool,
    account_id: &str,
) -> Result<Vec<HourlyPerformance>, StorageError> {
    let rows: Vec<(i64, f64, i64)> = sqlx::query_as(
        "SELECT
            CAST(strftime('%H', ot.created_at) AS INTEGER) as hour,
            COALESCE(AVG(tp.performance_score), 0.0) as avg_engagement,
            COUNT(*) as post_count
        FROM original_tweets ot
        LEFT JOIN tweet_performance tp ON tp.tweet_id = ot.tweet_id
        WHERE ot.account_id = ? AND ot.status = 'sent' AND ot.tweet_id IS NOT NULL
        GROUP BY hour
        ORDER BY avg_engagement DESC",
    )
    .bind(account_id)
    .fetch_all(pool)
    .await
    .map_err(|e| StorageError::Query { source: e })?;

    let mut hourly = Vec::with_capacity(rows.len());
    for (hour, avg_engagement, post_count) in rows {
        hourly.push(HourlyPerformance {
            hour,
            avg_engagement,
            post_count,
        });
    }
    Ok(hourly)
}
/// Convenience wrapper: optimal posting times for the default account.
/// See [`get_optimal_posting_times_for`].
pub async fn get_optimal_posting_times(
    pool: &DbPool,
) -> Result<Vec<HourlyPerformance>, StorageError> {
    get_optimal_posting_times_for(pool, DEFAULT_ACCOUNT_ID).await
}