use super::b_partitioned::{BListUse, BPartitionedMeta};
use super::intra_predictor::satd_16x16;
use super::motion_compensation::{apply_luma_mv_block, apply_luma_mv_block_bipred};
use super::motion_estimation::MotionVector;
use super::reference_buffer::ReconFrame;
/// Per-QP Lagrangian multiplier λ for B-frame mode decision, indexed by
/// QP 0..=51. Grows monotonically, roughly doubling every six QP steps,
/// mirroring the exponential quantizer-step scaling of H.264.
pub const LAMBDA_TAB_B: [u32; 52] = [
5, 6, 7, 8, 9, 10, 11, 12, 14, 15, 17, 19, 22, 25, 28, 31, 35, 39, 44, 49, 55, 62, 69, 78, 87, 98, 110, 124, 139, 156, 175, 196, 220, 247, 278, 312, 350, 393, 441, 495, 555, 623, 699, 785, 881, 989, 1110, 1246, 1399, 1570, 1762, 1978, ];
/// Flat per-bin cost (in bits) charged for bins the coarse rate models in
/// this module treat as rare/expensive.
const RARE_BIN_COST: u32 = 2;
/// Default right-shift applied to the psy-RD energy difference when the
/// PHASM_PSY_RD_SHIFT env var is unset (larger shift → weaker psy term).
const PSY_RD_SHIFT_DEFAULT: u32 = 4;
pub(crate) fn psy_hf_satd_16x16(block: &[[u8; 16]; 16]) -> u32 {
let mut sum: u32 = 0;
for row in block {
for &v in row {
sum += v as u32;
}
}
let mean = (sum / 256).min(255) as u8;
let mut flat = [[0u8; 16]; 16];
for row in flat.iter_mut() {
for v in row.iter_mut() {
*v = mean;
}
}
crate::codec::h264::encoder::intra_predictor::satd_16x16(block, &flat)
}
pub(crate) fn psy_rd_config() -> (bool, u32) {
let enabled = std::env::var_os("PHASM_PSY_RD")
.map(|v| v == "1")
.unwrap_or(false);
let shift = std::env::var("PHASM_PSY_RD_SHIFT")
.ok()
.and_then(|s| s.parse().ok())
.unwrap_or(PSY_RD_SHIFT_DEFAULT);
(enabled, shift)
}
fn mvd_rate_estimate(mvd_x: i16, mvd_y: i16) -> u32 {
let log2_plus_1 = |v: i16| -> u32 {
let abs = v.unsigned_abs() as u32;
if abs == 0 { 1 } else { 32 - abs.leading_zeros() }
};
2 + log2_plus_1(mvd_x) + 2 + log2_plus_1(mvd_y)
}
/// Motion-compensates one luma partition of a B macroblock into `mb_buf`.
///
/// The partition is addressed in 4x4-block units relative to the MB origin
/// (`part_off_4x4` offset, `part_dim_4x4` size). The prediction for the
/// list(s) selected by `list` is rendered into the top-left window of a
/// 16x16 scratch surface, then blitted into the matching window of `mb_buf`.
fn paint_partition_luma(
    mb_buf: &mut [[u8; 16]; 16],
    part_off_4x4: (usize, usize),
    part_dim_4x4: (usize, usize),
    list: BListUse,
    mv_l0: MotionVector,
    mv_l1: MotionVector,
    l0_ref: &ReconFrame,
    l1_ref: &ReconFrame,
    mb_x: usize,
    mb_y: usize,
) {
    let (off_x_4, off_y_4) = part_off_4x4;
    let (dim_w_4, dim_h_4) = part_dim_4x4;
    // Frame-absolute pixel origin and pixel dimensions of the partition.
    let px_x = (mb_x * 16 + off_x_4 * 4) as u32;
    let px_y = (mb_y * 16 + off_y_4 * 4) as u32;
    let px_w = (dim_w_4 * 4) as u32;
    let px_h = (dim_h_4 * 4) as u32;
    // MC helpers write the predicted window at the scratch origin, stride 16.
    let mut scratch = [0u8; 16 * 16];
    match list {
        BListUse::L0 => {
            apply_luma_mv_block(l0_ref, px_x, px_y, px_w, px_h, mv_l0, &mut scratch, 16)
        }
        BListUse::L1 => {
            apply_luma_mv_block(l1_ref, px_x, px_y, px_w, px_h, mv_l1, &mut scratch, 16)
        }
        BListUse::Bi => apply_luma_mv_block_bipred(
            l0_ref, mv_l0, l1_ref, mv_l1, px_x, px_y, px_w, px_h, &mut scratch, 16,
        ),
    }
    // Copy the predicted window into the MB-local buffer at the partition's
    // offset, one row at a time.
    let dst_x0 = off_x_4 * 4;
    let dst_y0 = off_y_4 * 4;
    let w_px = dim_w_4 * 4;
    for dy in 0..dim_h_4 * 4 {
        let src_row = &scratch[dy * 16..dy * 16 + w_px];
        mb_buf[dst_y0 + dy][dst_x0..dst_x0 + w_px].copy_from_slice(src_row);
    }
}
/// Motion-compensates one 8x8 sub-macroblock of a B_8x8 MB into `mb_buf`.
///
/// `sub_idx` (0..=3) selects the quadrant in raster order. `sub_mb_type`
/// selects the prediction source: 0 and 3 blend both lists (bipred),
/// 1 uses list 0 only, 2 uses list 1 only; other values are out of scope
/// and only trip a debug assertion.
fn paint_sub_mb_luma(
    mb_buf: &mut [[u8; 16]; 16],
    sub_idx: usize,
    sub_mb_type: u8,
    mvs: &BPartitionMvPair,
    l0_ref: &ReconFrame,
    l1_ref: &ReconFrame,
    mb_x: usize,
    mb_y: usize,
) {
    // Quadrant offset within the MB, in 4x4 units (raster order).
    let (sub_x_4, sub_y_4) = match sub_idx {
        0 => (0, 0),
        1 => (2, 0),
        2 => (0, 2),
        3 => (2, 2),
        _ => unreachable!("B_8x8 sub_idx must be 0..=3"),
    };
    // Frame-absolute pixel origin of the 8x8 sub-block.
    let px_x = ((mb_x * 16) + sub_x_4 * 4) as u32;
    let px_y = ((mb_y * 16) + sub_y_4 * 4) as u32;
    // Prediction lands at the scratch origin with a row stride of 16.
    let mut scratch = [0u8; 16 * 16];
    match sub_mb_type {
        0 | 3 => apply_luma_mv_block_bipred(
            l0_ref, mvs.mv_l0, l1_ref, mvs.mv_l1,
            px_x, px_y, 8, 8,
            &mut scratch, 16,
        ),
        1 => apply_luma_mv_block(l0_ref, px_x, px_y, 8, 8, mvs.mv_l0, &mut scratch, 16),
        2 => apply_luma_mv_block(l1_ref, px_x, px_y, 8, 8, mvs.mv_l1, &mut scratch, 16),
        _ => debug_assert!(false, "B_8x8 sub_mb_type {sub_mb_type} out of §6E-A6.3 scope"),
    }
    // Blit the predicted 8x8 window into the selected quadrant.
    let dst_x0 = sub_x_4 * 4;
    let dst_y0 = sub_y_4 * 4;
    for dy in 0..8 {
        mb_buf[dst_y0 + dy][dst_x0..dst_x0 + 8]
            .copy_from_slice(&scratch[dy * 16..dy * 16 + 8]);
    }
}
/// Coarse rate estimate (bits) for the B_8x8 mode's side information:
/// mb_type bins, one sub_mb_type code per 8x8 quadrant, the MVDs each
/// sub-partition signals, plus a flat 4-bit allowance at the end.
fn b_8x8_overhead_bits(sub_mb_types: &[u8; 4], parts: &[BPartitionMvPair; 4]) -> u32 {
    // mb_type for B_8x8: one cheap bin plus five rare bins.
    let mut total = 1 + 5 * RARE_BIN_COST;
    for (&sub, p) in sub_mb_types.iter().zip(parts.iter()) {
        debug_assert!(sub <= 3, "B_8x8 sub_mb_type {sub} out of scope");
        // sub_mb_type code: direct (0) is a single bin; explicit modes
        // carry two extra rare bins.
        total += if sub == 0 { 1 } else { 1 + 2 * RARE_BIN_COST };
        // MVD bits only for the list(s) the sub-partition actually uses:
        // 1 = L0, 2 = L1, 3 = bi (both).
        if matches!(sub, 1 | 3) {
            total += mvd_rate_estimate(p.mv_l0.mv_x, p.mv_l0.mv_y);
        }
        if matches!(sub, 2 | 3) {
            total += mvd_rate_estimate(p.mv_l1.mv_x, p.mv_l1.mv_y);
        }
    }
    // Flat allowance for the remaining per-MB syntax.
    total + 4
}
/// Coarse rate estimate (bits) for a two-partition (16x8 / 8x16) B mode:
/// mb_type bins, the MVDs for each partition's active list(s), plus a
/// flat 4-bit allowance at the end.
fn partitioned_overhead_bits(
    meta: &BPartitionedMeta,
    p0: &BPartitionMvPair,
    p1: &BPartitionMvPair,
) -> u32 {
    // MVD cost for whichever reference list(s) the partition signals.
    fn mvd_for(list: BListUse, p: &BPartitionMvPair) -> u32 {
        let l0_bits = mvd_rate_estimate(p.mv_l0.mv_x, p.mv_l0.mv_y);
        let l1_bits = mvd_rate_estimate(p.mv_l1.mv_x, p.mv_l1.mv_y);
        match list {
            BListUse::L0 => l0_bits,
            BListUse::L1 => l1_bits,
            BListUse::Bi => l0_bits + l1_bits,
        }
    }
    // mb_type (1 cheap + 4 rare bins) + per-partition MVDs + flat tail bits.
    1 + 4 * RARE_BIN_COST + mvd_for(meta.part0, p0) + mvd_for(meta.part1, p1) + 4
}
/// Motion-vector pair for one B partition: one MV per reference list.
/// Only the list(s) selected by the partition's prediction mode are
/// consulted; the other entry is carried along but ignored.
#[derive(Debug, Clone, Copy)]
pub struct BPartitionMvPair {
    // MV used when the partition predicts from reference list 0.
    pub mv_l0: MotionVector,
    // MV used when the partition predicts from reference list 1.
    pub mv_l1: MotionVector,
}
/// One B-macroblock coding mode candidate to be scored by the RDO.
/// Variant names mirror H.264 B-slice mb_type spellings, hence the
/// non_camel_case allowance.
#[allow(non_camel_case_types)] #[derive(Debug, Clone, Copy)]
pub enum BMbCandidate {
    /// B_Skip / B_Direct_16x16: inferred MVs, no MVDs coded. The flags
    /// say which list(s) the inferred prediction actually uses.
    SkipOrDirect {
        mv_l0: MotionVector,
        mv_l1: MotionVector,
        uses_l0: bool,
        uses_l1: bool,
    },
    /// Whole-MB prediction from list 0 only.
    L0_16x16 { mv_l0: MotionVector },
    /// Whole-MB prediction from list 1 only.
    L1_16x16 { mv_l1: MotionVector },
    /// Whole-MB bi-prediction blending both lists.
    Bi_16x16 { mv_l0: MotionVector, mv_l1: MotionVector },
    /// Two-partition mode (16x8 or 8x16); shape and per-partition list
    /// usage come from `meta`.
    Partitioned {
        meta: BPartitionedMeta,
        part0_mvs: BPartitionMvPair,
        part1_mvs: BPartitionMvPair,
    },
    /// Four independent 8x8 sub-macroblocks, each with its own
    /// sub_mb_type (0=direct, 1=L0, 2=L1, 3=bi) and MV pair.
    B_8x8 {
        sub_mb_types: [u8; 4],
        parts: [BPartitionMvPair; 4],
    },
}
/// Outcome of scoring one `BMbCandidate` with `evaluate_b_mb_rdo`.
#[derive(Debug, Clone, Copy)]
pub struct BMbRdoResult {
    // The candidate that was evaluated (copied through for the caller).
    pub candidate: BMbCandidate,
    // Raw luma SATD between source and prediction (no mode biases).
    pub satd: u32,
    // Estimated side-information rate in bits for this mode.
    pub r_bits: u32,
    // Combined RD cost: biased SATD + lambda * r_bits + optional psy term.
    pub cost: u64,
}
/// Scores one B-macroblock prediction candidate and returns its RD result.
///
/// Builds the 16x16 luma prediction for `candidate` by motion compensation
/// from `l0_ref`/`l1_ref`, measures SATD against `src_y`, estimates the
/// mode's side-information rate in bits, and combines them as
/// `cost = satd' + lambda(mb_qp) * r_bits + psy`, where `satd'` applies
/// mode-dependent biases and `psy` is an optional env-controlled term.
/// `mb_x`/`mb_y` are the macroblock coordinates (16px units); `mb_qp` is
/// clamped to 51 when indexing the lambda table.
pub fn evaluate_b_mb_rdo(
    candidate: &BMbCandidate,
    src_y: &[[u8; 16]; 16],
    l0_ref: &ReconFrame,
    l1_ref: &ReconFrame,
    mb_x: usize,
    mb_y: usize,
    mb_qp: u8,
) -> BMbRdoResult {
    // Frame-absolute pixel origin of this macroblock.
    let mb_px_x = (mb_x * 16) as u32;
    let mb_px_y = (mb_y * 16) as u32;
    let mut pred_y = [[0u8; 16]; 16];
    // Flat 256-byte view of pred_y for the MC helpers (row stride 16).
    let pred_flat = pred_y.as_flattened_mut();
    // --- Prediction stage: paint pred_y according to the candidate mode ---
    match *candidate {
        BMbCandidate::SkipOrDirect { mv_l0, mv_l1, uses_l0, uses_l1 } => {
            match (uses_l0, uses_l1) {
                (true, true) => apply_luma_mv_block_bipred(
                    l0_ref, mv_l0, l1_ref, mv_l1,
                    mb_px_x, mb_px_y, 16, 16, pred_flat, 16,
                ),
                (true, false) => apply_luma_mv_block(
                    l0_ref, mb_px_x, mb_px_y, 16, 16, mv_l0, pred_flat, 16,
                ),
                (false, true) => apply_luma_mv_block(
                    l1_ref, mb_px_x, mb_px_y, 16, 16, mv_l1, pred_flat, 16,
                ),
                // Neither list flagged: fall back to zero-MV L0 prediction.
                (false, false) => {
                    apply_luma_mv_block(
                        l0_ref, mb_px_x, mb_px_y, 16, 16,
                        MotionVector::ZERO, pred_flat, 16,
                    );
                }
            }
        }
        BMbCandidate::L0_16x16 { mv_l0 } => apply_luma_mv_block(
            l0_ref, mb_px_x, mb_px_y, 16, 16, mv_l0, pred_flat, 16,
        ),
        BMbCandidate::L1_16x16 { mv_l1 } => apply_luma_mv_block(
            l1_ref, mb_px_x, mb_px_y, 16, 16, mv_l1, pred_flat, 16,
        ),
        BMbCandidate::Bi_16x16 { mv_l0, mv_l1 } => apply_luma_mv_block_bipred(
            l0_ref, mv_l0, l1_ref, mv_l1,
            mb_px_x, mb_px_y, 16, 16, pred_flat, 16,
        ),
        BMbCandidate::Partitioned { meta, part0_mvs, part1_mvs } => {
            // Both partitions share the same dimensions; only offsets differ.
            let dim = meta.shape.part_dim_4x4();
            let off0 = meta.shape.part_offset(0);
            let off1 = meta.shape.part_offset(1);
            paint_partition_luma(
                &mut pred_y, off0, dim, meta.part0,
                part0_mvs.mv_l0, part0_mvs.mv_l1,
                l0_ref, l1_ref, mb_x, mb_y,
            );
            paint_partition_luma(
                &mut pred_y, off1, dim, meta.part1,
                part1_mvs.mv_l0, part1_mvs.mv_l1,
                l0_ref, l1_ref, mb_x, mb_y,
            );
        }
        BMbCandidate::B_8x8 { sub_mb_types, parts } => {
            // Paint each 8x8 quadrant with its own list/MV selection.
            for i in 0..4 {
                paint_sub_mb_luma(
                    &mut pred_y, i, sub_mb_types[i], &parts[i],
                    l0_ref, l1_ref, mb_x, mb_y,
                );
            }
        }
    }
    // --- Distortion: raw luma SATD between source and prediction ---
    let satd = satd_16x16(src_y, &pred_y);
    // --- Rate stage: 1 base bit plus per-mode side-information estimate ---
    let mut r_bits = 1; match *candidate {
        BMbCandidate::SkipOrDirect { .. } => {
            // Cheap mode flag plus a small flat allowance; no MVDs coded.
            r_bits += 1; r_bits += 4; }
        BMbCandidate::L0_16x16 { mv_l0 } => {
            // mb_type bins + one MVD + flat tail bits.
            r_bits += 1 + 2 * RARE_BIN_COST;
            r_bits += mvd_rate_estimate(mv_l0.mv_x, mv_l0.mv_y);
            r_bits += 4; }
        BMbCandidate::L1_16x16 { mv_l1 } => {
            r_bits += 1 + 2 * RARE_BIN_COST;
            r_bits += mvd_rate_estimate(mv_l1.mv_x, mv_l1.mv_y);
            r_bits += 4;
        }
        BMbCandidate::Bi_16x16 { mv_l0, mv_l1 } => {
            // Bi codes two MVDs.
            r_bits += 1 + 2 * RARE_BIN_COST;
            r_bits += mvd_rate_estimate(mv_l0.mv_x, mv_l0.mv_y);
            r_bits += mvd_rate_estimate(mv_l1.mv_x, mv_l1.mv_y);
            r_bits += 4;
        }
        BMbCandidate::Partitioned { meta, part0_mvs, part1_mvs } => {
            r_bits += partitioned_overhead_bits(&meta, &part0_mvs, &part1_mvs);
        }
        BMbCandidate::B_8x8 { sub_mb_types, parts } => {
            r_bits += b_8x8_overhead_bits(&sub_mb_types, &parts);
        }
    }
    // --- Mode-dependent distortion biases ---
    let lambda = LAMBDA_TAB_B[mb_qp.min(51) as usize] as u64;
    let satd_for_cost = match *candidate {
        BMbCandidate::SkipOrDirect { .. } => {
            // Penalize skip/direct on textured sources: inflate SATD by a
            // Q8 multiplier that grows with the source's HF energy
            // (clamped), so flat areas still prefer skip.
            let hf_clamped = psy_hf_satd_16x16(src_y).min(2048) as u64;
            let mult_q8 = 256 + (hf_clamped * 3) / 8;
            ((satd as u64) * mult_q8) >> 8
        }
        BMbCandidate::Bi_16x16 { .. } => {
            // Mild bias toward bi-prediction: scale SATD by 230/256 (~0.9).
            ((satd as u64) * 230) >> 8
        }
        _ => satd as u64,
    };
    // --- Optional psy-RD term: penalize loss of HF energy in the
    // prediction relative to the source (env-controlled). ---
    let (psy_enabled, psy_shift) = psy_rd_config();
    let psy_cost = if psy_enabled {
        let source_hf = psy_hf_satd_16x16(src_y) as i64;
        let pred_hf = psy_hf_satd_16x16(&pred_y) as i64;
        ((source_hf - pred_hf).unsigned_abs() >> psy_shift) as u64
    } else {
        0
    };
    let cost = satd_for_cost + lambda * (r_bits as u64) + psy_cost;
    BMbRdoResult {
        candidate: *candidate,
        satd,
        r_bits,
        cost,
    }
}
// Unit tests. Rate-model bit counts are pinned to exact values; SATD checks
// use synthetic flat or split-content frames so an exact-match prediction
// yields SATD == 0 and a mismatched reference yields a large SATD.
#[cfg(test)]
mod tests {
    use super::*;
    use crate::codec::h264::encoder::reconstruction::ReconBuffer;

    // Builds a reference frame with constant luma `y_fill` and neutral
    // (128) chroma planes.
    fn make_recon(width: u32, height: u32, y_fill: u8) -> ReconFrame {
        let mut buf = ReconBuffer::new(width, height).unwrap();
        for v in buf.y.iter_mut() { *v = y_fill; }
        for v in buf.cb.iter_mut() { *v = 128; }
        for v in buf.cr.iter_mut() { *v = 128; }
        ReconFrame::snapshot(&buf)
    }

    // 16x16 source luma block filled with a single value.
    fn const_src_y(value: u8) -> [[u8; 16]; 16] {
        let mut src = [[0u8; 16]; 16];
        for row in &mut src {
            for px in row {
                *px = value;
            }
        }
        src
    }

    // λ must be non-decreasing in QP; spot-check three anchor entries.
    #[test]
    fn lambda_b_table_grows_monotonically() {
        for qp in 0..51 {
            assert!(
                LAMBDA_TAB_B[qp] <= LAMBDA_TAB_B[qp + 1],
                "non-monotonic at qp {qp}: {} > {}",
                LAMBDA_TAB_B[qp], LAMBDA_TAB_B[qp + 1]
            );
        }
        assert_eq!(LAMBDA_TAB_B[0], 5);
        assert_eq!(LAMBDA_TAB_B[21], 62);
        assert_eq!(LAMBDA_TAB_B[51], 1978);
    }

    // Skip/direct with a perfect L0 match: zero SATD and the pinned
    // 6-bit rate, so cost is purely lambda * bits.
    #[test]
    fn skip_or_direct_zero_mv_zero_residual_zero_satd() {
        let src = const_src_y(100);
        let l0 = make_recon(64, 64, 100);
        let l1 = make_recon(64, 64, 100);
        let cand = BMbCandidate::SkipOrDirect {
            mv_l0: MotionVector::ZERO,
            mv_l1: MotionVector::ZERO,
            uses_l0: true,
            uses_l1: false,
        };
        let res = evaluate_b_mb_rdo(&cand, &src, &l0, &l1, 0, 0, 30);
        assert_eq!(res.satd, 0, "zero residual → zero SATD");
        assert_eq!(res.r_bits, 6);
        assert_eq!(res.cost, (LAMBDA_TAB_B[30] as u64) * 6);
    }

    // L0_16x16 rate: 1 base + (1 + 2*RARE) mb_type + 6 MVD + 4 tail = 16.
    #[test]
    fn l0_16x16_emits_three_mb_type_bins_plus_mvd() {
        let src = const_src_y(100);
        let l0 = make_recon(64, 64, 100);
        let l1 = make_recon(64, 64, 100);
        let cand = BMbCandidate::L0_16x16 {
            mv_l0: MotionVector { mv_x: 0, mv_y: 0 },
        };
        let res = evaluate_b_mb_rdo(&cand, &src, &l0, &l1, 0, 0, 30);
        assert_eq!(res.r_bits, 16);
        assert_eq!(res.satd, 0);
        assert_eq!(res.cost, (LAMBDA_TAB_B[30] as u64) * 16);
    }

    // Bi_16x16 rate adds a second zero MVD (+6) on top of L0's 16 → 22.
    #[test]
    fn bi_16x16_rate_estimate() {
        let src = const_src_y(100);
        let l0 = make_recon(64, 64, 100);
        let l1 = make_recon(64, 64, 100);
        let cand = BMbCandidate::Bi_16x16 {
            mv_l0: MotionVector::ZERO,
            mv_l1: MotionVector::ZERO,
        };
        let res = evaluate_b_mb_rdo(&cand, &src, &l0, &l1, 0, 0, 30);
        assert_eq!(res.r_bits, 22);
        assert_eq!(res.satd, 0);
        assert_eq!(res.cost, (LAMBDA_TAB_B[30] as u64) * 22);
    }

    // A 100-vs-200 luma mismatch must show up as a large SATD and push the
    // cost above the pure-rate floor.
    #[test]
    fn nonzero_residual_inflates_satd() {
        let src = const_src_y(100);
        let l0 = make_recon(64, 64, 200);
        let l1 = make_recon(64, 64, 200);
        let cand = BMbCandidate::L0_16x16 { mv_l0: MotionVector::ZERO };
        let res = evaluate_b_mb_rdo(&cand, &src, &l0, &l1, 0, 0, 30);
        assert!(res.satd > 1000, "residual MB should produce non-trivial SATD");
        assert!(res.cost > (LAMBDA_TAB_B[30] as u64) * 16);
    }

    // With identical distortion and rate, the only cost driver is λ(QP).
    #[test]
    fn cost_rises_with_qp_via_lambda() {
        let src = const_src_y(100);
        let l0 = make_recon(64, 64, 100);
        let l1 = make_recon(64, 64, 100);
        let cand = BMbCandidate::L0_16x16 { mv_l0: MotionVector::ZERO };
        let lo = evaluate_b_mb_rdo(&cand, &src, &l0, &l1, 0, 0, 21);
        let hi = evaluate_b_mb_rdo(&cand, &src, &l0, &l1, 0, 0, 36);
        assert!(hi.cost > lo.cost, "λ should grow with QP: {} > {}", hi.cost, lo.cost);
    }

    // The MVD rate model must charge more bits for larger magnitudes.
    #[test]
    fn mvd_rate_estimate_grows_with_magnitude() {
        let r0 = mvd_rate_estimate(0, 0);
        let r4 = mvd_rate_estimate(4, 0);
        let r32 = mvd_rate_estimate(32, 0);
        assert!(r0 < r4, "{r0} < {r4}");
        assert!(r4 < r32, "{r4} < {r32}");
    }

    // Shorthand MotionVector constructor for the partition tests below.
    fn pmv(x: i16, y: i16) -> MotionVector { MotionVector { mv_x: x, mv_y: y } }

    // 16x8 partitioned rate: 1 base + (1 + 4*RARE) + 2*6 MVD + 4 = 26.
    #[test]
    fn partitioned_16x8_l0_l0_zero_mv_zero_satd() {
        let src = const_src_y(100);
        let l0 = make_recon(64, 64, 100);
        let l1 = make_recon(64, 64, 100);
        let meta = crate::codec::h264::encoder::b_partitioned::partitioned_b_meta(4)
            .expect("mb_type 4 valid");
        let cand = BMbCandidate::Partitioned {
            meta,
            part0_mvs: BPartitionMvPair { mv_l0: pmv(0, 0), mv_l1: pmv(0, 0) },
            part1_mvs: BPartitionMvPair { mv_l0: pmv(0, 0), mv_l1: pmv(0, 0) },
        };
        let res = evaluate_b_mb_rdo(&cand, &src, &l0, &l1, 0, 0, 30);
        assert_eq!(res.satd, 0, "zero residual → zero SATD");
        assert_eq!(res.r_bits, 26);
    }

    // 8x16 bi/bi partitioned rate: both partitions code two MVDs → 38.
    #[test]
    fn partitioned_8x16_bi_bi_emits_four_mvds() {
        let src = const_src_y(100);
        let l0 = make_recon(64, 64, 100);
        let l1 = make_recon(64, 64, 100);
        let meta = crate::codec::h264::encoder::b_partitioned::partitioned_b_meta(21)
            .expect("mb_type 21 valid");
        let cand = BMbCandidate::Partitioned {
            meta,
            part0_mvs: BPartitionMvPair { mv_l0: pmv(0, 0), mv_l1: pmv(0, 0) },
            part1_mvs: BPartitionMvPair { mv_l0: pmv(0, 0), mv_l1: pmv(0, 0) },
        };
        let res = evaluate_b_mb_rdo(&cand, &src, &l0, &l1, 0, 0, 30);
        assert_eq!(res.r_bits, 38);
        assert_eq!(res.satd, 0);
    }

    // 16x8 L0/L1 on a top/bottom split source: each half matches its own
    // reference exactly, so per-partition MC must give zero SATD.
    #[test]
    fn partitioned_l0_l1_picks_correct_reference_per_partition() {
        let mut src = [[0u8; 16]; 16];
        for y in 0..8 { for x in 0..16 { src[y][x] = 100; } }
        for y in 8..16 { for x in 0..16 { src[y][x] = 200; } }
        let l0 = make_recon(64, 64, 100);
        let l1 = make_recon(64, 64, 200);
        let meta = crate::codec::h264::encoder::b_partitioned::partitioned_b_meta(8)
            .expect("mb_type 8 valid");
        let cand = BMbCandidate::Partitioned {
            meta,
            part0_mvs: BPartitionMvPair { mv_l0: pmv(0, 0), mv_l1: pmv(0, 0) },
            part1_mvs: BPartitionMvPair { mv_l0: pmv(0, 0), mv_l1: pmv(0, 0) },
        };
        let res = evaluate_b_mb_rdo(&cand, &src, &l0, &l1, 0, 0, 30);
        assert_eq!(res.satd, 0,
            "per-partition MC should pick L0 for top half, L1 for bottom");
    }

    // Same split content but an L0/L0 mode: the bottom half predicts from
    // the wrong reference, so SATD must be large.
    #[test]
    fn partitioned_picks_wrong_reference_inflates_satd() {
        let mut src = [[0u8; 16]; 16];
        for y in 0..8 { for x in 0..16 { src[y][x] = 100; } }
        for y in 8..16 { for x in 0..16 { src[y][x] = 200; } }
        let l0 = make_recon(64, 64, 100);
        let l1 = make_recon(64, 64, 200);
        let meta = crate::codec::h264::encoder::b_partitioned::partitioned_b_meta(4)
            .expect("mb_type 4 valid");
        let cand = BMbCandidate::Partitioned {
            meta,
            part0_mvs: BPartitionMvPair { mv_l0: pmv(0, 0), mv_l1: pmv(0, 0) },
            part1_mvs: BPartitionMvPair { mv_l0: pmv(0, 0), mv_l1: pmv(0, 0) },
        };
        let res = evaluate_b_mb_rdo(&cand, &src, &l0, &l1, 0, 0, 30);
        assert!(res.satd > 1000,
            "wrong-list MC should produce non-trivial SATD: got {}", res.satd);
    }

    // B_8x8 all-L0 rate: 1 base + 11 mb_type + 4*(5 + 6) + 4 tail = 60.
    #[test]
    fn b_8x8_uniform_l0_zero_mv_zero_satd() {
        let src = const_src_y(100);
        let l0 = make_recon(64, 64, 100);
        let l1 = make_recon(64, 64, 100);
        let zero = BPartitionMvPair { mv_l0: pmv(0, 0), mv_l1: pmv(0, 0) };
        let cand = BMbCandidate::B_8x8 {
            sub_mb_types: [1, 1, 1, 1],
            parts: [zero; 4],
        };
        let res = evaluate_b_mb_rdo(&cand, &src, &l0, &l1, 0, 0, 30);
        assert_eq!(res.satd, 0);
        assert_eq!(res.r_bits, 60);
    }

    // B_8x8 all-direct rate: 1 base + 11 mb_type + 4*1 + 4 tail = 20,
    // since direct sub-blocks code no MVDs.
    #[test]
    fn b_8x8_uniform_direct_no_mvds() {
        let src = const_src_y(100);
        let l0 = make_recon(64, 64, 100);
        let l1 = make_recon(64, 64, 100);
        let zero = BPartitionMvPair { mv_l0: pmv(0, 0), mv_l1: pmv(0, 0) };
        let cand = BMbCandidate::B_8x8 {
            sub_mb_types: [0, 0, 0, 0],
            parts: [zero; 4],
        };
        let res = evaluate_b_mb_rdo(&cand, &src, &l0, &l1, 0, 0, 30);
        assert_eq!(res.r_bits, 20);
        assert_eq!(res.satd, 0);
    }

    // B_8x8 all-bi rate: 1 base + 11 mb_type + 4*(5 + 12) + 4 tail = 84.
    #[test]
    fn b_8x8_uniform_bi_emits_eight_mvds() {
        let src = const_src_y(100);
        let l0 = make_recon(64, 64, 100);
        let l1 = make_recon(64, 64, 100);
        let zero = BPartitionMvPair { mv_l0: pmv(0, 0), mv_l1: pmv(0, 0) };
        let cand = BMbCandidate::B_8x8 {
            sub_mb_types: [3, 3, 3, 3],
            parts: [zero; 4],
        };
        let res = evaluate_b_mb_rdo(&cand, &src, &l0, &l1, 0, 0, 30);
        assert_eq!(res.r_bits, 84);
        assert_eq!(res.satd, 0);
    }

    // Checkerboard quadrants matched to their own list → exact prediction.
    #[test]
    fn b_8x8_per_sub_reference_selection() {
        let mut src = [[0u8; 16]; 16];
        for y in 0..8 {
            for x in 0..8 { src[y][x] = 100; }
            for x in 8..16 { src[y][x] = 200; }
        }
        for y in 8..16 {
            for x in 0..8 { src[y][x] = 200; }
            for x in 8..16 { src[y][x] = 100; }
        }
        let l0 = make_recon(64, 64, 100);
        let l1 = make_recon(64, 64, 200);
        let zero = BPartitionMvPair { mv_l0: pmv(0, 0), mv_l1: pmv(0, 0) };
        let cand = BMbCandidate::B_8x8 {
            sub_mb_types: [1, 2, 2, 1],
            parts: [zero; 4],
        };
        let res = evaluate_b_mb_rdo(&cand, &src, &l0, &l1, 0, 0, 30);
        assert_eq!(res.satd, 0,
            "per-sub-MB list selection should give exact match in checkerboard");
    }

    // Same checkerboard but uniform L0: two quadrants predict from the
    // wrong reference, so SATD must be large.
    #[test]
    fn b_8x8_wrong_per_sub_reference_inflates_satd() {
        let mut src = [[0u8; 16]; 16];
        for y in 0..8 {
            for x in 0..8 { src[y][x] = 100; }
            for x in 8..16 { src[y][x] = 200; }
        }
        for y in 8..16 {
            for x in 0..8 { src[y][x] = 200; }
            for x in 8..16 { src[y][x] = 100; }
        }
        let l0 = make_recon(64, 64, 100);
        let l1 = make_recon(64, 64, 200);
        let zero = BPartitionMvPair { mv_l0: pmv(0, 0), mv_l1: pmv(0, 0) };
        let cand = BMbCandidate::B_8x8 {
            sub_mb_types: [1, 1, 1, 1],
            parts: [zero; 4],
        };
        let res = evaluate_b_mb_rdo(&cand, &src, &l0, &l1, 0, 0, 30);
        assert!(res.satd > 1000,
            "uniform L0 mode on checkerboard should produce big SATD: {}", res.satd);
    }

    // 8x16 vertical split with left=L0 / right=L1 must match exactly.
    #[test]
    fn partitioned_8x16_left_right_split() {
        let mut src = [[0u8; 16]; 16];
        for y in 0..16 {
            for x in 0..8 { src[y][x] = 100; }
            for x in 8..16 { src[y][x] = 200; }
        }
        let l0 = make_recon(64, 64, 100);
        let l1 = make_recon(64, 64, 200);
        let meta = crate::codec::h264::encoder::b_partitioned::partitioned_b_meta(9)
            .expect("mb_type 9 valid");
        let cand = BMbCandidate::Partitioned {
            meta,
            part0_mvs: BPartitionMvPair { mv_l0: pmv(0, 0), mv_l1: pmv(0, 0) },
            part1_mvs: BPartitionMvPair { mv_l0: pmv(0, 0), mv_l1: pmv(0, 0) },
        };
        let res = evaluate_b_mb_rdo(&cand, &src, &l0, &l1, 0, 0, 30);
        assert_eq!(res.satd, 0,
            "8x16 left=L0 / right=L1 should match split content exactly");
    }
}