trapezoid_core/gpu.rs

mod command;
mod common;

mod utils;
#[cfg(feature = "vulkan")]
mod vulkan;

use utils::PeekableReceiver;
#[cfg(feature = "vulkan")]
use vulkan as backend;

#[cfg(not(feature = "vulkan"))]
mod dummy_render;

#[cfg(not(feature = "vulkan"))]
use dummy_render as backend;

use crate::memory::{interrupts::InterruptRequester, BusLine, Result};
use command::{instantiate_gp0_command, Gp0CmdType, Gp0Command};

use core::fmt;
#[cfg(feature = "vulkan")]
use std::thread::JoinHandle;

use std::{
    ops::Range,
    sync::{
        atomic::{AtomicU32, Ordering},
        mpsc, Arc,
    },
};

use common::{DrawingTextureParams, DrawingVertex};

use backend::StandardCommandBufferAllocator;
pub use backend::{Device, GpuFuture, Image, Queue};

#[cfg(feature = "vulkan")]
use backend::{AutoCommandBufferBuilder, BlitImageInfo, CommandBufferUsage, Filter};

bitflags::bitflags! {
    #[derive(Default, Clone, Copy, Debug, PartialEq, Eq)]
    struct GpuStat: u32 {
        const TEXTURE_PAGE_X_BASE      = 0b00000000000000000000000000001111;
        const TEXTURE_PAGE_Y_BASE      = 0b00000000000000000000000000010000;
        const SEMI_TRASPARENCY         = 0b00000000000000000000000001100000;
        const TEXTURE_PAGE_COLORS      = 0b00000000000000000000000110000000;
        const DITHER_ENABLED           = 0b00000000000000000000001000000000;
        const DRAWING_TO_DISPLAY_AREA  = 0b00000000000000000000010000000000;
        const DRAWING_MASK_BIT         = 0b00000000000000000000100000000000;
        const NO_DRAW_ON_MASK          = 0b00000000000000000001000000000000;
        const INTERLACE_FIELD          = 0b00000000000000000010000000000000;
        const REVERSE_FLAG             = 0b00000000000000000100000000000000;
        const DISABLE_TEXTURE          = 0b00000000000000001000000000000000;
        const HORIZONTAL_RESOLUTION2   = 0b00000000000000010000000000000000;
        const HORIZONTAL_RESOLUTION1   = 0b00000000000001100000000000000000;
        const VERTICAL_RESOLUTION      = 0b00000000000010000000000000000000;
        const VIDEO_MODE               = 0b00000000000100000000000000000000;
        const DISPLAY_AREA_COLOR_DEPTH = 0b00000000001000000000000000000000;
        const VERTICAL_INTERLACE       = 0b00000000010000000000000000000000;
        const DISPLAY_DISABLED         = 0b00000000100000000000000000000000;
        const INTERRUPT_REQUEST        = 0b00000001000000000000000000000000;
        const DMA_DATA_REQUEST         = 0b00000010000000000000000000000000;
        const READY_FOR_CMD_RECV       = 0b00000100000000000000000000000000;
        const READY_FOR_TO_SEND_VRAM   = 0b00001000000000000000000000000000;
        const READY_FOR_DMA_RECV       = 0b00010000000000000000000000000000;
        const DMA_DIRECTION            = 0b01100000000000000000000000000000;
        const INTERLACE_ODD_EVEN_LINES = 0b10000000000000000000000000000000;
    }
}

#[cfg_attr(not(feature = "vulkan"), allow(dead_code))]
impl GpuStat {
    fn _texture_page_coords(&self) -> (u32, u32) {
        let x = (self.bits() & Self::TEXTURE_PAGE_X_BASE.bits()) * 64;
        let y = (self.intersects(Self::TEXTURE_PAGE_Y_BASE) as u32) * 256;

        (x, y)
    }

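    /// The effective horizontal display resolution in pixels: 368 when
    /// HORIZONTAL_RESOLUTION2 is set, otherwise 256/320/512/640 depending on
    /// HORIZONTAL_RESOLUTION1.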
    fn horizontal_resolution(&self) -> u32 {
        if self.intersects(Self::HORIZONTAL_RESOLUTION2) {
            368
        } else {
            // HORIZONTAL_RESOLUTION1 is two bits:
            // bit 0 (if set, add 64 to the base resolution of 256)
            // bit 1 (if set, multiply the current resolution by 2)
            //
            // result:
            // 0: 256
            // 1: 320
            // 2: 512
            // 3: 640
            let resolution_multiplier = (self.bits() & Self::HORIZONTAL_RESOLUTION1.bits()) >> 17;
            let resolution = 0x100 | ((resolution_multiplier & 1) << 6);
            resolution << (resolution_multiplier >> 1)
        }
    }

    // divider to get the dots per scanline
    // dots_per_line = cycles_per_line / divider
    fn horizontal_dots_divider(&self) -> u32 {
        if self.intersects(Self::HORIZONTAL_RESOLUTION2) {
            7
        } else {
            // we want the result to be:
            // 0: 10
            // 1: 8
            // 2: 5
            // 3: 4
            //
            // The second two numbers are half the first two, so we can use the
            // second bit to divide by 2.
            let resolution_bits = (self.bits() & Self::HORIZONTAL_RESOLUTION1.bits()) >> 17;

            // 4 is the base, we add 1 if the first bit is cleared, to get 5 and 10
            let base = 4 | ((resolution_bits & 1) ^ 1);
            // multiply by 2 if the second bit is cleared
            base << ((resolution_bits >> 1) ^ 1)
        }
    }

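    /// The vertical display resolution in pixels: 480 only when both
    /// VERTICAL_RESOLUTION and VERTICAL_INTERLACE are set, otherwise 240.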
    fn vertical_resolution(&self) -> u32 {
        240 << (self.intersects(Self::VERTICAL_RESOLUTION)
            && self.intersects(Self::VERTICAL_INTERLACE)) as u32
    }

    fn is_24bit_color_depth(&self) -> bool {
        self.intersects(Self::DISPLAY_AREA_COLOR_DEPTH)
    }

    fn is_ntsc_video_mode(&self) -> bool {
        !self.intersects(Self::VIDEO_MODE)
    }

    fn _display_enabled(&self) -> bool {
        !self.intersects(Self::DISPLAY_DISABLED)
    }

    fn semi_transparency_mode(&self) -> u8 {
        ((self.bits() & Self::SEMI_TRASPARENCY.bits()) >> 5) as u8
    }

    fn dither_enabled(&self) -> bool {
        self.intersects(Self::DITHER_ENABLED)
    }

    /// Drawing commands that use textures update GPUSTAT.
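    /// Only bits 0-8 and bit 15 (texture page base, semi-transparency mode,
    /// texture page colors, and texture disable) are replaced here.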
    fn update_from_texture_params(&mut self, texture_params: &DrawingTextureParams) {
        let x = (texture_params.tex_page_base[0] / 64) & 0xF;
        let y = (texture_params.tex_page_base[1] / 256) & 1;
        *self &= Self::from_bits_retain(!0x81FF);
        *self |= Self::from_bits_retain(x);
        *self |= Self::from_bits_retain(y << 4);
        *self |= Self::from_bits_retain((texture_params.semi_transparency_mode as u32) << 5);
        *self |= Self::from_bits_retain((texture_params.tex_page_color_mode as u32) << 7);
        *self |= Self::from_bits_retain((texture_params.texture_disable as u32) << 15);
    }
}

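/// GPUSTAT shared between the emulation thread and the GPU backend thread,
/// wrapped in an `AtomicU32` so both sides can read and update it without locking.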
pub(crate) struct AtomicGpuStat {
    stat: AtomicU32,
}

impl AtomicGpuStat {
    fn new(stat: GpuStat) -> Self {
        Self {
            stat: AtomicU32::new(stat.bits()),
        }
    }

    fn load(&self) -> GpuStat {
        GpuStat::from_bits(self.stat.load(Ordering::Relaxed)).unwrap()
    }

    fn store(&self, stat: GpuStat) {
        self.stat.store(stat.bits(), Ordering::Relaxed);
    }

    fn fetch_update<F>(&self, mut f: F) -> Result<GpuStat, GpuStat>
    where
        F: FnMut(GpuStat) -> Option<GpuStat>,
    {
        self.stat
            .fetch_update(Ordering::Relaxed, Ordering::Relaxed, |old| {
                Some(f(GpuStat::from_bits(old).unwrap())?.bits())
            })
            .map(|old| GpuStat::from_bits(old).unwrap())
            .map_err(|e| GpuStat::from_bits(e).unwrap())
    }
}

impl fmt::Debug for AtomicGpuStat {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "{:?}", self.load())
    }
}

/// The state of the GPU at the time a command is executed in the rendering thread.
/// Because the state can change after a command is queued but before it is executed,
/// we send a copy of the current state and keep it unmodified until the command runs.
#[derive(Clone, Default)]
pub(crate) struct GpuStateSnapshot {
    gpu_stat: GpuStat,

    allow_texture_disable: bool,
    textured_rect_flip: (bool, bool),

    drawing_area_top_left: (u32, u32),
    drawing_area_bottom_right: (u32, u32),
    drawing_offset: (i32, i32),
    texture_window_mask: (u32, u32),
    texture_window_offset: (u32, u32),

    vram_display_area_start: (u32, u32),
    display_horizontal_range: (u32, u32),
    display_vertical_range: (u32, u32),

    // These are only used for handling the GP1(0x10) command, so instead of
    // rebuilding the values from their individual parts, we just cache them.
    cached_gp0_e2: u32,
    cached_gp0_e3: u32,
    cached_gp0_e4: u32,
    cached_gp0_e5: u32,
}

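/// Commands sent from the emulation side to the rendering backend over the
/// `gpu_backend_sender` channel. Drawing commands carry the `GpuStateSnapshot`
/// they should be executed with.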
#[cfg_attr(not(feature = "vulkan"), allow(dead_code))]
pub(crate) enum BackendCommand {
    BlitFront {
        full_vram: bool,
        state_snapshot: GpuStateSnapshot,
    },
    DrawPolyline {
        vertices: Vec<DrawingVertex>,
        semi_transparent: bool,
        state_snapshot: GpuStateSnapshot,
    },
    DrawPolygon {
        vertices: Vec<DrawingVertex>,
        texture_params: DrawingTextureParams,
        textured: bool,
        texture_blending: bool,
        semi_transparent: bool,
        state_snapshot: GpuStateSnapshot,
    },
    WriteVramBlock {
        block_range: (Range<u32>, Range<u32>),
        block: Vec<u16>,
    },
    VramVramBlit {
        src: (Range<u32>, Range<u32>),
        dst: (Range<u32>, Range<u32>),
    },
    VramReadBlock {
        block_range: (Range<u32>, Range<u32>),
    },
    FillColor {
        top_left: (u32, u32),
        size: (u32, u32),
        color: (u8, u8, u8),
    },
}

#[cfg_attr(not(feature = "vulkan"), allow(dead_code))]
pub(crate) struct Gpu {
    // used for blitting to the frontend
    queue: Arc<Queue>,
    device: Arc<Device>,

    // handle to the backend gpu thread
    #[cfg(feature = "vulkan")]
    _gpu_backend_thread_handle: JoinHandle<()>,

    /// Holds commands that need extra parameters or are complex, such as
    /// transfers to/from VRAM and rendering.
    current_command: Option<Box<dyn Gp0Command>>,
    // GPUREAD channel
    gpu_read_sender: mpsc::Sender<u32>,
    gpu_read_receiver: PeekableReceiver<u32>,
    // backend commands channel
    gpu_backend_sender: mpsc::Sender<BackendCommand>,
    // channel for the front image coming from the backend
    gpu_front_image_receiver: mpsc::Receiver<Arc<Image>>,

    first_frame: bool,
    current_front_image: Option<Arc<Image>>,
    command_buffer_allocator: Arc<StandardCommandBufferAllocator>,

    // shared GPUSTAT
    gpu_stat: Arc<AtomicGpuStat>,
    state_snapshot: GpuStateSnapshot,

    scanline: u32,
    dot: u32,
    drawing_odd: bool,
    in_vblank: bool,

    cpu_cycles_counter: u32,
}

impl Gpu {
    pub fn new(device: Arc<Device>, queue: Arc<Queue>) -> Self {
        let (gpu_read_sender, gpu_read_receiver) = mpsc::channel();
        #[allow(unused_variables)]
        let (gpu_backend_sender, gpu_backend_receiver) = mpsc::channel();
        #[allow(unused_variables)]
        let (gpu_front_image_sender, gpu_front_image_receiver) = mpsc::channel();

        let gpu_stat = Arc::new(AtomicGpuStat::new(
            GpuStat::READY_FOR_CMD_RECV | GpuStat::READY_FOR_DMA_RECV,
        ));

        let state_snapshot = GpuStateSnapshot {
            gpu_stat: gpu_stat.load(),
            allow_texture_disable: false,
            textured_rect_flip: (false, false),

            drawing_area_top_left: (0, 0),
            drawing_area_bottom_right: (0, 0),
            drawing_offset: (0, 0),
            texture_window_mask: (0, 0),
            texture_window_offset: (0, 0),

            cached_gp0_e2: 0,
            cached_gp0_e3: 0,
            cached_gp0_e4: 0,
            cached_gp0_e5: 0,

            vram_display_area_start: (0, 0),
            display_horizontal_range: (0, 0),
            display_vertical_range: (0, 0),
        };

        #[cfg(feature = "vulkan")]
        let _gpu_backend_thread_handle = backend::GpuBackend::start(
            device.clone(),
            queue.clone(),
            gpu_stat.clone(),
            gpu_read_sender.clone(),
            gpu_backend_receiver,
            gpu_front_image_sender,
        );

        Self {
            queue,
            device: device.clone(),

            #[cfg(feature = "vulkan")]
            _gpu_backend_thread_handle,

            current_command: None,
            gpu_read_sender,
            gpu_read_receiver: PeekableReceiver::new(gpu_read_receiver),
            gpu_backend_sender,
            gpu_front_image_receiver,

            first_frame: true,
            current_front_image: None,
            command_buffer_allocator: Arc::new(StandardCommandBufferAllocator::new(
                device,
                Default::default(),
            )),

            gpu_stat,
            state_snapshot,

            scanline: 0,
            dot: 0,
            drawing_odd: false,
            in_vblank: false,
            cpu_cycles_counter: 0,
        }
    }

    pub fn reset(&mut self) {
        let _ = std::mem::replace(self, Self::new(self.device.clone(), self.queue.clone()));
    }

    /// Returns the number of `dot_clocks` and whether an `hblank_clock` occurred
    /// when clocking the GPU for `cpu_cycles` cycles.
    /// These clocks are used for the timers.
    pub fn clock(
        &mut self,
        interrupt_requester: &mut impl InterruptRequester,
        cpu_cycles: u32,
    ) -> (u32, bool) {
        // The GPU clock is CPU*11/7 == 53.222400MHz
        // The FPS is determined by the video mode: NTSC is ~60Hz, PAL is ~50Hz
        self.cpu_cycles_counter += cpu_cycles * 11;

        let cycles = self.cpu_cycles_counter / 7;
        self.cpu_cycles_counter %= 7;

        let gpu_stat = self.gpu_stat.load();
        let max_dots = if gpu_stat.is_ntsc_video_mode() {
            3413
        } else {
            3406
        };
        let max_scanlines = if gpu_stat.is_ntsc_video_mode() {
            263
        } else {
            314
        };
        let horizontal_dots_divider = gpu_stat.horizontal_dots_divider();
        let vertical_resolution = gpu_stat.vertical_resolution();
        let is_interlace = gpu_stat.intersects(GpuStat::VERTICAL_INTERLACE);

        // We must not overflow `max_dots`, i.e. clock more than one scanline at a time.
        assert!(cycles < max_dots);
        self.dot += cycles;

        // If the increment is larger than the divider, we clock the timer by the number
        // of times the divider fits in the increment.
        let mut dot_clocks = cycles / horizontal_dots_divider;

        // We may also have extra cycles that add up to one more clock.
        // For example:
        // - divider = 10
        // - cycles = 15
        // Following only the cycle increments we would go 0 -> 15 -> 30 and skip the
        // multiple of the divider at `20`, losing that `dot_clock`. The check below
        // detects that we crossed a multiple of the divider and accounts for it.
        if (self.dot % horizontal_dots_divider) < (cycles % horizontal_dots_divider) {
            dot_clocks += 1;
        }

        let mut hblank_clock = false;
        if self.dot >= max_dots {
            hblank_clock = true;
            self.dot -= max_dots;
            self.scanline += 1;

            if is_interlace && vertical_resolution == 240 && self.scanline < 240 {
                self.drawing_odd = !self.drawing_odd;
            }

            if self.scanline >= max_scanlines {
                self.scanline = 0;
                self.in_vblank = false;

                if is_interlace && vertical_resolution == 480 {
                    self.drawing_odd = !self.drawing_odd;
                }
            }

            if self.scanline == 240 {
                interrupt_requester.request_vblank();
                self.in_vblank = true;
            }
        }

        (dot_clocks, hblank_clock)
    }

    pub fn in_vblank(&self) -> bool {
        self.in_vblank
    }

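    /// Without the vulkan backend there is nothing to render; the input future
    /// is passed through unchanged.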
    #[cfg(not(feature = "vulkan"))]
    pub fn sync_gpu_and_blit_to_front(
        &mut self,
        _dest_image: Arc<Image>,
        _full_vram: bool,
        in_future: Box<dyn GpuFuture>,
    ) -> Box<dyn GpuFuture> {
        in_future
    }

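    /// Waits for the backend to finish the previous frame's front image, queues
    /// the `BlitFront` command for the next frame, and blits the current front
    /// image into `dest_image`.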
    #[cfg(feature = "vulkan")]
    pub fn sync_gpu_and_blit_to_front(
        &mut self,
        dest_image: Arc<Image>,
        full_vram: bool,
        in_future: Box<dyn GpuFuture>,
    ) -> Box<dyn GpuFuture> {
        // if this is not the first frame, the backend should already have
        // an image for us in the channel.
        if !self.first_frame {
            // `recv` is blocking, here we will wait for the GPU to finish all drawing.
            // FIXME: Do not block. Find a way to keep the GPU synced with minimal performance loss.
            self.current_front_image = Some(self.gpu_front_image_receiver.recv().unwrap());
        }
        self.first_frame = false;

        // Send the command for the next frame now, so that by the time we `recv`
        // later it will most likely be ready.
        self.state_snapshot.gpu_stat = self.gpu_stat.load();
        self.gpu_backend_sender
            .send(BackendCommand::BlitFront {
                full_vram,
                state_snapshot: self.state_snapshot.clone(),
            })
            .unwrap();

        if let Some(img) = self.current_front_image.as_ref() {
            let mut builder: AutoCommandBufferBuilder<
                crate::gpu::vulkan::PrimaryAutoCommandBuffer,
            > = AutoCommandBufferBuilder::primary(
                self.command_buffer_allocator.clone(),
                self.queue.queue_family_index(),
                CommandBufferUsage::OneTimeSubmit,
            )
            .unwrap();

            builder
                .blit_image(BlitImageInfo {
                    filter: Filter::Nearest,
                    ..BlitImageInfo::images(img.clone(), dest_image)
                })
                .unwrap();
            let cb = builder.build().unwrap();

            // TODO: remove wait
            in_future
                .then_execute(self.queue.clone(), cb)
                .unwrap()
                .then_signal_fence_and_flush()
                .unwrap()
                .boxed()
        } else {
            // we must flush the future even if we are not using it.
            in_future
        }
    }
}

impl Gpu {
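    // Builds the GPUSTAT value returned to the CPU: the shared flags plus the
    // dynamically computed interlace bits (bit 13 and bit 31).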
    fn read_gpu_stat(&self) -> u32 {
        let interlace_bit = (self.drawing_odd && !self.in_vblank) as u32;
        // set by GP1(0x8)
        let interlace_field = if self.gpu_stat.load().intersects(GpuStat::INTERLACE_FIELD) {
            1 // always on
        } else {
            interlace_bit ^ 1
        };

        // Ready to receive Cmd Word
        // Ready to receive DMA Block
        let out = self.gpu_stat.load().bits() | (interlace_bit << 31) | (interlace_field << 13);
        log::trace!("GPUSTAT = {:08X}", out);
        log::trace!("GPUSTAT = {:?}", self.gpu_stat);
        out
    }

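    // Reads one word from the GPUREAD channel; once the channel is drained,
    // READY_FOR_TO_SEND_VRAM is cleared in GPUSTAT.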
    fn gpu_read(&mut self) -> u32 {
        let out = self.gpu_read_receiver.try_recv();

        if self.gpu_read_receiver.is_empty() {
            self.gpu_stat
                .fetch_update(|s| Some(s - GpuStat::READY_FOR_TO_SEND_VRAM))
                .unwrap();
        }

        log::trace!("GPUREAD = {:08X?}", out);
        out.unwrap_or(0)
    }
}

impl Gpu {
    /// Handles building Gp0 commands; once a command has all of its parameters
    /// and is ready to be executed, it is sent to the backend.
    fn handle_gp0(&mut self, data: u32) {
        log::trace!("GPU: GP0 write: {:08x}", data);
        // if we are still executing some command
        if let Some(cmd) = self.current_command.as_mut() {
            if cmd.still_need_params() {
                log::trace!("gp0 extra param {:08X}", data);
                cmd.add_param(data);
                if !cmd.still_need_params() {
                    let cmd = self.current_command.take().unwrap();

                    self.gpu_stat
                        .fetch_update(|s| Some(s - GpuStat::READY_FOR_DMA_RECV))
                        .unwrap();

                    log::info!("executing command {:?}", cmd.cmd_type());
                    if let Some(backend_cmd) =
                        cmd.exec_command(self.gpu_stat.clone(), &mut self.state_snapshot)
                    {
                        self.gpu_backend_sender.send(backend_cmd).unwrap();
                    }

                    // ready for next command
                    self.gpu_stat
                        .fetch_update(|s| {
                            Some(s | GpuStat::READY_FOR_CMD_RECV | GpuStat::READY_FOR_DMA_RECV)
                        })
                        .unwrap();
                }
            } else {
                unreachable!();
            }
        } else {
            let mut cmd = instantiate_gp0_command(data);
            log::info!("creating new command {:?}", cmd.cmd_type());
            if cmd.still_need_params() {
                self.current_command = Some(cmd);
                self.gpu_stat
                    .fetch_update(|s| Some(s - GpuStat::READY_FOR_CMD_RECV))
                    .unwrap();
            } else {
                log::info!("executing command {:?}", cmd.cmd_type());
                if let Some(backend_cmd) =
                    cmd.exec_command(self.gpu_stat.clone(), &mut self.state_snapshot)
                {
                    self.gpu_backend_sender.send(backend_cmd).unwrap();
                }
            }
        }
    }

    /// Executes GP1 commands. What we can is handled on the frontend, the rest
    /// is sent to the backend; this keeps the GPUSTAT register in sync.
    fn handle_gp1(&mut self, data: u32) {
        let cmd = data >> 24;
        log::trace!("gp1 command {:02X} data: {:08X}", cmd, data);
        match cmd {
            0x00 => {
                // Reset Gpu
                // TODO: check what we need to do in reset
                self.gpu_stat.store(
                    GpuStat::DISPLAY_DISABLED
                        | GpuStat::INTERLACE_FIELD
                        | GpuStat::READY_FOR_DMA_RECV
                        | GpuStat::READY_FOR_CMD_RECV,
                );
            }
            0x01 => {
                // Reset command fifo buffer

                if let Some(cmd) = &mut self.current_command {
                    if let Gp0CmdType::CpuToVramBlit = cmd.cmd_type() {
                        // flush vram write

                        let cmd = self.current_command.take().unwrap();
                        // CpuToVramBlit supports being interrupted, and will only send
                        // the rows that have already been written to the vram.
                        if let Some(backend_cmd) =
                            cmd.exec_command(self.gpu_stat.clone(), &mut self.state_snapshot)
                        {
                            self.gpu_backend_sender.send(backend_cmd).unwrap();
                        }
                    }
                }
                self.current_command = None;
            }
            0x02 => {
                // Reset IRQ
                self.gpu_stat
                    .fetch_update(|s| Some(s.difference(GpuStat::INTERRUPT_REQUEST)))
                    .unwrap();
            }
            0x03 => {
                // Display enable
                self.gpu_stat
                    .fetch_update(|s| {
                        if data & 1 == 1 {
                            Some(s.union(GpuStat::DISPLAY_DISABLED))
                        } else {
                            Some(s.difference(GpuStat::DISPLAY_DISABLED))
                        }
                    })
                    .unwrap();
            }
            0x04 => {
                // DMA direction
                // TODO: should also affect GpuStat::DMA_DATA_REQUEST
                self.gpu_stat
                    .fetch_update(|mut s| {
                        s.remove(GpuStat::DMA_DIRECTION);
                        s |= GpuStat::from_bits_retain((data & 3) << 29);
                        Some(s)
                    })
                    .unwrap();
            }
            0x05 => {
                // Vram Start of Display area

                let x = data & 0x3ff;
                let y = (data >> 10) & 0x1ff;

                self.state_snapshot.vram_display_area_start = (x, y);
                log::info!(
                    "vram display start area {:?}",
                    self.state_snapshot.vram_display_area_start
                );
            }
            0x06 => {
                // Screen Horizontal Display range
                let x1 = data & 0xfff;
                let x2 = (data >> 12) & 0xfff;

                self.state_snapshot.display_horizontal_range = (x1, x2);
                log::info!(
                    "display horizontal range {:?}",
                    self.state_snapshot.display_horizontal_range
                );
            }
            0x07 => {
                // Screen Vertical Display range
                let y1 = data & 0x1ff;
                let y2 = (data >> 10) & 0x1ff;

                self.state_snapshot.display_vertical_range = (y1, y2);
                log::info!(
                    "display vertical range {:?}",
                    self.state_snapshot.display_vertical_range
                );
            }
            0x08 => {
                // Display mode

                // 17-18 Horizontal Resolution 1     (0=256, 1=320, 2=512, 3=640)
                // 19    Vertical Resolution         (0=240, 1=480, when Bit22=1)
                // 20    Video Mode                  (0=NTSC/60Hz, 1=PAL/50Hz)
                // 21    Display Area Color Depth    (0=15bit, 1=24bit)
                // 22    Vertical Interlace          (0=Off, 1=On)
                let stat_bits_17_22 = data & 0x3F;
                let stat_bit_16_horizontal_resolution_2 = (data >> 6) & 1;
                let stat_bit_14_reverse_flag = (data >> 7) & 1;
                // the inverse of the vertical interlace
                let interlace_field = ((data >> 5) & 1) ^ 1;

                self.gpu_stat
                    .fetch_update(|mut s| {
                        s &= GpuStat::from_bits_retain(!0x7f6000);
                        s |= GpuStat::from_bits_retain(stat_bits_17_22 << 17);
                        s |= GpuStat::from_bits_retain(stat_bit_14_reverse_flag << 14);
                        s |= GpuStat::from_bits_retain(stat_bit_16_horizontal_resolution_2 << 16);
                        s |= GpuStat::from_bits_retain(interlace_field << 13);
                        Some(s)
                    })
                    .unwrap();
            }
            0x09 => {
                // Allow texture disable
                self.state_snapshot.allow_texture_disable = data & 1 == 1;
            }
            0x10 => {
                // GPU info

                // 0x0~0xF retrieve info, and the rest are mirrors
                let info_id = data & 0xF;

                let result = match info_id {
                    2 => {
                        // Read Texture Window setting GP0(E2h)
                        self.state_snapshot.cached_gp0_e2
                    }
                    3 => {
                        // Read Draw area top left GP0(E3h)
                        self.state_snapshot.cached_gp0_e3
                    }
                    4 => {
                        // Read Draw area bottom right GP0(E4h)
                        self.state_snapshot.cached_gp0_e4
                    }
                    5 => {
                        // Read Draw offset GP0(E5h)
                        self.state_snapshot.cached_gp0_e5
                    }
                    6 => {
                        // TODO: return old value of GPUREAD
                        0
                    }
                    7 => {
                        // GPU type
                        2
                    }
                    8 => {
                        // unknown
                        0
                    }
                    _ => {
                        // TODO: return old value of GPUREAD
                        0
                    }
                };

                self.gpu_read_sender.send(result).unwrap();
            }
            _ => todo!("gp1 command {:02X}", cmd),
        }
    }
}

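// Memory-mapped register interface: offset 0 is GP0 (writes) / GPUREAD (reads),
// offset 4 is GP1 (writes) / GPUSTAT (reads).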
impl BusLine for Gpu {
    fn read_u32(&mut self, addr: u32) -> Result<u32> {
        let r = match addr {
            0 => self.gpu_read(),
            4 => self.read_gpu_stat(),
            _ => unreachable!(),
        };
        Ok(r)
    }

    fn write_u32(&mut self, addr: u32, data: u32) -> Result<()> {
        match addr {
            0 => {
                self.handle_gp0(data);
            }
            4 => {
                self.handle_gp1(data);
            }
            _ => unreachable!(),
        }
        Ok(())
    }
}