use anyhow::Result;
use bytemuck;
use cgmath::{EuclideanSpace, Point2};
use std::{iter, rc::Rc};
use wgpu;
use winit::{self, window::Window};

use crate::{Camera, Sprite};

use super::{
    pipeline::PipelineBuilder,
    texturearray::TextureArray,
    util::Transform,
    vertexbuffer::{
        data::{SPRITE_INDICES, SPRITE_VERTICES},
        types::{PlainVertex, SpriteInstance, StarInstance, TexturedVertex},
        VertexBuffer,
    },
};

pub struct GPUState {
    device: wgpu::Device,
    config: wgpu::SurfaceConfiguration,
    surface: wgpu::Surface,
    queue: wgpu::Queue,

    pub window: Window,
    pub size: winit::dpi::PhysicalSize<u32>,

    sprite_pipeline: wgpu::RenderPipeline,
    starfield_pipeline: wgpu::RenderPipeline,

    texture_array: TextureArray,
    vertex_buffers: VertexBuffers,
}

struct VertexBuffers {
    sprite: Rc<VertexBuffer>,
    starfield: Rc<VertexBuffer>,
}

impl GPUState {
    // We can draw at most this many sprites on the screen.
    // TODO: compile-time option
    pub const SPRITE_LIMIT: u64 = 100;
    pub const STAR_LIMIT: u64 = 100;

    pub async fn new(window: Window) -> Result<Self> {
        let size = window.inner_size();

        let instance = wgpu::Instance::new(wgpu::InstanceDescriptor {
            backends: wgpu::Backends::all(),
            ..Default::default()
        });

        let surface = unsafe { instance.create_surface(&window) }.unwrap();

        // Basic setup
        let device;
        let queue;
        let config;
        {
            let adapter = instance
                .request_adapter(&wgpu::RequestAdapterOptions {
                    power_preference: wgpu::PowerPreference::default(),
                    compatible_surface: Some(&surface),
                    force_fallback_adapter: false,
                })
                .await
                .unwrap();

            (device, queue) = adapter
                .request_device(
                    &wgpu::DeviceDescriptor {
                        features: wgpu::Features::TEXTURE_BINDING_ARRAY
                            | wgpu::Features::SAMPLED_TEXTURE_AND_STORAGE_BUFFER_ARRAY_NON_UNIFORM_INDEXING,
                        // We may need limits if we compile for wasm
                        limits: wgpu::Limits::default(),
                        label: Some("gpu device"),
                    },
                    None,
                )
                .await
                .unwrap();

            // Prefer an sRGB surface format,
            // falling back to whatever the adapter lists first.
            let surface_caps = surface.get_capabilities(&adapter);
            let surface_format = surface_caps
                .formats
                .iter()
                .copied()
                .filter(|f| f.is_srgb())
                .next()
                .unwrap_or(surface_caps.formats[0]);

            config = wgpu::SurfaceConfiguration {
                usage: wgpu::TextureUsages::RENDER_ATTACHMENT,
                format: surface_format,
                width: size.width,
                height: size.height,
                present_mode: surface_caps.present_modes[0],
                alpha_mode: surface_caps.alpha_modes[0],
                view_formats: vec![],
            };

            surface.configure(&device, &config);
        }

        let vertex_buffers = VertexBuffers {
            sprite: Rc::new(VertexBuffer::new::<TexturedVertex, SpriteInstance>(
                "sprite",
                &device,
                Some(SPRITE_VERTICES),
                Some(SPRITE_INDICES),
                Self::SPRITE_LIMIT,
            )),
            starfield: Rc::new(VertexBuffer::new::<PlainVertex, StarInstance>(
                "starfield",
                &device,
                None,
                None,
                Self::STAR_LIMIT,
            )),
        };

        // Load textures
        let texture_array = TextureArray::new(&device, &queue)?;

        // Create render pipelines
        let sprite_pipeline = PipelineBuilder::new("sprite", &device)
            .set_shader(include_str!(concat!(
                env!("CARGO_MANIFEST_DIR"),
                "/src/render/shaders/",
                "sprite.wgsl"
            )))
            .set_format(config.format)
            .set_triangle(true)
            .set_vertex_buffer(&vertex_buffers.sprite)
            .set_bind_group_layouts(&[&texture_array.bind_group_layout])
            .build();

        let starfield_pipeline = PipelineBuilder::new("starfield", &device)
            .set_shader(include_str!(concat!(
                env!("CARGO_MANIFEST_DIR"),
                "/src/render/shaders/",
                "starfield.wgsl"
            )))
            .set_format(config.format)
            .set_triangle(false)
            .set_vertex_buffer(&vertex_buffers.starfield)
            .build();

        Ok(Self {
            device,
            config,
            surface,
            queue,
            window,
            size,
            sprite_pipeline,
            starfield_pipeline,
            texture_array,
            vertex_buffers,
        })
    }

    pub fn window(&self) -> &Window {
        &self.window
    }
    pub fn resize(&mut self, new_size: winit::dpi::PhysicalSize<u32>) {
        if new_size.width > 0 && new_size.height > 0 {
            self.size = new_size;
            self.config.width = new_size.width;
            self.config.height = new_size.height;
            self.surface.configure(&self.device, &self.config);
        }
    }

    pub fn update(&mut self) {}

    pub fn render(
        &mut self,
        sprites: &Vec<Sprite>,
        camera: Camera,
    ) -> Result<(), wgpu::SurfaceError> {
        let output = self.surface.get_current_texture()?;
        let view = output
            .texture
            .create_view(&wgpu::TextureViewDescriptor::default());

        let mut encoder = self
            .device
            .create_command_encoder(&wgpu::CommandEncoderDescriptor {
                label: Some("sprite render encoder"),
            });

        let mut render_pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
            label: Some("sprite render pass"),
            color_attachments: &[Some(wgpu::RenderPassColorAttachment {
                view: &view,
                resolve_target: None,
                ops: wgpu::Operations {
                    load: wgpu::LoadOp::Clear(wgpu::Color {
                        r: 0.0,
                        g: 0.0,
                        b: 0.0,
                        a: 1.0,
                    }),
                    store: wgpu::StoreOp::Store,
                },
            })],
            depth_stencil_attachment: None,
            occlusion_query_set: None,
            timestamp_writes: None,
        });

        // Correct for screen aspect ratio
        // (it may not be square!)
        let screen_aspect = self.size.width as f32 / self.size.height as f32;

        // Game coordinates (relative to camera) of the ne and sw corners of the screen.
        // Used to skip off-screen sprites.
        let clip_ne = Point2::from((-1.0, 1.0)) * camera.zoom;
        let clip_sw = Point2::from((1.0, -1.0)) * camera.zoom;

        let mut instances: Vec<SpriteInstance> = Vec::new();
        for s in sprites {
            let pos = s.post_parallax_position(camera) - camera.pos.to_vec();
            let texture = self.texture_array.get_texture(&s.name[..]);

            // Game dimensions of this sprite post-scale.
            //
            // We only need height / 2 to check if we're on the screen,
            // but we omit the division.
            // This gives us a small margin, and lets us re-use the value
            // without an extra multiply.
            let height = s.height * s.scale;
            let width = height * texture.aspect;

            // Don't draw (or compute matrices for)
            // sprites that are off the screen
            if pos.x < clip_ne.x - width
                || pos.y > clip_ne.y + height
                || pos.x > clip_sw.x + width
                || pos.y < clip_sw.y - height
            {
                continue;
            }

            // Compute the values we need to draw:
            // scale: combines texture scale and zoom scale.
            // screen_pos: position of this sprite in screen coordinates.
            //
            // We can't use screen_pos to exclude off-screen sprites because
            // it can't account for height and width.
            let scale = height / camera.zoom;
            let screen_pos: Point2<f32> = pos / camera.zoom;

            instances.push(SpriteInstance {
                transform: Transform {
                    pos: screen_pos,
                    aspect: texture.aspect,
                    screen_aspect,
                    rotate: s.angle,
                    scale,
                }
                .to_matrix()
                .into(),
                texture_index: texture.index,
            })
        }

        // Enforce sprite limit
        if sprites.len() as u64 > Self::SPRITE_LIMIT {
            // TODO: no panic, handle this better.
            panic!("Sprite limit exceeded!")
        }
panic!("Sprite limit exceeded!") } // Write new sprite data to buffer self.queue.write_buffer( &self.vertex_buffers.sprite.instances, 0, bytemuck::cast_slice(&instances), ); render_pass.set_bind_group(0, &self.texture_array.bind_group, &[]); let nstances: Vec = (-10..10) .map(|x| StarInstance { transform: cgmath::Matrix4::from_translation(cgmath::Vector3 { x: x as f32 / 10.0, y: 0.0, z: 0.0, }) .into(), }) .collect(); self.queue.write_buffer( &self.vertex_buffers.starfield.instances, 0, bytemuck::cast_slice(&nstances), ); // Starfield pipeline self.vertex_buffers.starfield.set_in_pass(&mut render_pass); render_pass.set_pipeline(&self.starfield_pipeline); render_pass.draw(0..1, 0..nstances.len() as _); // Sprite pipeline self.vertex_buffers.sprite.set_in_pass(&mut render_pass); render_pass.set_pipeline(&self.sprite_pipeline); render_pass.draw_indexed(0..SPRITE_INDICES.len() as u32, 0, 0..instances.len() as _); // begin_render_pass borrows encoder mutably, so we can't call finish() // without dropping this variable. drop(render_pass); self.queue.submit(iter::once(encoder.finish())); output.present(); Ok(()) } }