Migration to wgpu

Branch: master
Mark committed 2023-12-22 16:51:21 -08:00
parent d0e40965d4
commit a511a06709
Signed by: Mark
GPG Key ID: C6D63995FE72FD80
16 changed files with 2699 additions and 683 deletions

Cargo.lock (generated, 1930 lines changed)
File diff suppressed because it is too large.

Cargo.toml

@@ -27,10 +27,18 @@ incremental = false
rpath = false
[dependencies]
image = "0.24.7"
rand = "0.8.5"
winit = "0.28"
env_logger = "0.10"
log = "0.4"
wgpu = "0.18"
pollster = "0.3"
bytemuck = { version = "1.12", features = ["derive"] }
anyhow = "1.0"
cgmath = "0.18.0"
[dependencies.sdl2]
version = "0.36"
[dependencies.image]
version = "0.24"
default-features = false
features = ["ttf", "gfx", "bundled"]
features = ["png"]

src/doodad.rs

@@ -1,21 +1,17 @@
use crate::DrawContext;
use crate::Drawable;
use crate::SpriteAtlas;
use crate::WorldPosition;
use crate::{physics::Cartesian, Camera, Sprite};
pub struct Doodad {
pub sprite: String,
pub pos: WorldPosition,
pub scale: u32,
pub angle: f64,
pub pos: Cartesian,
}
impl Drawable for Doodad {
fn draw(&self, dc: &mut DrawContext, sa: &SpriteAtlas) -> Result<(), String> {
let pos = self.pos.screen_position(dc);
let sprite = sa.get(&self.sprite);
sprite.draw(dc.canvas, pos, self.angle, 1.0)?;
impl Doodad {
pub fn sprite(&self, camera: &Camera) -> Sprite {
let p = self.pos - camera.pos;
return Ok(());
return Sprite {
position: (p.x, p.y),
name: self.sprite.clone(),
angle: 0.0,
};
}
}

src/inputstatus.rs

@@ -1,4 +1,4 @@
use sdl2::{event::Event, keyboard::Keycode};
use winit::event::{ElementState, VirtualKeyCode};
// TODO: no boolean modification (no pub)
pub struct InputStatus {
@@ -16,32 +16,12 @@ impl InputStatus {
}
}
fn handle_keyup(&mut self, keycode: Keycode) {
match keycode {
Keycode::Left => self.key_left = false,
Keycode::Right => self.key_right = false,
Keycode::Up => self.key_thrust = false,
_ => {}
}
}
fn handle_keydown(&mut self, keycode: Keycode) {
match keycode {
Keycode::Left => self.key_left = true,
Keycode::Right => self.key_right = true,
Keycode::Up => self.key_thrust = true,
_ => {}
}
}
pub fn update(&mut self, event: Event) {
match event {
Event::KeyDown {
keycode: Some(key), ..
} => self.handle_keydown(key),
Event::KeyUp {
keycode: Some(key), ..
} => self.handle_keyup(key),
pub fn process(&mut self, state: &ElementState, key: &VirtualKeyCode) {
let down = state == &ElementState::Pressed;
match key {
VirtualKeyCode::Left => self.key_left = down,
VirtualKeyCode::Right => self.key_right = down,
VirtualKeyCode::Up => self.key_thrust = down,
_ => {}
}
}

src/main.rs

@@ -1,23 +1,26 @@
use sdl2::{event::Event, keyboard::Keycode, render::Canvas, video::Window};
use std::{time::Duration, time::Instant};
use anyhow::Result;
use physics::Cartesian;
use winit::{
event::{ElementState, Event, KeyboardInput, VirtualKeyCode, WindowEvent},
event_loop::{ControlFlow, EventLoop},
window::WindowBuilder,
};
mod doodad;
mod inputstatus;
mod physics;
mod render;
mod ship;
mod sprite;
mod system;
use crate::{
doodad::Doodad, inputstatus::InputStatus, physics::Cartesian, physics::WorldPosition,
ship::Ship, ship::ShipKind, sprite::SpriteAtlas, system::System,
doodad::Doodad,
inputstatus::InputStatus,
render::GPUState,
ship::{Ship, ShipKind},
system::System,
};
trait Drawable {
// Draw this item on the screen
fn draw(&self, dc: &mut DrawContext, sa: &SpriteAtlas) -> Result<(), String>;
}
struct Camera {
pos: Cartesian,
}
@@ -30,145 +33,132 @@ impl Camera {
}
}
static FTL: f64 = 1.0 / 200.0; // frame time limit
struct DrawContext<'a> {
canvas: &'a mut Canvas<Window>,
camera: Camera,
// Dimensions of the window
window_size: Cartesian,
// Position of the top-left corner of the window,
// relative to camera position.
top_left: Cartesian,
struct Sprite {
// Image to use
name: String,
// World position
position: (f64, f64),
angle: f32,
}
impl<'a> DrawContext<'a> {
fn new(canvas: &'a mut Canvas<Window>, camera: Camera) -> Self {
let mut s = Self {
canvas,
camera,
window_size: Cartesian::new(0.0, 0.0),
top_left: Cartesian::new(0.0, 0.0),
};
s.update();
return s;
struct Game {
input: InputStatus,
last_update: Instant,
player: Ship,
system: System,
camera: Camera,
}
impl Game {
fn new() -> Self {
Game {
last_update: Instant::now(),
input: InputStatus::new(),
player: Ship::new(ShipKind::Gypsum, Cartesian::new(0.0, 0.0)),
camera: Camera::new(),
system: System::new(),
}
}
fn process_input(&mut self, gpu: &ElementState, key: &VirtualKeyCode) {
self.input.process(gpu, key)
}
fn update(&mut self) {
self.window_size = Cartesian::from(self.canvas.window().size());
self.top_left = (self.window_size / 2.0) * Cartesian::new(-1.0, 1.0);
let t = self.last_update.elapsed().as_secs_f64();
println!("{:.02}", 1.0 / t);
if self.input.key_thrust {
self.player.body.thrust(50.0 * t);
}
if self.input.key_right {
self.player.body.rot(15.0 * t);
}
if self.input.key_left {
self.player.body.rot(-15.0 * t);
}
self.player.body.tick(t);
self.camera.pos = self.player.body.pos;
self.last_update = Instant::now();
}
fn sprites(&self) -> Vec<Sprite> {
let mut sprites: Vec<Sprite> = Vec::new();
sprites.append(&mut self.system.sprites(&self.camera));
sprites.push(self.player.sprite(&self.camera));
return sprites;
}
}
// Frame timing:
// <start>
// Input
// Draw
// <draw>
// Physics
// <phys>
// Wait
// <wait>
// Apply input
// <total>
#[derive(Debug, Default)]
struct PerformanceStats {
last_frame_draw: f64,
last_frame_phys: f64,
last_frame_wait: f64,
last_frame_total: f64,
}
use std::time::Instant;
fn main() -> Result<(), String> {
let sdl_context = sdl2::init()?;
let video_subsystem = sdl_context.video()?;
let mut stats = PerformanceStats::default();
pub async fn run() -> Result<()> {
env_logger::init();
let event_loop = EventLoop::new();
let window = WindowBuilder::new().build(&event_loop).unwrap();
let window = video_subsystem
.window("SDL2", 640, 480)
.position_centered()
.resizable()
.build()
.map_err(|e| e.to_string())?;
let mut gpu = GPUState::new(window).await?;
let mut game = Game::new();
let mut canvas = window
.into_canvas()
.accelerated()
.build()
.map_err(|e| e.to_string())?;
let texture_creator = canvas.texture_creator();
let sa = SpriteAtlas::new(&texture_creator)?;
let mut event_pump = sdl_context.event_pump()?;
let mut dc = DrawContext::new(&mut canvas, Camera::new());
let mut i = InputStatus::new();
let mut system = System::new();
let mut s = Ship::new(ShipKind::Gypsum, Cartesian::new(0.0, 0.0));
let mut frame_start;
let mut running = true;
while running {
frame_start = Instant::now();
dc.update();
for event in event_pump.poll_iter() {
match event {
Event::Quit { .. }
| Event::KeyDown {
keycode: Some(Keycode::Escape),
..
} => {
running = false;
event_loop.run(move |event, _, control_flow| {
match event {
Event::RedrawRequested(window_id) if window_id == gpu.window().id() => {
gpu.update();
game.update();
match gpu.render(&game.sprites()) {
Ok(_) => {}
// Reconfigure the surface if lost
Err(wgpu::SurfaceError::Lost) => gpu.resize(gpu.size),
// The system is out of memory, we should probably quit
Err(wgpu::SurfaceError::OutOfMemory) => *control_flow = ControlFlow::Exit,
// All other errors (Outdated, Timeout) should be resolved by the next frame
Err(e) => eprintln!("{:?}", e),
}
_ => i.update(event),
}
Event::MainEventsCleared => {
// RedrawRequested will only trigger once unless we manually
// request it.
gpu.window().request_redraw();
}
Event::WindowEvent {
ref event,
window_id,
} if window_id == gpu.window.id() => {
match event {
WindowEvent::CloseRequested => *control_flow = ControlFlow::Exit,
WindowEvent::KeyboardInput {
input:
KeyboardInput {
state,
virtual_keycode: Some(key),
..
},
..
} => game.process_input(state, key),
WindowEvent::Resized(physical_size) => {
gpu.resize(*physical_size);
}
WindowEvent::ScaleFactorChanged { new_inner_size, .. } => {
// new_inner_size is &&mut so we have to dereference it twice
gpu.resize(**new_inner_size);
}
_ => {}
}
}
_ => {}
}
dc.camera.pos = s.body.pos;
// Draw
dc.canvas.clear();
system.draw(&mut dc, &sa)?;
s.draw(&mut dc, &sa)?;
dc.canvas.present();
stats.last_frame_draw = frame_start.elapsed().as_secs_f64();
// Physics goes here
let frame_time = frame_start.elapsed().as_secs_f64();
stats.last_frame_phys = frame_time;
// Wait
// (limit frame rate)
if frame_time < FTL {
std::thread::sleep(Duration::from_secs_f64(FTL - frame_time));
}
stats.last_frame_wait = frame_start.elapsed().as_secs_f64();
// Apply input
let frame_time = frame_start.elapsed().as_secs_f64();
s.body.tick(frame_time);
system.tick(frame_time);
if i.key_thrust {
s.body.thrust(50.0 * frame_time);
}
if i.key_right {
s.body.rot(-1.0 * frame_time);
}
if i.key_left {
s.body.rot(1.0 * frame_time);
}
stats.last_frame_total = frame_start.elapsed().as_secs_f64();
}
Ok(())
});
}
fn main() -> Result<()> {
pollster::block_on(run())?;
return Ok(());
}

src/physics/body.rs

@@ -1,11 +1,10 @@
use crate::physics::Cartesian;
use std::f64::consts::{PI, TAU};
pub struct PhysBody {
pub pos: Cartesian,
pub vel: Cartesian,
pub mass: f64,
pub angle: f64, // In radians
pub angle: f64, // In degrees
}
impl PhysBody {
@@ -30,15 +29,18 @@ impl PhysBody {
/// Apply force in the direction this object is pointing.
pub fn thrust(&mut self, f: f64) {
let l = Cartesian::new(self.angle.sin(), self.angle.cos()) * f;
let l = Cartesian::new(
-self.angle.to_radians().sin(),
self.angle.to_radians().cos(),
) * f;
self.force(l);
}
// Rotate this object by `a` degrees.
pub fn rot(&mut self, a: f64) {
self.angle -= a;
if self.angle.abs() > PI {
self.angle -= self.angle.signum() * TAU;
if self.angle.abs() > 180.0 {
self.angle -= self.angle.signum() * 360.0;
}
}
}
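A quick sanity check of the angle convention introduced above (a sketch, not part of the commit): angles are now stored in degrees and, per the comments in src/render/gpu.rs, measured counter-clockwise from vertical, so thrust at 0 degrees pushes along +y and thrust at 90 degrees pushes along -x.

// Sketch only: reproduces the direction vector used by PhysBody::thrust,
// assuming `angle` is in degrees, counter-clockwise from vertical (+y).
fn thrust_dir(angle_deg: f64) -> (f64, f64) {
    (-angle_deg.to_radians().sin(), angle_deg.to_radians().cos())
}

fn main() {
    let up = thrust_dir(0.0);
    assert!(up.0.abs() < 1e-9 && (up.1 - 1.0).abs() < 1e-9); // 0 deg  -> +y
    let left = thrust_dir(90.0);
    assert!((left.0 + 1.0).abs() < 1e-9 && left.1.abs() < 1e-9); // 90 deg -> -x
}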

src/physics/mod.rs

@@ -1,9 +1,7 @@
mod body;
mod cartesian;
mod polar;
mod worldposition;
pub use body::PhysBody;
pub use cartesian::Cartesian;
pub use polar::Polar;
pub use worldposition::WorldPosition;

src/physics/worldposition.rs (file deleted)

@@ -1,48 +0,0 @@
use super::Cartesian;
use crate::DrawContext;
#[derive(Debug, Clone, Copy)]
pub struct WorldPosition {
pub pos: Cartesian, // True world position
pub par: f64, // Parallax factor
}
impl WorldPosition {
pub fn new(pos: Cartesian, par: f64) -> Self {
WorldPosition { pos: pos, par }
}
/*
fn from_screen_position(dc: &DrawContext, pos: Cartesian, par: f64) -> Self {
WorldPosition {
par,
pos: ((pos * Cartesian::new(1.0, -1.0)) + dc.top_left) * par + dc.camera.pos,
}
}
*/
/// Transform this world coordinate into a position on the screen,
/// taking parallax into account. (0, 0) is at top-left corner.
/// Returned position is this object's center.
pub fn screen_position(&self, dc: &DrawContext) -> Cartesian {
let par = self.par;
let pos: Cartesian = self.pos;
return (((pos - dc.camera.pos) / par) - dc.top_left) * Cartesian::new(1.0, -1.0);
}
/*
/// Transform this world coordinate into a position on the screen, ignoring parallax.
/// Used for debugging.
pub fn screen_position_real(&self, dc: &DrawContext) -> Cartesian {
let pos: Cartesian = self.pos;
return ((pos - dc.camera.pos) - dc.top_left) * Cartesian::new(1.0, -1.0);
}
*/
}
impl Into<Cartesian> for WorldPosition {
fn into(self) -> Cartesian {
self.pos.into()
}
}

src/render/gpu.rs (new file, 454 lines)

@@ -0,0 +1,454 @@
use anyhow::Result;
use bytemuck;
use cgmath::{Deg, Matrix4, Point2, Vector3};
use std::{iter, mem};
use wgpu::{self, util::DeviceExt};
use winit::{self, window::Window};
use super::texturearray::TextureArray;
use crate::Sprite;
pub struct GPUState {
device: wgpu::Device,
config: wgpu::SurfaceConfiguration,
surface: wgpu::Surface,
queue: wgpu::Queue,
pub window: Window,
pub size: winit::dpi::PhysicalSize<u32>,
render_pipeline: wgpu::RenderPipeline,
vertex_buffer: wgpu::Buffer,
index_buffer: wgpu::Buffer,
texture_array: TextureArray,
instance_buffer: wgpu::Buffer,
}
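// The matrix below maps z from OpenGL/cgmath clip space ([-1, 1]) to wgpu clip
// space ([0, 1]) via z' = 0.5 * z + 0.5; x and y are left unchanged.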
#[rustfmt::skip]
const OPENGL_TO_WGPU_MATRIX: Matrix4<f32> = Matrix4::new(
1.0, 0.0, 0.0, 0.0,
0.0, 1.0, 0.0, 0.0,
0.0, 0.0, 0.5, 0.5,
0.0, 0.0, 0.0, 1.0,
);
struct Instance {
transform: Transform,
texture_index: u32,
}
impl Instance {
fn to_raw(&self) -> InstanceRaw {
InstanceRaw {
model: (self.transform.build_view_projection_matrix()).into(),
texture_index: self.texture_index,
}
}
}
#[repr(C)]
#[derive(Copy, Clone, bytemuck::Pod, bytemuck::Zeroable)]
struct InstanceRaw {
model: [[f32; 4]; 4],
texture_index: u32,
}
impl InstanceRaw {
fn get_size() -> u64 {
20
}
fn desc() -> wgpu::VertexBufferLayout<'static> {
wgpu::VertexBufferLayout {
array_stride: mem::size_of::<InstanceRaw>() as wgpu::BufferAddress,
// Use a step mode of Instance (rather than Vertex): these attributes
// advance once per instance instead of once per vertex.
step_mode: wgpu::VertexStepMode::Instance,
attributes: &[
// A mat4 takes up 4 vertex slots as it is technically 4 vec4s. We need to define a slot
// for each vec4. We'll have to reassemble the mat4 in the shader.
wgpu::VertexAttribute {
offset: 0,
// The vertex shader only uses locations 0 and 1 for per-vertex data;
// we start the instance attributes at slot 5 to leave room and avoid
// conflicts later.
shader_location: 5,
format: wgpu::VertexFormat::Float32x4,
},
wgpu::VertexAttribute {
offset: mem::size_of::<[f32; 4]>() as wgpu::BufferAddress,
shader_location: 6,
format: wgpu::VertexFormat::Float32x4,
},
wgpu::VertexAttribute {
offset: mem::size_of::<[f32; 8]>() as wgpu::BufferAddress,
shader_location: 7,
format: wgpu::VertexFormat::Float32x4,
},
wgpu::VertexAttribute {
offset: mem::size_of::<[f32; 12]>() as wgpu::BufferAddress,
shader_location: 8,
format: wgpu::VertexFormat::Float32x4,
},
wgpu::VertexAttribute {
offset: mem::size_of::<[f32; 16]>() as wgpu::BufferAddress,
shader_location: 9,
format: wgpu::VertexFormat::Uint32,
},
],
}
}
}
struct Transform {
pos: Point2<f32>,
aspect: f32, // width / height
scale: f32,
rotate: f32, // Around this object's center, in degrees measured ccw from vertical
}
impl Transform {
fn build_view_projection_matrix(&self) -> Matrix4<f32> {
// Apply aspect ratio and scale
let mut scale = Matrix4::from_nonuniform_scale(1.0, 1.0 / self.aspect, 1.0);
scale = scale * Matrix4::from_scale(self.scale);
// Our mesh starts at (0, 0), so this will rotate around the object's center.
// Note that we translate AFTER scaling.
let rotate = Matrix4::from_angle_z(Deg { 0: self.rotate });
let translate = Matrix4::from_translation(Vector3 {
x: self.pos.x,
y: self.pos.y,
z: 0.0,
});
// Order matters!
// These are applied right-to-left
return OPENGL_TO_WGPU_MATRIX * translate * rotate * scale;
}
}
// Datatype for vertex buffer
#[repr(C)]
#[derive(Copy, Clone, Debug, bytemuck::Pod, bytemuck::Zeroable)]
struct Vertex {
position: [f32; 3],
tex_coords: [f32; 2],
}
impl Vertex {
fn desc() -> wgpu::VertexBufferLayout<'static> {
wgpu::VertexBufferLayout {
array_stride: mem::size_of::<Vertex>() as wgpu::BufferAddress,
step_mode: wgpu::VertexStepMode::Vertex,
attributes: &[
wgpu::VertexAttribute {
offset: 0,
shader_location: 0,
format: wgpu::VertexFormat::Float32x3,
},
wgpu::VertexAttribute {
offset: mem::size_of::<[f32; 3]>() as wgpu::BufferAddress,
shader_location: 1,
format: wgpu::VertexFormat::Float32x2,
},
],
}
}
}
// This is centered at 0,0 intentionally,
// so scaling works properly.
const VERTICES: &[Vertex] = &[
Vertex {
position: [-0.5, 0.5, 0.0],
tex_coords: [0.0, 0.0],
},
Vertex {
position: [0.5, 0.5, 0.0],
tex_coords: [1.0, 0.0],
},
Vertex {
position: [0.5, -0.5, 0.0],
tex_coords: [1.0, 1.0],
},
Vertex {
position: [-0.5, -0.5, 0.0],
tex_coords: [0.0, 1.0],
},
];
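// Two triangles (0-3-2 and 0-2-1) covering the quad above, wound
// counter-clockwise to match the FrontFace::Ccw / back-face culling settings
// in the render pipeline below.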
const INDICES: &[u16] = &[0, 3, 2, 0, 2, 1];
impl GPUState {
// We can draw at most this many sprites on the screen.
// TODO: compile-time option
pub const SPRITE_LIMIT: u64 = 100;
pub async fn new(window: Window) -> Result<Self> {
let size = window.inner_size();
let instance = wgpu::Instance::new(wgpu::InstanceDescriptor {
backends: wgpu::Backends::all(),
..Default::default()
});
let surface = unsafe { instance.create_surface(&window) }.unwrap();
// Basic setup
let device;
let queue;
let config;
{
let adapter = instance
.request_adapter(&wgpu::RequestAdapterOptions {
power_preference: wgpu::PowerPreference::default(),
compatible_surface: Some(&surface),
force_fallback_adapter: false,
})
.await
.unwrap();
(device, queue) = adapter
.request_device(
&wgpu::DeviceDescriptor {
features: wgpu::Features::TEXTURE_BINDING_ARRAY | wgpu::Features::SAMPLED_TEXTURE_AND_STORAGE_BUFFER_ARRAY_NON_UNIFORM_INDEXING,
// We may need limits if we compile for wasm
limits: wgpu::Limits::default(),
label: Some("gpu device"),
},
None,
)
.await
.unwrap();
// Assume sRGB
let surface_caps = surface.get_capabilities(&adapter);
let surface_format = surface_caps
.formats
.iter()
.copied()
.filter(|f| f.is_srgb())
.filter(|f| f.has_stencil_aspect())
.next()
.unwrap_or(surface_caps.formats[0]);
config = wgpu::SurfaceConfiguration {
usage: wgpu::TextureUsages::RENDER_ATTACHMENT,
format: surface_format,
width: size.width,
height: size.height,
present_mode: surface_caps.present_modes[0],
alpha_mode: surface_caps.alpha_modes[0],
view_formats: vec![],
};
surface.configure(&device, &config);
}
// Load textures
let texture_array = TextureArray::new(&device, &queue)?;
// Render pipeline
let render_pipeline;
let render_pipeline_layout;
{
let shader = device.create_shader_module(wgpu::ShaderModuleDescriptor {
label: Some("sprite shader"),
source: wgpu::ShaderSource::Wgsl(
include_str!(concat!(
env!("CARGO_MANIFEST_DIR"),
"/src/render/shaders/",
"shader.wgsl"
))
.into(),
),
});
render_pipeline_layout =
device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
label: Some("render pipeline layout"),
bind_group_layouts: &[&texture_array.bind_group_layout],
push_constant_ranges: &[],
});
render_pipeline = device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
label: Some("render pipeline"),
layout: Some(&render_pipeline_layout),
vertex: wgpu::VertexState {
module: &shader,
entry_point: "vertex_shader_main",
buffers: &[Vertex::desc(), InstanceRaw::desc()],
},
fragment: Some(wgpu::FragmentState {
module: &shader,
entry_point: "fragment_shader_main",
targets: &[Some(wgpu::ColorTargetState {
format: config.format,
blend: Some(wgpu::BlendState::ALPHA_BLENDING),
write_mask: wgpu::ColorWrites::ALL,
})],
}),
primitive: wgpu::PrimitiveState {
topology: wgpu::PrimitiveTopology::TriangleList,
strip_index_format: None,
front_face: wgpu::FrontFace::Ccw,
cull_mode: Some(wgpu::Face::Back),
polygon_mode: wgpu::PolygonMode::Fill,
unclipped_depth: false,
conservative: false,
},
depth_stencil: None,
multisample: wgpu::MultisampleState {
count: 1,
mask: !0,
alpha_to_coverage_enabled: false,
},
multiview: None,
});
}
let vertex_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some("vertex buffer"),
contents: bytemuck::cast_slice(VERTICES),
usage: wgpu::BufferUsages::VERTEX,
});
let index_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some("vertex index buffer"),
contents: bytemuck::cast_slice(INDICES),
usage: wgpu::BufferUsages::INDEX,
});
let instance_buffer = device.create_buffer(&wgpu::BufferDescriptor {
label: Some("instance buffer"),
usage: wgpu::BufferUsages::VERTEX | wgpu::BufferUsages::COPY_DST,
size: InstanceRaw::get_size() * Self::SPRITE_LIMIT,
mapped_at_creation: false,
});
return Ok(Self {
surface,
device,
queue,
config,
size,
window,
render_pipeline,
vertex_buffer,
index_buffer,
instance_buffer,
texture_array,
});
}
pub fn window(&self) -> &Window {
&self.window
}
pub fn resize(&mut self, new_size: winit::dpi::PhysicalSize<u32>) {
if new_size.width > 0 && new_size.height > 0 {
self.size = new_size;
self.config.width = new_size.width;
self.config.height = new_size.height;
self.surface.configure(&self.device, &self.config);
}
}
pub fn update(&mut self) {}
pub fn render(&mut self, sprites: &Vec<Sprite>) -> Result<(), wgpu::SurfaceError> {
let output = self.surface.get_current_texture()?;
let view = output
.texture
.create_view(&wgpu::TextureViewDescriptor::default());
let mut encoder = self
.device
.create_command_encoder(&wgpu::CommandEncoderDescriptor {
label: Some("sprite render encoder"),
});
let mut render_pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
label: Some("sprite render pass"),
color_attachments: &[Some(wgpu::RenderPassColorAttachment {
view: &view,
resolve_target: None,
ops: wgpu::Operations {
load: wgpu::LoadOp::Clear(wgpu::Color {
r: 0.0,
g: 0.0,
b: 0.0,
a: 1.0,
}),
store: wgpu::StoreOp::Store,
},
})],
depth_stencil_attachment: None,
occlusion_query_set: None,
timestamp_writes: None,
});
// Correct for screen aspect ratio
// (it may not be square!)
let screen_aspect = self.size.width as f32 / self.size.height as f32;
// TODO: warning when too many sprites are drawn.
let mut instances: Vec<Instance> = Vec::new();
for s in sprites {
let mut pos: Point2<f32> = (s.position.0 as f32, s.position.1 as f32).into();
// TODO: dynamic
pos.x /= 400.0;
pos.y /= 400.0;
let texture = self.texture_array.get_texture(&s.name[..]);
instances.push(Instance {
transform: Transform {
pos,
aspect: texture.aspect / screen_aspect,
scale: 0.25,
rotate: s.angle,
},
texture_index: texture.index,
})
}
// Enforce sprite limit
if sprites.len() as u64 >= Self::SPRITE_LIMIT {
// TODO: no panic, handle this better.
panic!("Sprite limit exceeded!")
}
// Write new sprite data to buffer
let instance_data: Vec<_> = instances.iter().map(Instance::to_raw).collect();
self.queue.write_buffer(
&self.instance_buffer,
0,
bytemuck::cast_slice(&instance_data),
);
render_pass.set_pipeline(&self.render_pipeline);
render_pass.set_bind_group(0, &self.texture_array.bind_group, &[]);
render_pass.set_vertex_buffer(0, self.vertex_buffer.slice(..));
render_pass.set_vertex_buffer(1, self.instance_buffer.slice(..));
render_pass.set_index_buffer(self.index_buffer.slice(..), wgpu::IndexFormat::Uint16);
render_pass.draw_indexed(0..INDICES.len() as u32, 0, 0..instances.len() as _);
// begin_render_pass borrows encoder mutably, so we can't call finish()
// without dropping this variable.
drop(render_pass);
self.queue.submit(iter::once(encoder.finish()));
output.present();
Ok(())
}
}

src/render/mod.rs (new file, 6 lines)

@@ -0,0 +1,6 @@
mod gpu;
mod rawtexture;
mod texturearray;
pub use gpu::GPUState;
pub use texturearray::Texture;

src/render/rawtexture.rs (new file, 66 lines)

@@ -0,0 +1,66 @@
use anyhow::Result;
use image::GenericImageView;
pub(super) struct RawTexture {
pub(super) view: wgpu::TextureView,
pub(super) dimensions: (u32, u32),
}
impl RawTexture {
pub(super) fn from_bytes(
device: &wgpu::Device,
queue: &wgpu::Queue,
bytes: &[u8],
label: &str,
) -> Result<Self> {
let img = image::load_from_memory(bytes)?;
Self::from_image(device, queue, &img, Some(label))
}
pub(super) fn from_image(
device: &wgpu::Device,
queue: &wgpu::Queue,
img: &image::DynamicImage,
label: Option<&str>,
) -> Result<Self> {
let rgba = img.to_rgba8();
let dimensions = img.dimensions();
let size = wgpu::Extent3d {
width: dimensions.0,
height: dimensions.1,
depth_or_array_layers: 1,
};
let texture = device.create_texture(&wgpu::TextureDescriptor {
label,
size,
mip_level_count: 1,
sample_count: 1,
dimension: wgpu::TextureDimension::D2,
format: wgpu::TextureFormat::Rgba8UnormSrgb,
usage: wgpu::TextureUsages::TEXTURE_BINDING | wgpu::TextureUsages::COPY_DST,
view_formats: &[],
});
let view = texture.create_view(&wgpu::TextureViewDescriptor::default());
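// Upload the decoded pixels; Rgba8UnormSrgb is 4 bytes per pixel, so each row
// is 4 * width bytes.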
queue.write_texture(
wgpu::ImageCopyTexture {
aspect: wgpu::TextureAspect::All,
texture: &texture,
mip_level: 0,
origin: wgpu::Origin3d::ZERO,
},
&rgba,
wgpu::ImageDataLayout {
offset: 0,
bytes_per_row: Some(4 * dimensions.0),
rows_per_image: Some(dimensions.1),
},
size,
);
Ok(Self { view, dimensions })
}
}

src/render/shaders/shader.wgsl (new file)

@@ -0,0 +1,60 @@
struct InstanceInput {
@location(5) transform_matrix_0: vec4<f32>,
@location(6) transform_matrix_1: vec4<f32>,
@location(7) transform_matrix_2: vec4<f32>,
@location(8) transform_matrix_3: vec4<f32>,
@location(9) texture_idx: u32,
};
// Vertex shader
struct VertexInput {
@location(0) position: vec3<f32>,
@location(1) tex_coords: vec2<f32>,
}
struct VertexOutput {
@builtin(position) clip_position: vec4<f32>,
@location(0) tex_coords: vec2<f32>,
@location(1) index: u32,
}
@vertex
fn vertex_shader_main(
model: VertexInput,
instance: InstanceInput,
) -> VertexOutput {
let transform_matrix = mat4x4<f32>(
instance.transform_matrix_0,
instance.transform_matrix_1,
instance.transform_matrix_2,
instance.transform_matrix_3,
);
var out: VertexOutput;
out.tex_coords = model.tex_coords;
out.clip_position = transform_matrix * vec4<f32>(model.position, 1.0);
out.index = instance.texture_idx;
return out;
}
// Fragment shader
@group(0) @binding(0)
var texture_array: binding_array<texture_2d<f32>>;
@group(0) @binding(1)
var sampler_array: binding_array<sampler>;
@fragment
fn fragment_shader_main(in: VertexOutput) -> @location(0) vec4<f32> {
return textureSampleLevel(
texture_array[in.index],
sampler_array[in.index],
in.tex_coords,
0.0
).rgba;
}

src/render/texturearray.rs (new file, 112 lines)

@@ -0,0 +1,112 @@
use anyhow::Result;
use std::{collections::HashMap, fs::File, io::Read, num::NonZeroU32};
use wgpu::BindGroupLayout;
use super::rawtexture::RawTexture;
pub struct TextureArray {
pub bind_group: wgpu::BindGroup,
pub bind_group_layout: BindGroupLayout,
texture_dims: Vec<(u32, u32)>,
texture_indices: HashMap<String, u32>,
}
const TEX: &[&str] = &["error", "gypsum", "earth", "a0"];
pub struct Texture {
pub index: u32,
pub dimensions: (u32, u32),
pub aspect: f32,
}
impl TextureArray {
pub fn get_texture(&self, name: &str) -> Texture {
let index = match self.texture_indices.get(name) {
Some(x) => *x,
None => 0, // Default texture
};
let dimensions = self.texture_dims[index as usize];
return Texture {
index,
dimensions,
aspect: dimensions.0 as f32 / dimensions.1 as f32,
};
}
pub fn new(device: &wgpu::Device, queue: &wgpu::Queue) -> Result<Self> {
let mut textures: Vec<RawTexture> = Vec::new();
let mut texture_indices: HashMap<String, u32> = HashMap::new();
let mut i = 0;
for t in TEX {
let p = format!("assets/{t}.png");
let mut f = File::open(&p)?;
let mut bytes = Vec::new();
f.read_to_end(&mut bytes)?;
textures.push(RawTexture::from_bytes(&device, &queue, &bytes, &p).unwrap());
texture_indices.insert(t.to_string(), i);
i += 1;
}
// One of these overall
let sampler = device.create_sampler(&wgpu::SamplerDescriptor {
address_mode_u: wgpu::AddressMode::ClampToEdge,
address_mode_v: wgpu::AddressMode::ClampToEdge,
address_mode_w: wgpu::AddressMode::ClampToEdge,
mag_filter: wgpu::FilterMode::Linear,
min_filter: wgpu::FilterMode::Nearest,
mipmap_filter: wgpu::FilterMode::Nearest,
..Default::default()
});
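// A non-None `count` turns each entry below into a binding array with one
// slot per loaded texture; this is why gpu.rs requests
// Features::TEXTURE_BINDING_ARRAY when creating the device.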
let bind_group_layout = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
label: Some("texture_bind_group_layout"),
entries: &[
// Texture data
wgpu::BindGroupLayoutEntry {
binding: 0,
visibility: wgpu::ShaderStages::FRAGMENT,
ty: wgpu::BindingType::Texture {
multisampled: false,
view_dimension: wgpu::TextureViewDimension::D2,
sample_type: wgpu::TextureSampleType::Float { filterable: true },
},
count: NonZeroU32::new(textures.len() as u32),
},
// Texture sampler
wgpu::BindGroupLayoutEntry {
binding: 1,
visibility: wgpu::ShaderStages::FRAGMENT,
ty: wgpu::BindingType::Sampler(wgpu::SamplerBindingType::Filtering),
count: NonZeroU32::new(textures.len() as u32),
},
],
});
let views: Vec<&wgpu::TextureView> = textures.iter().map(|x| &x.view).collect();
let bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
label: Some("texture_bind_group"),
layout: &bind_group_layout,
entries: &[
wgpu::BindGroupEntry {
binding: 0,
// Array of all views
resource: wgpu::BindingResource::TextureViewArray(&views),
},
wgpu::BindGroupEntry {
binding: 1,
resource: wgpu::BindingResource::SamplerArray(&[&sampler].repeat(views.len())),
},
],
});
return Ok(Self {
bind_group,
bind_group_layout,
texture_dims: textures.iter().map(|x| x.dimensions).collect(),
texture_indices,
});
}
}

src/ship.rs

@@ -1,8 +1,7 @@
use crate::physics::Cartesian;
use crate::physics::PhysBody;
use crate::DrawContext;
use crate::Drawable;
use crate::SpriteAtlas;
use crate::Camera;
use crate::Sprite;
pub enum ShipKind {
Gypsum,
@@ -11,7 +10,7 @@ pub enum ShipKind {
impl ShipKind {
fn sprite(&self) -> &'static str {
match self {
Self::Gypsum => "gypsum.png",
Self::Gypsum => "gypsum",
}
}
}
@@ -29,20 +28,13 @@ impl Ship {
}
}
/// Get the position of this drawable on the screen
/// (0, 0) is at top-left corner.
///
/// Returned position is this object's center.
fn screen_position(&self, dc: &DrawContext) -> Cartesian {
return ((self.body.pos - dc.camera.pos) - dc.top_left) * Cartesian::new(1.0, -1.0);
}
}
pub fn sprite(&self, camera: &Camera) -> Sprite {
let p = self.body.pos - camera.pos;
impl Drawable for Ship {
fn draw(&self, dc: &mut DrawContext, sa: &SpriteAtlas) -> Result<(), String> {
let pos = self.screen_position(dc);
let sprite = sa.get(self.kind.sprite());
sprite.draw(dc.canvas, pos, self.body.angle.to_degrees(), 1.0)?;
return Ok(());
return Sprite {
position: (p.x, p.y),
name: self.kind.sprite().to_owned(),
angle: self.body.angle as f32,
};
}
}

src/sprite.rs (file deleted)

@@ -1,130 +0,0 @@
use image::io::Reader as ImageReader;
use sdl2::{
rect::{Point, Rect},
render::{Canvas, Texture, TextureCreator},
video::{Window, WindowContext},
};
use std::collections::HashMap;
use crate::physics::Cartesian;
/// A handle for a sprite inside a SpriteAtlas
pub struct Sprite<'a> {
texture: &'a Texture<'a>,
rect: Rect,
scale: f64,
}
impl<'a> Sprite<'a> {
/// Draw this sprite on the screen.
///
/// Position represents the center of the sprite
/// on-screen position, NOT in the world.
pub fn draw(
&self,
canvas: &mut Canvas<Window>,
position: Cartesian,
angle: f64,
scale: f64,
) -> Result<(), String> {
let win_size = Cartesian::from(canvas.window().size());
let scale = scale * self.scale;
// Post-scale dimensions on the screen
let width = self.rect.width() as f64 * scale;
let height = self.rect.height() as f64 * scale;
// Don't draw if we're not on the screen.
// An offset is included to ensure we're completely
// off the screen. We add the whole width intentionally.
if position.x < -1.0 * (width as f64)
|| position.x > win_size.x + width as f64
|| position.y < -1.0 * (height as f64)
|| position.y > win_size.y + height as f64
{
return Ok(());
}
let mut dest = Rect::new(0, 0, width as u32, height as u32);
dest.center_on(Point::new((position.x) as i32, (position.y) as i32));
// copy the frame to the canvas
canvas.copy_ex(
&self.texture,
Some(self.rect),
Some(dest),
angle, // angle
Point::new((width / 2.0) as i32, (height / 2.0) as i32), // Rotation center
false,
false,
)?;
return Ok(());
}
}
/// A cache of textures we use when drawing the screen.
///
/// This is implemented very carefully, since SDL2 textures have tricky lifetimes.
pub struct SpriteAtlas<'a> {
data: HashMap<String, (Texture<'a>, Rect, f64)>,
}
impl<'a> SpriteAtlas<'a> {
pub fn new(texture_creator: &'a TextureCreator<WindowContext>) -> Result<Self, String> {
let mut b = Self {
data: HashMap::new(),
};
b.load_one(texture_creator, "gypsum.png", 0.75)?;
b.load_one(texture_creator, "a0.png", 1.0)?;
b.load_one(texture_creator, "small.png", 1.0)?;
b.load_one(texture_creator, "earth.png", 1.0)?;
return Ok(b);
}
pub fn get(&'a self, name: &str) -> Sprite<'a> {
let (texture, rect, scale) = self.data.get(name).unwrap();
return Sprite {
texture,
scale: scale.clone(),
rect: rect.clone(),
};
}
fn load_one(
&mut self,
texture_creator: &'a TextureCreator<WindowContext>,
s: &str,
scale: f64,
) -> Result<(), String> {
let im = ImageReader::open(format!("assets/{s}"))
.unwrap()
.decode()
.unwrap();
let width = im.width();
let height = im.height();
let mut im = im.as_bytes().to_vec();
let surface = sdl2::surface::Surface::from_data(
&mut im,
width,
height,
width * 4,
sdl2::pixels::PixelFormatEnum::RGBA32,
)?;
let texture = texture_creator
.create_texture_from_surface(&surface)
.map_err(|e| e.to_string())?;
self.data.insert(
s.to_owned(),
(texture, Rect::new(0, 0, width, height), scale),
);
return Ok(());
}
}

src/system.rs

@@ -1,189 +1,37 @@
use crate::{
physics::Cartesian, physics::Polar, physics::WorldPosition, sprite::SpriteAtlas, Doodad,
DrawContext, Drawable,
};
use rand::Rng;
use sdl2::{gfx::primitives::DrawRenderer, pixels::Color};
struct StarField {
stars: Vec<WorldPosition>,
width: f64,
height: f64,
}
impl StarField {
fn new(width: f64, height: f64, d: f64) -> Self {
let mut s = StarField {
stars: Vec::new(),
width,
height,
};
let mut num = rand::thread_rng();
let area = (width / 100.0) * (height / 100.0);
let n = (area * d) as i32;
for _ in 0..n {
s.stars.push(WorldPosition::new(
Cartesian::new(
num.gen_range(-width / 2.0..width / 2.0),
num.gen_range(-height / 2.0..height / 2.0),
),
num.gen_range(3f64..4f64),
))
}
return s;
}
fn draw_with_offset(
&self,
dc: &DrawContext,
pos_in_field: Cartesian,
offset: Cartesian,
) -> Result<(), String> {
for wp in &self.stars {
// Coordinate of star on screen,
// with (0, 0) at top left
let p: Cartesian = wp.pos.into();
let q =
((p - pos_in_field + offset) / wp.par - dc.top_left) * Cartesian::new(1.0, -1.0);
dc.canvas.filled_circle(
q.x as i16,
q.y as i16,
(5.0 - (1.0 * wp.par)) as i16,
Color::RGB(100, 100, 100),
)?;
}
return Ok(());
}
fn draw(&mut self, dc: &mut DrawContext, _sa: &SpriteAtlas) -> Result<(), String> {
let w = self.width;
let h = self.height;
let ww = w / 2.0;
let hh = h / 2.0;
// Camera position relative to the center of this field.
let pos_in_field = Cartesian::new(
dc.camera.pos.x.signum() * (((dc.camera.pos.x.abs() + ww) % w) - ww),
dc.camera.pos.y.signum() * (((dc.camera.pos.y.abs() + hh) % h) - hh),
);
// Center of this field, in world coordinates
let field_center = dc.camera.pos - pos_in_field;
// Compute tile bounds.
// We use neighboring tiles' corners instead of this tile's corner to properly account for parallax.
// If we use the current tiles corners, we'll see stars appear when a tile is drawn--parallax moves them
// into view. The bounds below guarantee that no *other* tiles' stars will be drawn inside.
//
// bound_nw is the screen position of the bottom-right corner of north-west tile
// bound_se is the screen position of the top-right corner of south-east tile
let bound_nw =
WorldPosition::new(field_center + Cartesian::new(-ww, hh), 4.0).screen_position(dc);
let bound_se =
WorldPosition::new(field_center + Cartesian::new(ww, -hh), 4.0).screen_position(dc);
// Naturally, we show tiles only if we can see their edges.
let north = bound_nw.y > 0.0;
let south = bound_se.y < dc.window_size.y;
let east = bound_se.x < dc.window_size.x;
let west = bound_nw.x > 0.0;
// Draw center tile
self.draw_with_offset(dc, pos_in_field, Cartesian::new(0.0, 0.0))?;
// Draw surrounding tiles
// (which are just offset clones of the main one)
if north {
self.draw_with_offset(dc, pos_in_field, Cartesian::new(0.0, h))?;
}
if south {
self.draw_with_offset(dc, pos_in_field, Cartesian::new(0.0, -h))?;
}
if east {
self.draw_with_offset(dc, pos_in_field, Cartesian::new(w, 0.0))?;
}
if west {
self.draw_with_offset(dc, pos_in_field, Cartesian::new(-w, 0.0))?;
}
if north && east {
self.draw_with_offset(dc, pos_in_field, Cartesian::new(w, h))?;
}
if north && west {
self.draw_with_offset(dc, pos_in_field, Cartesian::new(-w, h))?;
}
if south && east {
self.draw_with_offset(dc, pos_in_field, Cartesian::new(w, -h))?;
}
if south && west {
self.draw_with_offset(dc, pos_in_field, Cartesian::new(-w, -h))?;
}
// draw_circle doesn't clean up the color it uses, so we do that here.
dc.canvas.set_draw_color(Color::RGB(0, 0, 0));
return Ok(());
}
}
use crate::{physics::Cartesian, physics::Polar, Camera, Doodad, Sprite};
pub struct System {
bodies: Vec<Box<dyn Drawable>>,
starfield: StarField,
bodies: Vec<Doodad>,
}
impl System {
pub fn new() -> Self {
let mut s = System {
bodies: Vec::new(),
starfield: StarField::new(6000.0, 6000.0, 1.0),
};
let mut s = System { bodies: Vec::new() };
s.bodies.push(Box::new(Doodad {
pos: WorldPosition::new(Cartesian::new(0.0, 0.0), 2.0),
sprite: "a0.png".to_owned(),
scale: 1,
angle: 0.0,
}));
s.bodies.push(Doodad {
pos: Cartesian::new(0.0, 0.0),
sprite: "a0".to_owned(),
});
s.bodies.push(Box::new(Doodad {
pos: WorldPosition::new(
Polar {
center: Cartesian::new(0.0, 0.0),
radius: 300.0,
angle: 31.0,
}
.into(),
1.5,
),
sprite: "earth.png".to_owned(),
scale: 1,
angle: (180f64).to_radians(),
}));
s.bodies.push(Doodad {
pos: Polar {
center: Cartesian::new(0.0, 0.0),
radius: 300.0,
angle: 31.0,
}
.into(),
sprite: "earth".to_owned(),
});
s.bodies.push(Box::new(Doodad {
pos: WorldPosition::new(Cartesian::new(1000.0, 1000.0), 2.0),
sprite: "small.png".to_owned(),
scale: 1,
angle: 0.0,
}));
s.bodies.push(Doodad {
pos: Cartesian::new(1000.0, 1000.0),
sprite: "small".to_owned(),
});
return s;
}
/// Calculate the state of this body after t seconds.
pub fn tick(&mut self, _t: f64) {
//let body = &mut self.bodies[1];
//body.pos.angle += 0.1 * t;
//body.angle -= 0.1 * t;
}
pub fn draw(&mut self, dc: &mut DrawContext, sa: &SpriteAtlas) -> Result<(), String> {
self.starfield.draw(dc, sa)?;
for body in &self.bodies {
body.draw(dc, sa)?;
}
return Ok(());
pub fn sprites(&self, camera: &Camera) -> Vec<Sprite> {
return self.bodies.iter().map(|x| x.sprite(camera)).collect();
}
}