use std::iter;

use debug_triangle::DEBUG_VERT;
use simple_plane::generate_square;
use wgpu::util::DeviceExt;

mod vertex;
use vertex::Vertex;
mod simple_plane;
use simple_plane::generate_simple_plane;
mod debug_triangle;
mod camera;
use camera::Camera;
use camera::CameraUniform;
mod wave;
mod texture;

use winit::{
    event::*,
    event_loop::EventLoop,
    keyboard::{KeyCode, PhysicalKey},
    window::{Window, WindowBuilder},
};

impl Vertex {
    fn desc() -> wgpu::VertexBufferLayout<'static> {
        wgpu::VertexBufferLayout {
            array_stride: std::mem::size_of::<Vertex>() as wgpu::BufferAddress,
            step_mode: wgpu::VertexStepMode::Vertex,
            attributes: &[
                wgpu::VertexAttribute {
                    offset: 0,
                    shader_location: 0,
                    format: wgpu::VertexFormat::Float32x3,
                },
                wgpu::VertexAttribute {
                    offset: std::mem::size_of::<[f32; 3]>() as wgpu::BufferAddress,
                    shader_location: 1,
                    format: wgpu::VertexFormat::Float32x3,
                },
            ],
        }
    }
}

// // Changed
// const VERTICES: &[Vertex] = &[
//     // Changed
//     Vertex { position: [-0.0868241, 0.49240386, 0.0], tex_coords: [0.4131759, 0.00759614] },     // A
//     Vertex { position: [-0.49513406, 0.06958647, 0.0], tex_coords: [0.0048659444, 0.43041354] }, // B
//     Vertex { position: [-0.21918549, -0.44939706, 0.0], tex_coords: [0.28081453, 0.949397] },    // C
//     Vertex { position: [0.35966998, -0.3473291, 0.0], tex_coords: [0.85967, 0.84732914] },       // D
//     Vertex { position: [0.44147372, 0.2347359, 0.0], tex_coords: [0.9414737, 0.2652641] },       // E
// ];

// const INDICES: &[u16] = &[
//     0, 1, 4,
//     1, 2, 4,
//     2, 3, 4,
// ];

fn creer_plan_simple(largeur: f32, hauteur: f32) -> Vec<Vertex> {
    // Vector holding the vertices
    let mut vertices: Vec<Vertex> = Vec::new();

    // Coordinates of the four corners of the plane
    let coin_sup_gauche = [0.0, hauteur, 0.0];
    let coin_sup_droit = [largeur, hauteur, 0.0];
    let coin_inf_droit = [largeur, 0.0, 0.0];
    let coin_inf_gauche = [0.0, 0.0, 0.0];

    // Default colour (adjust as needed)
    let couleur = [1.0, 1.0, 1.0];

    // Push the four corner vertices
    vertices.push(Vertex { position: coin_sup_gauche, color: couleur });
    vertices.push(Vertex { position: coin_sup_droit, color: couleur });
    vertices.push(Vertex { position: coin_inf_droit, color: couleur });
    vertices.push(Vertex { position: coin_inf_gauche, color: couleur });

    // Return the vertex vector
    vertices
}
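// Note: `creer_plan_simple` returns only the four corner vertices and no index data,
// so with the `TriangleList` topology used by the pipeline below it cannot draw a
// full quad on its own; it would need six vertices (two triangles) or an index
// buffer. The call to it further down is currently commented out.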
struct State<'a> {
    surface: wgpu::Surface<'a>,
    device: wgpu::Device,
    queue: wgpu::Queue,
    config: wgpu::SurfaceConfiguration,
    size: winit::dpi::PhysicalSize<u32>,
    // The window must be declared after the surface so
    // it gets dropped after it as the surface contains
    // unsafe references to the window's resources.
    window: &'a Window,
    render_pipeline: wgpu::RenderPipeline,
    vertex_buffer: wgpu::Buffer,
    //index_buffer: wgpu::Buffer,
    num_vertices: u32,
    //num_indices: u32,
    diffuse_bind_group: wgpu::BindGroup,
    diffuse_texture: texture::Texture,
    camera: Camera,
    camera_uniform: CameraUniform,
    camera_buffer: wgpu::Buffer,
    camera_bind_group: wgpu::BindGroup,
}

impl<'a> State<'a> {
    async fn new(window: &'a Window) -> State<'a> {
        let size = window.inner_size();

        // The instance is a handle to our GPU
        // BackendBit::PRIMARY => Vulkan + Metal + DX12 + Browser WebGPU
        let instance = wgpu::Instance::new(wgpu::InstanceDescriptor {
            backends: wgpu::Backends::all(),
            ..Default::default()
        });

        let surface = instance.create_surface(window).unwrap();

        let adapter = instance
            .request_adapter(&wgpu::RequestAdapterOptions {
                power_preference: wgpu::PowerPreference::default(),
                compatible_surface: Some(&surface),
                force_fallback_adapter: false,
            })
            .await
            .unwrap();

        let (device, queue) = adapter
            .request_device(
                &wgpu::DeviceDescriptor {
                    label: None,
                    required_features: wgpu::Features::empty(),
                    // WebGL doesn't support all of wgpu's features, so if
                    // we're building for the web we'll have to disable some.
                    required_limits: if cfg!(target_arch = "wasm32") {
                        wgpu::Limits::downlevel_webgl2_defaults()
                    } else {
                        wgpu::Limits::default()
                    },
                },
                // Some(&std::path::Path::new("trace")), // Trace path
                None,
            )
            .await
            .unwrap();

        let surface_caps = surface.get_capabilities(&adapter);
        //println!("{:?}", &surface_caps.present_modes);

        // Shader code in this tutorial assumes an sRGB surface texture. Using a
        // different one will result in all the colors coming out darker. If you
        // want to support non-sRGB surfaces, you'll need to account for that when
        // drawing to the frame.
        let surface_format = surface_caps
            .formats
            .iter()
            .copied()
            .find(|f| f.is_srgb())
            .unwrap_or(surface_caps.formats[0]);

        let config = wgpu::SurfaceConfiguration {
            usage: wgpu::TextureUsages::RENDER_ATTACHMENT,
            format: surface_format,
            width: size.width,
            height: size.height,
            present_mode: surface_caps.present_modes[0],
            alpha_mode: surface_caps.alpha_modes[0],
            desired_maximum_frame_latency: 2,
            view_formats: vec![],
        };
        surface.configure(&device, &config);

        // Camera
        let camera = Camera {
            eye: glam::Vec3::new(0.0, 1.0, 2.0),
            target: glam::Vec3::new(0.0, 0.0, 0.0),
            up: glam::Vec3::new(0.0, 1.0, 0.0),
            aspect: config.width as f32 / config.height as f32,
            fovy: 45.0,
            znear: 0.1,
            zfar: 100.0,
        };

        // Camera buffer
        let mut camera_uniform = CameraUniform::new();
        camera_uniform.update_view_proj(&camera);

        let camera_buffer = device.create_buffer_init(
            &wgpu::util::BufferInitDescriptor {
                label: Some("Camera Buffer"),
                contents: bytemuck::cast_slice(&[camera_uniform]),
                usage: wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST,
            }
        );

        // Camera bind group layout
        let camera_bind_group_layout = device.create_bind_group_layout(
            &wgpu::BindGroupLayoutDescriptor {
                entries: &[
                    wgpu::BindGroupLayoutEntry {
                        binding: 0,
                        visibility: wgpu::ShaderStages::VERTEX,
                        ty: wgpu::BindingType::Buffer {
                            min_binding_size: None,
                            ty: wgpu::BufferBindingType::Uniform,
                            has_dynamic_offset: false,
                        },
                        count: None,
                    }
                ],
                label: Some("camera_bind_group_layout"),
            }
        );

        // Camera bind group
        let camera_bind_group = device.create_bind_group(
            &wgpu::BindGroupDescriptor {
                layout: &camera_bind_group_layout,
                entries: &[
                    wgpu::BindGroupEntry {
                        binding: 0,
                        resource: camera_buffer.as_entire_binding(),
                    }
                ],
                label: Some("camera_bind_group"),
            }
        );
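        // Assumption (the `CameraUniform` type lives in the `camera` module, which is
        // not shown here): since it is uploaded with `bytemuck::cast_slice` above, it is
        // presumably a #[repr(C)] struct deriving bytemuck::Pod and bytemuck::Zeroable,
        // holding the 4x4 view-projection matrix that `update_view_proj` writes.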
        // Texture
        let diffuse_bytes = include_bytes!("happy-tree.png");
        let diffuse_texture =
            texture::Texture::from_bytes(&device, &queue, diffuse_bytes, "happy-tree.png").unwrap();

        // Bind group layout for the texture
        let texture_bind_group_layout = device.create_bind_group_layout(
            &wgpu::BindGroupLayoutDescriptor {
                label: Some("Texture Bind Group Layout"),
                entries: &[
                    wgpu::BindGroupLayoutEntry {
                        binding: 0,
                        visibility: wgpu::ShaderStages::FRAGMENT,
                        ty: wgpu::BindingType::Texture {
                            sample_type: wgpu::TextureSampleType::Float { filterable: true },
                            view_dimension: wgpu::TextureViewDimension::D2,
                            multisampled: false,
                        },
                        count: None,
                    },
                    wgpu::BindGroupLayoutEntry {
                        binding: 1,
                        visibility: wgpu::ShaderStages::FRAGMENT,
                        ty: wgpu::BindingType::Sampler(
                            wgpu::SamplerBindingType::Filtering,
                        ),
                        count: None,
                    }
                ]
            }
        );

        // Creation of the real bind group
        let diffuse_bind_group = device.create_bind_group(
            &wgpu::BindGroupDescriptor {
                layout: &texture_bind_group_layout,
                entries: &[
                    wgpu::BindGroupEntry {
                        binding: 0,
                        resource: wgpu::BindingResource::TextureView(&diffuse_texture.view), // CHANGED!
                    },
                    wgpu::BindGroupEntry {
                        binding: 1,
                        resource: wgpu::BindingResource::Sampler(&diffuse_texture.sampler), // CHANGED!
                    }
                ],
                label: Some("diffuse_bind_group"),
            }
        );

        // Shader and pipeline
        let shader = device.create_shader_module(wgpu::ShaderModuleDescriptor {
            label: Some("Shader"),
            source: wgpu::ShaderSource::Wgsl(include_str!("shader.wgsl").into()),
        });

        let render_pipeline_layout =
            device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
                label: Some("Render Pipeline Layout"),
                bind_group_layouts: &[
                    &texture_bind_group_layout,
                    &camera_bind_group_layout,
                ],
                push_constant_ranges: &[],
            });

        let render_pipeline = device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
            label: Some("Render Pipeline"),
            layout: Some(&render_pipeline_layout),
            vertex: wgpu::VertexState {
                module: &shader,
                entry_point: "vs_main",
                buffers: &[
                    Vertex::desc()
                ],
            },
            fragment: Some(wgpu::FragmentState {
                module: &shader,
                entry_point: "fs_main",
                targets: &[Some(wgpu::ColorTargetState {
                    format: config.format,
                    blend: Some(wgpu::BlendState::REPLACE),
                    write_mask: wgpu::ColorWrites::ALL,
                })],
            }),
            primitive: wgpu::PrimitiveState {
                topology: wgpu::PrimitiveTopology::TriangleList,
                strip_index_format: None,
                front_face: wgpu::FrontFace::Ccw,
                cull_mode: Some(wgpu::Face::Back),
                polygon_mode: wgpu::PolygonMode::Fill,
                unclipped_depth: false,
                conservative: false,
            },
            depth_stencil: None,
            multisample: wgpu::MultisampleState {
                count: 1,
                mask: !0,
                alpha_to_coverage_enabled: false,
            },
            multiview: None,
        });
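        // shader.wgsl is not shown here; based on the pipeline above it is assumed to
        // declare a `vs_main` vertex entry point reading two vec3<f32> attributes at
        // @location(0) (position) and @location(1) (color), an `fs_main` fragment entry
        // point, the texture/sampler pair at @group(0) @binding(0) and @binding(1), and
        // the camera uniform at @group(1) @binding(0).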
        //let (vertices, indices) = creer_plan_subdivise(1.0, 1.0, 10);
        //let vert = creer_plan_simple(1.0, 1.0);
        //let vert = generate_simple_plane(0.5, 1);
        //let vert = generate_square();
        let vert = DEBUG_VERT;

        // Create a buffer with the vertex data
        let vertex_buffer = device.create_buffer_init(
            &wgpu::util::BufferInitDescriptor {
                label: Some("Vertex Buffer"),
                contents: bytemuck::cast_slice(&vert),
                usage: wgpu::BufferUsages::VERTEX,
            }
        );

        // let index_buffer = device.create_buffer_init(
        //     &wgpu::util::BufferInitDescriptor{
        //         label: Some("Indice Buffer"),
        //         usage: wgpu::BufferUsages::INDEX,
        //         contents: bytemuck::cast_slice(&indices),
        //     }
        // );

        let num_vertices = vert.len() as u32;
        //let num_indices = indices.len() as u32;

        Self {
            surface,
            device,
            queue,
            config,
            size,
            window,
            render_pipeline,
            vertex_buffer,
            //index_buffer,
            num_vertices,
            //num_indices,
            diffuse_bind_group,
            diffuse_texture,
            // Camera
            camera,
            camera_uniform,
            camera_buffer,
            camera_bind_group,
        }
    }

    fn window(&self) -> &Window {
        self.window
    }

    pub fn resize(&mut self, new_size: winit::dpi::PhysicalSize<u32>) {
        if new_size.width > 0 && new_size.height > 0 {
            self.size = new_size;
            self.config.width = new_size.width;
            self.config.height = new_size.height;
            self.surface.configure(&self.device, &self.config);
        }
    }

    #[allow(unused_variables)]
    fn input(&mut self, event: &WindowEvent) -> bool {
        false
    }

    fn update(&mut self) {}

    fn render(&mut self) -> Result<(), wgpu::SurfaceError> {
        let output = self.surface.get_current_texture()?;
        let view = output
            .texture
            .create_view(&wgpu::TextureViewDescriptor::default());

        let mut encoder = self
            .device
            .create_command_encoder(&wgpu::CommandEncoderDescriptor {
                label: Some("Render Encoder"),
            });

        {
            let mut render_pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
                label: Some("Render Pass"),
                color_attachments: &[Some(wgpu::RenderPassColorAttachment {
                    view: &view,
                    resolve_target: None,
                    ops: wgpu::Operations {
                        load: wgpu::LoadOp::Clear(wgpu::Color {
                            r: 0.1,
                            g: 0.2,
                            b: 0.3,
                            a: 1.0,
                        }),
                        store: wgpu::StoreOp::Store,
                    },
                })],
                depth_stencil_attachment: None,
                occlusion_query_set: None,
                timestamp_writes: None,
            });

            // Add the pipeline
            render_pass.set_pipeline(&self.render_pipeline);
            // Add the bind group for the texture
            render_pass.set_bind_group(0, &self.diffuse_bind_group, &[]);
            // Add the camera bind group
            render_pass.set_bind_group(1, &self.camera_bind_group, &[]);

            render_pass.set_vertex_buffer(0, self.vertex_buffer.slice(..));
            //render_pass.set_index_buffer(self.index_buffer.slice(..), wgpu::IndexFormat::Uint16);
            //render_pass.draw_indexed(0..self.num_indices, 0, 0..1);
            render_pass.draw(0..self.num_vertices, 0..1);
        }

        self.queue.submit(iter::once(encoder.finish()));
        output.present();

        Ok(())
    }
}

pub async fn run() {
    env_logger::init();
    let event_loop = EventLoop::new().unwrap();
    let window = WindowBuilder::new().build(&event_loop).unwrap();

    // State::new uses async code, so we're going to wait for it to finish
    let mut state = State::new(&window).await;

    event_loop
        .run(move |event, control_flow| {
            match event {
                Event::WindowEvent {
                    ref event,
                    window_id,
                } if window_id == state.window().id() => {
                    if !state.input(event) {
                        // UPDATED!
                        match event {
                            WindowEvent::CloseRequested
                            | WindowEvent::KeyboardInput {
                                event:
                                    KeyEvent {
                                        state: ElementState::Pressed,
                                        physical_key: PhysicalKey::Code(KeyCode::Escape),
                                        ..
                                    },
                                ..
                            } => control_flow.exit(),
                            WindowEvent::Resized(physical_size) => {
                                state.resize(*physical_size);
                            }
                            WindowEvent::RedrawRequested => {
                                // This tells winit that we want another frame after this one
                                state.window().request_redraw();

                                state.update();
                                match state.render() {
                                    Ok(_) => {}
                                    // Reconfigure the surface if it's lost or outdated
                                    Err(
                                        wgpu::SurfaceError::Lost | wgpu::SurfaceError::Outdated,
                                    ) => state.resize(state.size),
                                    // The system is out of memory, we should probably quit
                                    Err(wgpu::SurfaceError::OutOfMemory) => {
                                        log::error!("OutOfMemory");
                                        control_flow.exit();
                                    }
                                    // This happens when a frame takes too long to present
                                    Err(wgpu::SurfaceError::Timeout) => {
                                        log::warn!("Surface timeout")
                                    }
                                }
                            }
                            _ => {}
                        }
                    }
                }
                _ => {}
            }
        })
        .unwrap();
}
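// A minimal sketch of a binary entry point for this crate. This is an assumption:
// the file above only defines `run()`, and the real crate may instead drive it from
// another executor (tokio, async-std) or a wasm-bindgen start function on the web.
// `pollster` would need to be listed in Cargo.toml for this to compile.
//
// fn main() {
//     pollster::block_on(run());
// }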