use vulkano::buffer::{BufferUsage, CpuAccessibleBuffer};
use vulkano::command_buffer::{AutoCommandBufferBuilder, DynamicState};
use vulkano::device::{Device, DeviceExtensions};
use vulkano::framebuffer::{Framebuffer, FramebufferAbstract, Subpass, RenderPassAbstract};
use vulkano::image::{SwapchainImage, AttachmentImage};
use vulkano::instance::{Instance, PhysicalDevice, ApplicationInfo, Version, InstanceExtensions};
use vulkano::instance::debug::{DebugCallback, MessageTypes};
use vulkano::pipeline::GraphicsPipeline;
use vulkano::pipeline::shader::{GraphicsShaderType, ShaderModule};
use vulkano::pipeline::vertex::SingleBufferDefinition;
use vulkano::pipeline::viewport::Viewport;
use vulkano::descriptor::PipelineLayoutAbstract;
use vulkano::swapchain::{AcquireError, PresentMode, SurfaceTransform, Swapchain, SwapchainCreationError};
use vulkano::swapchain;
use vulkano::sync::{GpuFuture, FlushError};
use vulkano::sync;
use vulkano::format::Format;
use vulkano_win::VkSurfaceBuild;
use winit::{EventsLoop, Window, WindowBuilder, Event, WindowEvent};

use std::sync::Arc;
use std::time::SystemTime;
use std::path::PathBuf;
use std::ffi::CStr;

use shade_runner;
use shade_runner::{CompiledShaders, Entry};
use shaderc;

use crate::PushConstants;

const VALIDATION_LAYERS: &[&str] = &["VK_LAYER_LUNARG_standard_validation"];

#[cfg(debug_assertions)]
const ENABLE_VALIDATION_LAYERS: bool = true;
#[cfg(not(debug_assertions))]
const ENABLE_VALIDATION_LAYERS: bool = false;

#[derive(Default, Debug, Clone)]
pub struct Vertex {
    pub position: [f32; 3],
}
vulkano::impl_vertex!(Vertex, position);

pub struct GameData<'a> {
    pub start_time: SystemTime,
    pub mesh_vertices: Vec<Vertex>,
    pub line_vertices: Vec<Vertex>,
    pub push_constants: &'a mut PushConstants,
    pub recreate_pipeline: bool,
    pub aspect_ratio: f32,
    pub shutdown: bool,
}

pub fn init(mut game: GameData) {
    if ENABLE_VALIDATION_LAYERS {
        println!("Enabling validation layers...");
    }

    let instance = {
        let extensions = InstanceExtensions {
            ext_debug_report: true,
            ..vulkano_win::required_extensions()
        };
        let app_info = ApplicationInfo {
            application_name: Some("Asuro Editor".into()),
            application_version: Some(Version { major: 0, minor: 1, patch: 0 }),
            engine_name: Some("Asuro Rust Engine".into()),
            engine_version: Some(Version { major: 0, minor: 1, patch: 0 }),
        };

        if ENABLE_VALIDATION_LAYERS {
            let available_layers = vulkano::instance::layers_list().unwrap()
                .map(|layer| String::from(layer.name()))
                .collect::<Vec<String>>();
            VALIDATION_LAYERS.iter().for_each(|wanted_layer_name| {
                if !available_layers.iter().any(|available_layer_name| available_layer_name == wanted_layer_name) {
                    panic!("Validation layer not found: {:?}. Available layers: {:?}",
                           wanted_layer_name, &available_layers.join(", "));
                }
            });
            Instance::new(Some(&app_info), &extensions, VALIDATION_LAYERS.iter().cloned())
                .expect("failed to create Vulkan instance")
        } else {
            Instance::new(Some(&app_info), &extensions, None)
                .expect("failed to create Vulkan instance")
        }
    };

    // NOTE: the lifetime of this is important, even though it isn't used directly!
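    // The debug callback registered below is what receives the validation layers' messages and
    // prints them; it is unregistered as soon as this binding is dropped, so it has to stay
    // alive for the whole frame loop.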
    let mut _debug_callback = None;
    if ENABLE_VALIDATION_LAYERS {
        let msg_types = MessageTypes {
            error: true,
            warning: true,
            performance_warning: true,
            information: false,
            debug: true,
        };
        _debug_callback = DebugCallback::new(&instance, msg_types, |msg| {
            // Condense the message type flags into a short severity tag.
            let type_str = match (msg.ty.error, msg.ty.warning, msg.ty.performance_warning, msg.ty.information, msg.ty.debug) {
                (true, _, _, _, _) => "!!",
                (_, true, _, _, _) => "!",
                (_, _, true, _, _) => "p",
                (_, _, _, true, _) => "i",
                _ => " ",
            };
            let layer_str = msg.layer_prefix;
            println!("[{}][{}]: {}", type_str, layer_str, msg.description);
        }).ok();
    }

    let physical = PhysicalDevice::enumerate(&instance).next().unwrap();
    println!("Using device: {} (type: {:?})", physical.name(), physical.ty());

    let mut events_loop = EventsLoop::new();
    let surface = WindowBuilder::new().build_vk_surface(&events_loop, instance.clone()).unwrap();
    let window = surface.window();

    // The next step is to choose which GPU queue will execute our draw commands.
    //
    // Devices can provide multiple queues to run commands in parallel (for example a draw queue
    // and a compute queue), similar to CPU threads. This is something you have to manage
    // manually in Vulkan.
    //
    // In a real-life application, we would probably use at least a graphics queue and a transfer
    // queue to handle data transfers in parallel. In this example we only use one queue.
    //
    // We have to choose which queues to use early on, because we will need this info very soon.
    let queue_family = physical.queue_families().find(|&q| {
        q.supports_graphics() && surface.is_supported(q).unwrap_or(false)
    }).unwrap();

    let device_ext = DeviceExtensions {
        khr_swapchain: true,
        ..DeviceExtensions::none()
    };
    let (device, mut queues) = Device::new(physical, physical.supported_features(), &device_ext,
                                           [(queue_family, 0.5)].iter().cloned()).unwrap();
    let queue = queues.next().unwrap();

    let (mut swapchain, images) = {
        let caps = surface.capabilities(physical).unwrap();
        let usage = caps.supported_usage_flags;

        // The alpha mode indicates how the alpha value of the final image will behave. For
        // example, you can choose whether the window will be opaque or transparent.
        let alpha = caps.supported_composite_alpha.iter().next().unwrap();

        // Choose the internal format that the images will have.
        let format = caps.supported_formats[0].0;

        // The dimensions of the window, only used to initially set up the swapchain.
        //
        // NOTE: on some drivers the swapchain dimensions are specified by `caps.current_extent`
        // and the swapchain size must use these dimensions; they are always the same as the
        // window dimensions.
        //
        // However, other drivers don't specify a value, i.e. `caps.current_extent` is `None`.
        // These drivers will accept anything, but the only sensible value is the window
        // dimensions.
        //
        // Because the swapchain needs to match the window dimensions in both cases, we just use
        // the window dimensions.
        let initial_dimensions = if let Some(dimensions) = window.get_inner_size() {
            // Convert to physical pixels.
            let dimensions: (u32, u32) = dimensions.to_physical(window.get_hidpi_factor()).into();
            [dimensions.0, dimensions.1]
        } else {
            // The window no longer exists, so exit the application.
            panic!("window no longer exists");
        };

        // Please take a look at the docs for the meaning of the parameters we didn't mention.
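        // In order, they are: device, surface, number of images, pixel format, dimensions,
        // array layers, image usage, the queue(s) that share the images, surface transform,
        // composite alpha, present mode (Fifo behaves like vsync), whether the implementation
        // may clip pixels hidden by other windows, and an optional old swapchain to recycle.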
        Swapchain::new(device.clone(), surface.clone(), caps.min_image_count, format,
                       initial_dimensions, 1, usage, &queue, SurfaceTransform::Identity, alpha,
                       PresentMode::Fifo, true, None).unwrap()
    };

    let mesh_vertex_buffer = CpuAccessibleBuffer::from_iter(device.clone(), BufferUsage::all(),
                                                            game.mesh_vertices.iter().cloned()).unwrap();
    let line_vertex_buffer = CpuAccessibleBuffer::from_iter(device.clone(), BufferUsage::all(),
                                                            game.line_vertices.iter().cloned()).unwrap();

    let render_pass = Arc::new(vulkano::single_pass_renderpass!(
        device.clone(),
        attachments: {
            color: {
                load: Clear,
                store: Store,
                format: swapchain.format(),
                samples: 1,
            },
            depth: {
                load: Clear,
                store: DontCare,
                format: Format::D16Unorm,
                samples: 1,
            }
        },
        pass: {
            color: [color],
            depth_stencil: {depth}
        }
    ).unwrap());

    // Erase the concrete render pass type so the subpass matches what `create_pipeline` expects.
    let sub_pass = Subpass::from(render_pass.clone() as Arc<dyn RenderPassAbstract + Send + Sync>, 0).unwrap();

    let mut pipeline = create_pipeline(device.clone(), sub_pass.clone(),
                                       "shaders/triangle.vert", "shaders/triangle.frag", false).unwrap();
    let line_pipeline = create_pipeline(device.clone(), sub_pass.clone(),
                                        "shaders/line.vert", "shaders/line.frag", true).unwrap();

    // Dynamic viewports allow us to recreate just the viewport when the window is resized.
    // Otherwise we would have to recreate the whole pipeline.
    let mut dynamic_state = DynamicState { line_width: None, viewports: None, scissors: None };

    // The render pass we created above only describes the layout of our framebuffers. Before we
    // can draw we also need to create the actual framebuffers.
    //
    // Since we need to draw to multiple images, we are going to create a different framebuffer
    // for each image.
    let mut framebuffers = window_size_dependent_setup(device.clone(), &images, render_pass.clone(),
                                                       &mut dynamic_state, &mut game.aspect_ratio);

    // In some situations, the swapchain will become invalid by itself. This includes for example
    // when the window is resized (as the images of the swapchain will no longer match the
    // window's) or, on Android, when the application went to the background and goes back to the
    // foreground.
    //
    // In this situation, acquiring a swapchain image or presenting it will return an error.
    // Rendering to an image of that swapchain will not produce any error, but may or may not
    // work. To continue rendering, we need to create a new swapchain.
    // Here, we remember that we need to do this for the next loop iteration.
    let mut recreate_swapchain = false;

    // In the loop below we are going to submit commands to the GPU. Submitting a command produces
    // an object that implements the `GpuFuture` trait, which holds the resources for as long as
    // they are in use by the GPU.
    //
    // Destroying the `GpuFuture` blocks until the GPU is finished executing it. In order to avoid
    // that, we store the submission of the previous frame here.
    let mut previous_frame_end = Box::new(sync::now(device.clone())) as Box<dyn GpuFuture>;

    loop {
        // It is important to call this function from time to time, otherwise resources will keep
        // accumulating and you will eventually reach an out of memory error.
        // Calling this function polls various fences in order to determine what the GPU has
        // already processed, and frees the resources that are no longer needed.
        previous_frame_end.cleanup_finished();

        // Whenever the window resizes we need to recreate everything dependent on the window
        // size. In this example that includes the swapchain, the framebuffers and the dynamic
        // state viewport.
        if recreate_swapchain {
            // Get the new dimensions of the window.
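            // As when the swapchain was first created, the logical window size has to be
            // converted to physical pixels; if the window no longer exists, `get_inner_size`
            // returns `None` and we simply leave the render loop.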
            let dimensions = if let Some(dimensions) = window.get_inner_size() {
                let dimensions: (u32, u32) = dimensions.to_physical(window.get_hidpi_factor()).into();
                [dimensions.0, dimensions.1]
            } else {
                return;
            };

            let (new_swapchain, new_images) = match swapchain.recreate_with_dimension(dimensions) {
                Ok(r) => r,
                // This error tends to happen when the user is manually resizing the window.
                // Simply restarting the loop is the easiest way to fix this issue.
                Err(SwapchainCreationError::UnsupportedDimensions) => continue,
                Err(err) => panic!("{:?}", err),
            };
            swapchain = new_swapchain;

            // Because the framebuffers contain an Arc to the old swapchain, we need to recreate
            // them as well.
            framebuffers = window_size_dependent_setup(device.clone(), &new_images, render_pass.clone(),
                                                       &mut dynamic_state, &mut game.aspect_ratio);
            recreate_swapchain = false;
        }

        if game.recreate_pipeline {
            if let Some(pipeline_ok) = create_pipeline(device.clone(), sub_pass.clone(),
                                                       "shaders/triangle.vert", "shaders/triangle.frag", false) {
                pipeline = pipeline_ok;
                println!("Updated pipeline.");
            } else {
                println!("Failed to update pipeline.");
            }
            game.recreate_pipeline = false;
        }

        // Before we can draw on the output, we have to *acquire* an image from the swapchain. If
        // no image is available (which happens if you submit draw commands too quickly), then the
        // function will block. The second parameter is an optional timeout after which the call
        // returns an error instead.
        //
        // This operation returns the index of the image that we are allowed to draw upon.
        let (image_num, acquire_future) = match swapchain::acquire_next_image(swapchain.clone(), None) {
            Ok(r) => r,
            Err(AcquireError::OutOfDate) => {
                recreate_swapchain = true;
                continue;
            },
            Err(err) => panic!("{:?}", err),
        };

        // Clear the color attachment with blue and the depth attachment with 1.0.
        let clear_values = vec![[0.0, 0.0, 1.0, 1.0].into(), 1f32.into()];

        game.update_push_constants();

        // In order to draw, we have to build a *command buffer*. The command buffer object holds
        // the list of commands that are going to be executed.
        //
        // Building a command buffer is an expensive operation (usually a few hundred
        // microseconds), but it is known to be a hot path in the driver and is expected to be
        // optimized.
        //
        // Note that we have to pass a queue family when we create the command buffer. The command
        // buffer will only be executable on that given queue family.
        let command_buffer = AutoCommandBufferBuilder::primary_one_time_submit(device.clone(), queue.family()).unwrap()
            // Before we can draw, we have to *enter a render pass*. The second parameter selects
            // whether the render pass contents are provided by secondary command buffers; we
            // record everything inline, so it is `false`.
            //
            // The third parameter builds the list of values to clear the attachments with. The API
            // is similar to the list of attachments when building the framebuffers, except that
            // only the attachments that use `load: Clear` appear in the list.
            .begin_render_pass(framebuffers[image_num].clone(), false, clear_values).unwrap()
            // We are now inside the first subpass of the render pass. We add one draw command per
            // pipeline.
            //
            // The last two parameters are the descriptor sets and the push constants to pass to
            // the shaders. We use no descriptor sets; the mesh pipeline receives the push
            // constants and the line pipeline receives `()`.
            .draw(pipeline.clone(), &dynamic_state, mesh_vertex_buffer.clone(), (), game.push_constants.clone()).unwrap()
            .draw(line_pipeline.clone(), &dynamic_state, line_vertex_buffer.clone(), (), ()).unwrap()
            // We leave the render pass by calling `end_render_pass`. Note that if we had multiple
            // subpasses we could have jumped to the next one instead.
            .end_render_pass().unwrap()
            // Finish building the command buffer by calling `build`.
            .build().unwrap();

        let future = previous_frame_end.join(acquire_future)
            .then_execute(queue.clone(), command_buffer).unwrap()
            // The color output now contains our scene. But in order to show it on the screen, we
            // have to *present* the image.
            //
            // This function does not actually present the image immediately. Instead it submits a
            // present command at the end of the queue. This means that it will only be presented
            // once the GPU has finished executing the command buffer that draws the scene.
            .then_swapchain_present(queue.clone(), swapchain.clone(), image_num)
            .then_signal_fence_and_flush();

        match future {
            Ok(future) => {
                previous_frame_end = Box::new(future) as Box<_>;
            }
            Err(FlushError::OutOfDate) => {
                recreate_swapchain = true;
                previous_frame_end = Box::new(sync::now(device.clone())) as Box<_>;
            }
            Err(e) => {
                println!("{:?}", e);
                previous_frame_end = Box::new(sync::now(device.clone())) as Box<_>;
            }
        }

        // Note that in more complex programs it is likely that one of `acquire_next_image`,
        // `command_buffer::submit`, or `present` will block for some time. This happens when the
        // GPU's queue is full and the driver has to wait until the GPU finished some work.
        //
        // Unfortunately the Vulkan API doesn't provide any way to not wait or to detect when a
        // wait would happen. Blocking may be the desired behavior, but if you don't want to
        // block you should spawn a separate thread dedicated to submissions.

        // Handle the window events in order to close the program when the user wants to close it.
        events_loop.poll_events(|ev| {
            if !game.on_window_event(&ev) {
                match ev {
                    Event::WindowEvent { event: WindowEvent::CloseRequested, .. } => game.shutdown = true,
                    Event::WindowEvent { event: WindowEvent::Resized(_), .. } => recreate_swapchain = true,
                    _ => {}
                }
            }
        });

        if game.shutdown {
            return;
        }
    }
}

/// This function is called once during initialization, then again whenever the window is resized.
fn window_size_dependent_setup(
    device: Arc<Device>,
    images: &[Arc<SwapchainImage<Window>>],
    render_pass: Arc<dyn RenderPassAbstract + Send + Sync>,
    dynamic_state: &mut DynamicState,
    aspect_ratio: &mut f32,
) -> Vec<Arc<dyn FramebufferAbstract + Send + Sync>> {
    let dimensions = images[0].dimensions();
    *aspect_ratio = dimensions[0] as f32 / dimensions[1] as f32;

    let viewport = Viewport {
        origin: [0.0, 0.0],
        dimensions: [dimensions[0] as f32, dimensions[1] as f32],
        depth_range: 0.0..1.0,
    };
    dynamic_state.viewports = Some(vec![viewport]);

    // The depth attachment is recreated at the new size; it is shared by all framebuffers.
    let depth_buffer = AttachmentImage::transient(device.clone(), dimensions, Format::D16Unorm).unwrap();

    images.iter().map(|image| {
        Arc::new(Framebuffer::start(render_pass.clone())
            .add(image.clone()).unwrap()
            .add(depth_buffer.clone()).unwrap()
            .build().unwrap()
        ) as Arc<dyn FramebufferAbstract + Send + Sync>
    }).collect::<Vec<_>>()
}

/// Builds a graphics pipeline from the given vertex and fragment shader source files, compiled
/// at runtime. `is_line` selects a line-list topology instead of a triangle list. Returns `None`
/// if the shaders fail to compile.
fn create_pipeline(
    device: Arc<Device>,
    sub_pass: Subpass<Arc<dyn RenderPassAbstract + Send + Sync>>,
    vertex_shader_path: &str,
    fragment_shader_path: &str,
    is_line: bool,
) -> Option<Arc<GraphicsPipeline<SingleBufferDefinition<Vertex>,
                                 Box<dyn PipelineLayoutAbstract + Send + Sync>,
                                 Arc<dyn RenderPassAbstract + Send + Sync>>>> {
    if let Some((shader, shader_data)) = read_shader(vertex_shader_path, fragment_shader_path) {
        let vertex_shader_entry;
        let fragment_shader_entry;
        let vertex_shader_module;
        let fragment_shader_module;
        unsafe {
            vertex_shader_module = ShaderModule::from_words(device.clone(), &shader.vertex)
                .expect("Failed to load vertex shader.");
            vertex_shader_entry = vertex_shader_module.graphics_entry_point(
                CStr::from_bytes_with_nul_unchecked(b"main\0"),
                shader_data.vert_input,
                shader_data.vert_output,
                shader_data.vert_layout,
                GraphicsShaderType::Vertex);
            fragment_shader_module = ShaderModule::from_words(device.clone(), &shader.fragment)
                .expect("Failed to load fragment shader.");
            fragment_shader_entry = fragment_shader_module.graphics_entry_point(
                CStr::from_bytes_with_nul_unchecked(b"main\0"),
                shader_data.frag_input,
                shader_data.frag_output,
                shader_data.frag_layout,
                GraphicsShaderType::Fragment);
        };

        let pipeline;
        if is_line {
            pipeline = Arc::new(GraphicsPipeline::start()
                .vertex_input_single_buffer::<Vertex>()
                .vertex_shader(vertex_shader_entry.clone(), ())
                .line_list()
                .viewports_dynamic_scissors_irrelevant(1)
                .fragment_shader(fragment_shader_entry.clone(), ())
                .render_pass(sub_pass.clone())
                .build(device.clone())
                .unwrap());
        } else {
            // Before we can draw we have to create what is called a pipeline. This is similar to
            // an OpenGL program, but much more specific.
            pipeline = Arc::new(GraphicsPipeline::start()
                .vertex_input_single_buffer::<Vertex>()
                // A Vulkan shader can in theory contain multiple entry points, so we have to
                // specify which one; here we use the `main` entry point loaded above.
                .vertex_shader(vertex_shader_entry.clone(), ())
                .triangle_list()
                // Use a resizable viewport set to draw over the entire window.
                .viewports_dynamic_scissors_irrelevant(1)
                .fragment_shader(fragment_shader_entry.clone(), ())
                // We have to indicate which subpass of which render pass this pipeline is going
                // to be used in. The pipeline will only be usable from this particular subpass.
                .render_pass(sub_pass.clone())
                .build(device.clone())
                .unwrap());
        }
        Some(pipeline)
    } else {
        None
    }
}

/// Loads and compiles the vertex and fragment shaders at the given paths (relative to the
/// current working directory) with shade_runner, returning the compiled SPIR-V together with
/// the reflected entry-point data, or `None` on a compilation error.
fn read_shader(vert_path_relative: &str, frag_path_relative: &str) -> Option<(CompiledShaders, Entry)> {
    let project_root = std::env::current_dir().expect("failed to get root directory");
    let mut vert_path = project_root.clone();
    vert_path.push(PathBuf::from(vert_path_relative));
    let mut frag_path = project_root.clone();
    frag_path.push(PathBuf::from(frag_path_relative));

    let shader_result = shade_runner::load(vert_path, frag_path);
    match shader_result {
        Ok(shader) => {
            let shader_data = shade_runner::parse(&shader).expect("Failed to parse");
            Some((shader, shader_data))
        }
        Err(shade_runner::error::Error::Compile(shade_runner::error::CompileError::Compile(
            shaderc::Error::CompilationError(line, error)))) => {
            println!("Shader line {}: {:?}", line, error);
            None
        }
        Err(error) => {
            println!("Shader compilation error: {:?}", error);
            None
        }
    }
}