get supported surface formats
@@ -1,30 +0,0 @@
#include <core/buffers/gryphn_buffer.h>
#include <core/devices/metal_output_devices.h>
#include "metal_buffer.h"

GN_EXPORT gnErrorCode gnCreateBufferFn(gnBuffer* buffer, const gnOutputDevice& outputDevice) {
    if (!buffer->buffer) buffer->buffer = new gnPlatformBuffer();

    buffer->buffer->buffer = outputDevice.outputDevice->device->newBuffer(buffer->size, MTL::ResourceStorageModeShared);

    return GN_SUCCESS;
}

GN_EXPORT void gnBufferDataFn(gnBuffer& buffer, void* data) {
    memcpy(buffer.buffer->buffer->contents(), data, buffer.size);
}

GN_EXPORT void gnBufferSubDataFn(gnBuffer& buffer, gnSize offset, gnSize size, void* data) {
    memcpy((char*)buffer.buffer->buffer->contents() + offset, data, size);
}
GN_EXPORT void gnBufferClearDataFn(gnBuffer& buffer) {
    memset(buffer.buffer->buffer->contents(), 0, buffer.size); // zero out the buffer contents
}

GN_EXPORT void gnBufferMapDataFn(gnBuffer& buffer, void** data) {
    *data = buffer.buffer->buffer->contents();
}

GN_EXPORT void gnDestroyBufferFn(gnBuffer& buffer) {
    buffer.buffer->buffer->release();
}
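A minimal caller-side sketch of how the buffer functions above fit together; the gnBuffer fields used here come from the code above, while the vertex data and error handling are purely illustrative and not part of this commit:

// hypothetical usage, not part of this commit
gnBuffer vertexBuffer{};
vertexBuffer.size = sizeof(float) * 6;                             // e.g. three 2D positions
if (gnCreateBufferFn(&vertexBuffer, outputDevice) != GN_SUCCESS) { /* handle error */ }

float positions[6] = { 0.0f, 0.5f, -0.5f, -0.5f, 0.5f, -0.5f };
gnBufferDataFn(vertexBuffer, positions);                           // full upload into the shared MTL storage
gnBufferSubDataFn(vertexBuffer, 0, sizeof(float) * 2, positions);  // partial update at offset 0

void* mapped = nullptr;
gnBufferMapDataFn(vertexBuffer, &mapped);                          // direct pointer to the shared contents
gnDestroyBufferFn(vertexBuffer);                                   // releases the MTL::Buffer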
@@ -1,6 +0,0 @@
#pragma once
#include <Metal/Metal.hpp>

struct gnPlatformBuffer {
    MTL::Buffer* buffer;
};
@@ -1,46 +0,0 @@
#include "metal_vertex_description.h"

// MTL::VertexDescriptor* flatVertexDescriptor = MTL::VertexDescriptor::alloc()->init();
// auto attributes = flatVertexDescriptor->attributes();
// attributes->object(0)->setFormat(MTL::VertexFormat::VertexFormatFloat2);
// attributes->object(0)->setOffset(offsetof(Vertex, position));
// attributes->object(0)->setBufferIndex(0);
// attributes->object(1)->setFormat(MTL::VertexFormat::VertexFormatFloat3);
// attributes->object(1)->setOffset(offsetof(Vertex, uv));
// attributes->object(1)->setBufferIndex(0);
// flatVertexDescriptor->layouts()->object(0)->setStride(sizeof(Vertex));

GN_EXPORT void gnVertexDescriptionSetBindingDescriptionFn(gnVertexDescription& vertexDescription, const gnBindingDescription& binding) {
    if (!vertexDescription.vertexDescription) vertexDescription.vertexDescription = new gnPlatformVertexDescription();

    vertexDescription.vertexDescription->binding = binding.binding;
    vertexDescription.vertexDescription->vertexDescriptor = MTL::VertexDescriptor::alloc()->init();
    vertexDescription.vertexDescription->vertexDescriptor->layouts()->object(0)->setStride(binding.stride);
    // auto attributes = vertexDescriptor->attributes();
    // attributes->object(0)->setFormat(MTL::VertexFormat::VertexFormatFloat2);
    // attributes->object(0)->setOffset(offsetof(Vertex, position));
    // attributes->object(0)->setBufferIndex(0);
}

GN_EXPORT void gnVertexDescriptionSetPropertiesCountFn(gnVertexDescription& vertexDescription, int count) {
    if (!vertexDescription.vertexDescription) vertexDescription.vertexDescription = new gnPlatformVertexDescription();
    vertexDescription.vertexDescription->descriptorCount = count;
    // this does nothing on metal but I guess imma do something with it cuz it exists
}

GN_EXPORT void gnVertexDescriptionSetPropertyFn(gnVertexDescription& vertexDescription, int index, const gnVertexProperty& property) {
    if (!vertexDescription.vertexDescription) vertexDescription.vertexDescription = new gnPlatformVertexDescription();

    auto attribute = vertexDescription.vertexDescription->vertexDescriptor->attributes()->object(index);
    attribute->setBufferIndex(0);

    switch (property.format) {
        case GN_FLOAT:  attribute->setFormat(MTL::VertexFormat::VertexFormatFloat);  break;
        case GN_FLOAT2: attribute->setFormat(MTL::VertexFormat::VertexFormatFloat2); break;
        case GN_FLOAT3: attribute->setFormat(MTL::VertexFormat::VertexFormatFloat3); break;
        case GN_FLOAT4: attribute->setFormat(MTL::VertexFormat::VertexFormatFloat4); break;
        case GN_UINT:   attribute->setFormat(MTL::VertexFormat::VertexFormatUInt);   break;
    }

    attribute->setOffset(property.offset);
}
@@ -1,8 +0,0 @@
#include <core/buffers/vertex_descriptions/gryphn_vertex_description.h>
#include <Metal/Metal.hpp>

struct gnPlatformVertexDescription {
    MTL::VertexDescriptor* vertexDescriptor = nullptr;
    uint32_t binding;
    uint32_t descriptorCount;
};
@@ -1,106 +0,0 @@
#include <core/commands/gryphn_command.h>
#include <core/commands/metal_command_buffer.h>
#include <core/graphics_pipeline/metal_render_pass_frame.h>
#include <core/graphics_pipeline/metal_graphics_pipeline.h>
#include <core/framebuffers/metal_framebuffer.h>
#include <core/textures/metal_texture.h>
#include <core/buffers/metal_buffer.h>
#include <core/metal_instance.h>
#include <core/shaders/metal_shader_module.h>

GN_EXPORT gnReturnCode gnCommandBufferStartFn(gnCommandBuffer& commandBuffer) {
    // do absolutely nothing
    return GN_SUCCESS;
}
GN_EXPORT void gnCommandBeginRenderPassFn(gnCommandBuffer& commandBuffer, const gnRenderPassFrame& frame) {
    int currentColorAttachment = 0;
    for (int i = 0; i < gnListLength(frame.framebuffer->framebufferAttachments); i++) {
        if (frame.framebuffer->framebufferAttachments[i].bindPoint == GN_COLOR_ATTACHMENT) {
            frame.framebuffer->framebuffer->framebuffer->colorAttachments()->object(currentColorAttachment)->setClearColor(MTL::ClearColor::Make(frame.clearColor.r / 255.0f, frame.clearColor.g / 255.0f, frame.clearColor.b / 255.0f, frame.clearColor.a));
            currentColorAttachment++;
        }
    }
    commandBuffer.commandBuffer->renderCommandEncoder = commandBuffer.commandBuffer->commandBuffer->renderCommandEncoder(frame.framebuffer->framebuffer->framebuffer);
    MTL::Viewport vp = {(double)frame.offset.x, (double)frame.offset.y, (double)frame.area.x, (double)frame.area.y, 0.0, 1.0};
    commandBuffer.commandBuffer->renderCommandEncoder->setViewport(vp);
}
GN_EXPORT void gnCommandSetGraphicsPipelineFn(gnCommandBuffer& commandBuffer, const gnGraphicsPipeline& graphicsPipeline) {
    commandBuffer.commandBuffer->renderCommandEncoder->setRenderPipelineState(graphicsPipeline.graphicsPipeline->renderPipelineState);
    if (graphicsPipeline.graphicsPipeline->cullMode == GN_CULL_BACKFACE)
        commandBuffer.commandBuffer->renderCommandEncoder->setCullMode(MTL::CullMode::CullModeBack);
    else if (graphicsPipeline.graphicsPipeline->cullMode == GN_CULL_FRONTFACE)
        commandBuffer.commandBuffer->renderCommandEncoder->setCullMode(MTL::CullMode::CullModeFront);
    else if (graphicsPipeline.graphicsPipeline->cullMode == GN_CULL_NONE)
        commandBuffer.commandBuffer->renderCommandEncoder->setCullMode(MTL::CullMode::CullModeNone);

    if (graphicsPipeline.graphicsPipeline->direction == GN_CLOCKWISE)
        commandBuffer.commandBuffer->renderCommandEncoder->setFrontFacingWinding(MTL::WindingCounterClockwise);
    if (graphicsPipeline.graphicsPipeline->direction == GN_COUNTER_CLOCKWISE)
        commandBuffer.commandBuffer->renderCommandEncoder->setFrontFacingWinding(MTL::WindingClockwise);

    commandBuffer.commandBuffer->renderCommandEncoder->setDepthStencilState(graphicsPipeline.graphicsPipeline->depthStencilState);
}
GN_EXPORT void gnCommandSetViewportFn(const gnCommandBuffer& commandBuffer, gnViewportDescriptionData data) {
    MTL::Viewport viewport = {
        data.offset.x, data.offset.y,
        data.size.x, data.size.y,
        data.depth.x, data.depth.y
    };
    commandBuffer.commandBuffer->renderCommandEncoder->setViewport(viewport);
}
GN_EXPORT void gnCommandSetScissorFn(const gnCommandBuffer& commandBuffer, gnScissorDescriptionData data) {
    MTL::ScissorRect rect = {
        data.offset.x, data.offset.y,
        data.extent.x, data.extent.y
    };
    commandBuffer.commandBuffer->renderCommandEncoder->setScissorRect(rect);
}
GN_EXPORT void gnCommandDrawFn(gnCommandBuffer& commandBuffer, int vertexCount, int instanceCount, int firstVertex, int firstInstance) {
    commandBuffer.commandBuffer->renderCommandEncoder->drawPrimitives(MTL::PrimitiveTypeTriangle, firstVertex, vertexCount, instanceCount, firstInstance);
}
GN_EXPORT void gnCommandDrawIndexedFn(gnCommandBuffer& commandBuffer, gnUInt indexCount, gnUInt instanceCount, gnUInt firstIndex, gnInt vertexOffset, gnUInt firstInstance) {
    commandBuffer.commandBuffer->renderCommandEncoder->drawIndexedPrimitives(MTL::PrimitiveTypeTriangle, indexCount, MTL::IndexTypeUInt16, commandBuffer.commandBuffer->boundIndexBuffer, vertexOffset, instanceCount);
}
GN_EXPORT void gnCommandBindBufferFn(gnCommandBuffer& commandBuffer, const gnBuffer& buffer) {
    if (buffer.bufferType == GN_VERTEX_BUFFER)
        commandBuffer.commandBuffer->renderCommandEncoder->setVertexBuffer(buffer.buffer->buffer, 0, 0);
    else if (buffer.bufferType == GN_INDEX_BUFFER)
        commandBuffer.commandBuffer->boundIndexBuffer = buffer.buffer->buffer;
}
GN_EXPORT void gnCommandBindBufferUniformFn(gnCommandBuffer& commandBuffer, gnGraphicsPipeline& graphicsPipeline, gnBufferUniform& uniformBuffer, gnInt set) {
    for (int i = 0; i < graphicsPipeline.graphicsPipeline->shaders.size(); i++) {
        const mtlShaderRepresentation& repr = graphicsPipeline.graphicsPipeline->shaders[i];
        if (repr.module == GN_VERTEX_SHADER_MODULE) {
            commandBuffer.commandBuffer->renderCommandEncoder->setVertexBuffer(uniformBuffer.buffer->buffer->buffer, 0, uniformBuffer.binding + repr.uniformBufferBinding);
        } else if (repr.module == GN_FRAGMENT_SHADER_MODULE) {
            commandBuffer.commandBuffer->renderCommandEncoder->setFragmentBuffer(uniformBuffer.buffer->buffer->buffer, 0, uniformBuffer.binding + repr.pushConstantBinding);
        }
    }
}
GN_EXPORT void gnCommandBindSamplerUniformFn(gnCommandBuffer& commandBuffer, const gnGraphicsPipeline& graphicsPipeline, const gnSamplerUniform& sampler, gnInt set) {
    for (int i = 0; i < graphicsPipeline.graphicsPipeline->shaders.size(); i++) {
        if (graphicsPipeline.graphicsPipeline->shaders[i].module == GN_FRAGMENT_SHADER_MODULE) {
            int binding = graphicsPipeline.graphicsPipeline->shaders[i].textureBindings[{(gnUInt)set, sampler.binding}];
            // std::cout << "Binding: " << binding << "\n";
            commandBuffer.commandBuffer->renderCommandEncoder->setFragmentTexture(sampler.texture->texture->texture, binding);
            commandBuffer.commandBuffer->renderCommandEncoder->setFragmentSamplerState(sampler.texture->texture->sampler, binding);
        }
    }
}
GN_EXPORT void gnCommandPushConstantFn(gnCommandBuffer& commandBuffer, const gnGraphicsPipeline& graphicsPipeline, const gnPushConstant& pushConstant, void* data) {
    for (int i = 0; i < graphicsPipeline.graphicsPipeline->shaders.size(); i++) {
        const mtlShaderRepresentation& repr = graphicsPipeline.graphicsPipeline->shaders[i];
        if (repr.module == GN_VERTEX_SHADER_MODULE) {
            commandBuffer.commandBuffer->renderCommandEncoder->setVertexBytes(data, pushConstant.size, repr.pushConstantBinding);
        } else if (repr.module == GN_FRAGMENT_SHADER_MODULE) {
            commandBuffer.commandBuffer->renderCommandEncoder->setFragmentBytes(data, pushConstant.size, repr.pushConstantBinding);
        }
    }
}
GN_EXPORT void gnCommandEndRenderPassFn(gnCommandBuffer& commandBuffer) {
    commandBuffer.commandBuffer->renderCommandEncoder->endEncoding();
}
GN_EXPORT gnReturnCode gnCommandBufferEndFn(gnCommandBuffer& commandBuffer) {
    // commandBuffer.commandBuffer->commandBuffer->commit();
    return GN_SUCCESS;
}
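For orientation, a hedged sketch of a single frame recorded with the command functions above; the call order follows the implementations in this file, and the objects passed in are assumed to have been created elsewhere:

// hypothetical usage, not part of this commit
gnCommandBufferStartFn(cmd);                         // no-op on this backend
gnCommandBeginRenderPassFn(cmd, frame);              // creates the render command encoder and sets the viewport
gnCommandSetGraphicsPipelineFn(cmd, pipeline);       // pipeline state, cull mode, winding, depth state
gnCommandBindBufferFn(cmd, vertexBuffer);            // GN_VERTEX_BUFFER -> setVertexBuffer at index 0
gnCommandBindBufferFn(cmd, indexBuffer);             // GN_INDEX_BUFFER -> remembered as boundIndexBuffer
gnCommandDrawIndexedFn(cmd, indexCount, 1, 0, 0, 0); // drawIndexedPrimitives with the bound index buffer
gnCommandEndRenderPassFn(cmd);                       // endEncoding()
gnCommandBufferEndFn(cmd);                           // the commit happens later, in the submit command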
@@ -1,27 +0,0 @@
#include "metal_command_buffer.h"
#include <core/devices/metal_output_devices.h>
#include <core/output_device/gryphn_output_device.h>
#include <core/metal_instance.h>

GN_EXPORT gnReturnCode gnCreateCommandBufferFn(gnCommandBuffer* commandBuffer, const gnOutputDevice& outputDevice) {
    commandBuffer->commandBuffer = new gnPlatformCommandBuffer();
    commandBuffer->commandBuffer->outputDevice = &outputDevice;
    commandBuffer->commandBuffer->commandBuffer = outputDevice.outputDevice->commandQueue->commandBuffer();
    return GN_SUCCESS;
}
GN_EXPORT gnReturnCode _gnCreateCommandBuffersFn(gnCommandBuffer* commandBuffers, gnUInt commandBufferCount, const gnOutputDevice& outputDevice) {
    for (int i = 0; i < commandBufferCount; i++) {
        commandBuffers[i].commandBuffer = new gnPlatformCommandBuffer();
        commandBuffers[i].commandBuffer->outputDevice = &outputDevice;
        commandBuffers[i].commandBuffer->commandBuffer = outputDevice.outputDevice->commandQueue->commandBuffer();
    }
    return GN_SUCCESS;
}
// so imma just destroy and recreate the command buffer every frame, fuck you
GN_EXPORT void gnCommandBufferResetFn(const gnCommandBuffer& commandBuffer) {
    commandBuffer.commandBuffer->commandBuffer->release();
    commandBuffer.commandBuffer->commandBuffer = commandBuffer.commandBuffer->outputDevice->outputDevice->commandQueue->commandBuffer();
}
GN_EXPORT void gnDestroyCommandBufferFn(const gnCommandBuffer& commandBuffer) {
    commandBuffer.commandBuffer->commandBuffer->release();
}
@@ -1,13 +0,0 @@
#pragma once
#include <core/commands/gryphn_command_buffer.h>
#include <core/graphics_pipeline/gryphn_graphics_pipeline.h>
#include <Metal/Metal.hpp>

struct gnPlatformCommandBuffer {
    MTL::CommandBuffer* commandBuffer;
    MTL::RenderCommandEncoder* renderCommandEncoder;

    MTL::Buffer* boundIndexBuffer = nullptr;

    const gnOutputDevice* outputDevice;
};
@@ -1,55 +0,0 @@
#include <core/commands/present_command/gryphn_command_present.h>
#include "core/sync_objects/metal_semaphore.h"
#include "core/presentation_queue/metal_presentation_queue.h"
#include "core/devices/metal_output_devices.h"
#include "core/textures/metal_texture.h"
#include "core/metal_instance.h"
#include "bridge/metal_bridge.h"

GN_EXPORT gnPresentationQueueState gnCommandPresentGetValidPresentationQueueFn(gnCommandPresentData& presentCommandData) {
    return GN_VALID;
}
GN_EXPORT gnReturnCode gnCommandPresentFn(gnCommandPresentData& presentCommandData) {
    // dispatch_semaphore_wait(presentCommandData.semaphore->semaphore->semaphore, DISPATCH_TIME_FOREVER);
    // 2. Create the command buffer
    gnOutputDevice* outputDevice = mltGetOutputDevice(presentCommandData.presentationQueue->presentationQueue);
    MTL::CommandBuffer* commandBuffer = outputDevice->outputDevice->commandQueue->commandBuffer();

    // 3. Add a completed handler to signal the semaphore after the GPU has completed rendering.
    __block dispatch_semaphore_t semToSignal = presentCommandData.semaphore->semaphore->semaphore;
    commandBuffer->addCompletedHandler(^(MTL::CommandBuffer* buffer) {
        // Signal the semaphore after GPU work (drawing) is complete
        dispatch_semaphore_signal(semToSignal);
    });

    gnInstance* instance = outputDevice->outputDevice->instance;
    MTK::View* view = outputDevice->outputDevice->contentView;
    CA::MetalDrawable* drawable = presentCommandData.presentationQueue->presentationQueue->currentDrawable;
    // if (drawable == nullptr) {
    //     GN_RETURN_ERROR("drawable is null");
    // }

    MTL::RenderPassDescriptor* desc = MTL::RenderPassDescriptor::alloc()->init();
    desc->colorAttachments()->object(0)->setTexture(drawable->texture());
    desc->colorAttachments()->object(0)->setLoadAction(MTL::LoadActionClear);
    desc->colorAttachments()->object(0)->setStoreAction(MTL::StoreActionStore);
    desc->colorAttachments()->object(0)->setClearColor(MTL::ClearColor::Make(1, 0, 0, 1));

    auto enc = commandBuffer->renderCommandEncoder(desc);

    enc->setRenderPipelineState(instance->instance->framebufferRenderer);
    enc->setFragmentTexture(presentCommandData.presentationQueue->images[*presentCommandData.imageIndex].texture->texture, 0);
    enc->drawPrimitives(MTL::PrimitiveTypeTriangleStrip, NS::UInteger(0), NS::UInteger(4));

    enc->endEncoding();

    commandBuffer->presentDrawable(drawable);

    // 5. Commit the command buffer
    commandBuffer->commit();

    // 6. Wait again on the semaphore to ensure the drawable is presented and the GPU work is complete.
    dispatch_semaphore_wait(semToSignal, DISPATCH_TIME_FOREVER);
    return GN_SUCCESS;
}
@@ -1,19 +0,0 @@
#include "core/commands/submit_command/gryphn_command_submit.h"
#include "core/sync_objects/metal_semaphore.h"
#include "core/commands/metal_command_buffer.h"
#include <Metal/Metal.hpp>

GN_EXPORT gnPresentationQueueState gnCommandSubmitGetValidPresentationQueueFn(gnCommandSubmitData& presentCommandData) {
    return GN_VALID;
}
GN_EXPORT gnErrorCode gnCommandSubmitFn(gnCommandSubmitData& data, const gnFence& fence) {
    dispatch_semaphore_wait(data.waitSemaphore->semaphore->semaphore, DISPATCH_TIME_FOREVER);

    __block dispatch_semaphore_t semToSignal = data.signalSemaphore->semaphore->semaphore;
    data.commandBuffer->commandBuffer->commandBuffer->addCompletedHandler(^void(MTL::CommandBuffer* buffer) {
        dispatch_semaphore_signal(semToSignal);
    });
    data.commandBuffer->commandBuffer->commandBuffer->commit();

    return GN_SUCCESS;
}
@@ -1,13 +0,0 @@
#include <core/uniform_descriptor/gryphn_uniform_layout.h>

struct gnPlatformUniformLayout {

};

GN_EXPORT gnReturnCode gnCreateUniformLayoutFn(gnUniformLayout* uniformLayout, gnOutputDevice& device) {
    if (uniformLayout->uniformLayout == nullptr) uniformLayout->uniformLayout = new gnPlatformUniformLayout();
    return GN_SUCCESS;
}
GN_EXPORT void gnDestroyUniformLayoutFn(gnUniformLayout& uniformLayout) {

}
@@ -1,52 +0,0 @@
#include "metal_framebuffer.h"
#include "core/graphics_pipeline/metal_graphics_pipeline.h"
#include <core/devices/metal_output_devices.h>
#include <core/textures/metal_texture.h>

GN_EXPORT gnReturnCode gnCreateFramebufferAttachmentFn(gnFramebufferAttachment* attachment, gnPresentationQueue& queue) {
    attachment->framebufferAttachment = new gnPlatformFramebufferAttachment();
    // attachment->framebufferAttachment->framebufferAttachment = MTL::RenderPassAttachmentDescriptor::alloc()->init();
    // MTL::RenderPassAttachmentDescriptor* descriptor = attachment->framebufferAttachment->framebufferAttachment;
    // attachment->framebufferAttachment->framebufferAttachment->setTexture(attachment->texture->texture->texture->retain());
    // descriptor->setLoadAction(MTL::LoadActionClear);
    // descriptor->setStoreAction(MTL::StoreActionStore);
    return GN_SUCCESS;
}

GN_EXPORT gnReturnCode gnCreateFramebufferFn(gnFramebuffer* framebuffer, const gnRenderPass& renderpass) {
    framebuffer->framebuffer = new gnPlatformFramebuffer();
    framebuffer->framebuffer->framebuffer = MTL::RenderPassDescriptor::alloc()->init();
    framebuffer->framebuffer->framebuffer->setRenderTargetWidth(framebuffer->size.x);
    framebuffer->framebuffer->framebuffer->setRenderTargetHeight(framebuffer->size.y);
    framebuffer->framebuffer->framebuffer->setDepthAttachment(nullptr);
    int currentColorAttachment = 0;
    for (int i = 0; i < gnListLength(framebuffer->framebufferAttachments); i++) {
        if (framebuffer->framebufferAttachments[i].bindPoint == GN_COLOR_ATTACHMENT) {
            MTL::RenderPassColorAttachmentDescriptor* colorAttachment = framebuffer->framebuffer->framebuffer->colorAttachments()->object(currentColorAttachment)->retain();
            colorAttachment->setTexture(framebuffer->framebufferAttachments[i].texture->texture->texture);
            colorAttachment->setClearColor(MTL::ClearColor::Make(1.0f, 0.0f, 0.0f, 1.0f));
            colorAttachment->setLoadAction(MTL::LoadActionClear);
            colorAttachment->setStoreAction(MTL::StoreActionStore);
            colorAttachment->release();
            currentColorAttachment++;
        } else if (framebuffer->framebufferAttachments[i].bindPoint == GN_DEPTH_STENCIL_ATTACHMENT) {
            MTL::RenderPassDepthAttachmentDescriptor* depthAttachment = framebuffer->framebuffer->framebuffer->depthAttachment()->retain();
            depthAttachment->setTexture(framebuffer->framebufferAttachments[i].texture->texture->texture);
            depthAttachment->setLoadAction(MTL::LoadActionClear);
            depthAttachment->setStoreAction(MTL::StoreActionStore);
            depthAttachment->release();

            MTL::RenderPassStencilAttachmentDescriptor* stencilAttachment = framebuffer->framebuffer->framebuffer->stencilAttachment()->retain();
            stencilAttachment->setTexture(framebuffer->framebufferAttachments[i].texture->texture->texture);
            stencilAttachment->setLoadAction(MTL::LoadActionClear);
            stencilAttachment->setStoreAction(MTL::StoreActionStore);
            stencilAttachment->release();
        } else {
            return gnReturnError(GN_UNKNOWN_FRAMEBUFFER_ATTACHMENT, "GN_DEPTH_ATTACHMENT and GN_STENCIL_ATTACHMENT are unsupported on metal for now");
        }
    }
    return GN_SUCCESS;
}
GN_EXPORT void gnDestroyFramebufferFn(const gnFramebuffer& framebuffer) {
    framebuffer.framebuffer->framebuffer->release();
}
@@ -1,10 +0,0 @@
#include <core/framebuffers/gryphn_framebuffer.h>
#include <Metal/Metal.hpp>

struct gnPlatformFramebuffer {
    MTL::RenderPassDescriptor* framebuffer;
};

struct gnPlatformFramebufferAttachment {
    MTL::RenderPassAttachmentDescriptor* framebufferAttachment;
};
@@ -1,134 +0,0 @@
#include <core/graphics_pipeline/gryphn_graphics_pipeline.h>
#include "metal_graphics_pipeline.h"
#include <core/devices/metal_output_devices.h>
#include <Metal/Metal.hpp>
#include <core/buffers/vertex_description/metal_vertex_description.h>
#include "core/shaders/metal_shader_module.h"

void mtlInitGraphicsPipeline(gnGraphicsPipeline& pipeline) {
    if (pipeline.graphicsPipeline == nullptr) {
        pipeline.graphicsPipeline = new gnPlatformGraphicsPipeline();
        pipeline.graphicsPipeline->renderPipelineDescriptor = MTL::RenderPipelineDescriptor::alloc()->init();
    }
}

GN_EXPORT void gnGraphicsPipelineSetPrimativeFn(gnGraphicsPipeline& pipeline, gnPrimative primative) {
    mtlInitGraphicsPipeline(pipeline);
    pipeline.primative = primative;
    // the primitive type is passed at draw time in metal, so there is nothing else to do here
}
GN_EXPORT void gnGraphicsPipelineEnableDynamicStatesFn(gnGraphicsPipeline& pipeline, const gnBool enable) {
    mtlInitGraphicsPipeline(pipeline);
    pipeline.graphicsPipeline->dynamicStatesEnabled = enable;
}
GN_EXPORT void gnGraphicsPipelineEnableDynamicStateFn(gnGraphicsPipeline& pipeline, const gnDynamicState state) {
    mtlInitGraphicsPipeline(pipeline);
    pipeline.graphicsPipeline->dynamicStates.push_back(state);
}
GN_EXPORT void _gnGraphicsPipelineSetViewportFn(gnGraphicsPipeline& pipeline, gnUInt2 position, gnUInt2 size, gnFloat minDepth, gnFloat maxDepth) {
    mtlInitGraphicsPipeline(pipeline);
    pipeline.graphicsPipeline->position = position;
    pipeline.graphicsPipeline->size = size;
    pipeline.graphicsPipeline->minDepth = minDepth;
    pipeline.graphicsPipeline->maxDepth = maxDepth; // these values are only stored, metal already makes the viewport dynamic
} // minDepth = 0.0f, maxDepth = 1.0f
GN_EXPORT void gnGraphicsPipelineSetCropFn(gnGraphicsPipeline& graphicsPipeline, gnInt2 position, gnUInt2 size) {
    mtlInitGraphicsPipeline(graphicsPipeline);
    graphicsPipeline.graphicsPipeline->stencil_position = position;
    graphicsPipeline.graphicsPipeline->stencil_size = size;
}
GN_EXPORT void gnGraphicsPipelineSetDepthClampFn(gnGraphicsPipeline& graphicsPipeline, gnBool enableDepthClamp) {
    mtlInitGraphicsPipeline(graphicsPipeline);
    graphicsPipeline.graphicsPipeline->enableDepthClamp = enableDepthClamp;
}
GN_EXPORT void gnGraphicsPipelineSetFillModeFn(gnGraphicsPipeline& graphicsPipeline, gnFillMode fillMode) {
    mtlInitGraphicsPipeline(graphicsPipeline);
    graphicsPipeline.graphicsPipeline->fillMode = fillMode;
    // if fill mode is points then fuck you, I have to write a renderer that is going to just draw the points
}
GN_EXPORT void gnGraphicsPipelineSetLineWidthFn(gnGraphicsPipeline& graphicsPipeline, gnFloat lineWidth) {
    mtlInitGraphicsPipeline(graphicsPipeline);
    graphicsPipeline.graphicsPipeline->lineWidth = lineWidth;
}
GN_EXPORT void gnGraphicsPipelineSetCullModeFn(gnGraphicsPipeline& graphicsPipeline, gnCullMode cullMode, gnFrontFaceDirection direction) {
    mtlInitGraphicsPipeline(graphicsPipeline);
    graphicsPipeline.graphicsPipeline->cullMode = cullMode;
    graphicsPipeline.graphicsPipeline->direction = direction;
}
GN_EXPORT void gnGraphicsPipelineSetMultisamplingFn(gnGraphicsPipeline& graphicsPipeline, gnBool enableMultisampling) {
    mtlInitGraphicsPipeline(graphicsPipeline);
    if (enableMultisampling) {
        std::cout << "Fuck you im not doing multisampling";
    }
}
GN_EXPORT void gnGraphicsPipelineEnableDepthTestFn(gnGraphicsPipeline& graphicsPipeline, gnBool depthTest) {
    mtlInitGraphicsPipeline(graphicsPipeline);
    graphicsPipeline.graphicsPipeline->depthStateDescriptor = MTL::DepthStencilDescriptor::alloc()->init();
    if (depthTest) {
        graphicsPipeline.graphicsPipeline->depthStateDescriptor->setDepthCompareFunction(MTL::CompareFunctionLess);
        graphicsPipeline.graphicsPipeline->depthStateDescriptor->setDepthWriteEnabled(true);
    } else {
        graphicsPipeline.graphicsPipeline->depthStateDescriptor->setDepthCompareFunction(MTL::CompareFunctionAlways);
        graphicsPipeline.graphicsPipeline->depthStateDescriptor->setDepthWriteEnabled(false);
    }
}
GN_EXPORT void gnGraphicsPipelineSetColorBlendFn(gnGraphicsPipeline& graphicsPipeline, gnBool colorBlend) {
    mtlInitGraphicsPipeline(graphicsPipeline);
    graphicsPipeline.graphicsPipeline->colorBlending = colorBlend;

    if (colorBlend) {
        graphicsPipeline.graphicsPipeline->renderPipelineDescriptor->colorAttachments()->object(0)->setBlendingEnabled(true);
        graphicsPipeline.graphicsPipeline->renderPipelineDescriptor->colorAttachments()->object(0)->setRgbBlendOperation(MTL::BlendOperation::BlendOperationAdd);
        graphicsPipeline.graphicsPipeline->renderPipelineDescriptor->colorAttachments()->object(0)->setAlphaBlendOperation(MTL::BlendOperation::BlendOperationAdd);
        graphicsPipeline.graphicsPipeline->renderPipelineDescriptor->colorAttachments()->object(0)->setSourceRGBBlendFactor(MTL::BlendFactor::BlendFactorSourceAlpha);
        graphicsPipeline.graphicsPipeline->renderPipelineDescriptor->colorAttachments()->object(0)->setSourceAlphaBlendFactor(MTL::BlendFactor::BlendFactorSourceAlpha);
        graphicsPipeline.graphicsPipeline->renderPipelineDescriptor->colorAttachments()->object(0)->setDestinationRGBBlendFactor(MTL::BlendFactor::BlendFactorOneMinusSourceAlpha);
        graphicsPipeline.graphicsPipeline->renderPipelineDescriptor->colorAttachments()->object(0)->setDestinationAlphaBlendFactor(MTL::BlendFactor::BlendFactorOneMinusSourceAlpha);
    }
}
GN_EXPORT void gnGraphicsPipelineSetVertexDescriptionFn(gnGraphicsPipeline& graphicsPipeline, const gnVertexDescription& vertexDescription) {
    mtlInitGraphicsPipeline(graphicsPipeline);
    // this is one of the only things that is not done at runtime in metal, i dont fucking know why
    graphicsPipeline.graphicsPipeline->renderPipelineDescriptor->setVertexDescriptor(vertexDescription.vertexDescription->vertexDescriptor);
}
GN_EXPORT void gnGraphicsPipelineBindShaderFn(gnGraphicsPipeline& graphicsPipeline, const gnShader& shader) {
    mtlInitGraphicsPipeline(graphicsPipeline);
    for (int i = 0; i < gnListLength(shader.shaderModules); i++) {
        if (shader.shaderModules[i].shaderType == GN_VERTEX_SHADER_MODULE)
            graphicsPipeline.graphicsPipeline->renderPipelineDescriptor->setVertexFunction(shader.shaderModules[i].shaderModule->shaderFunction);
        else if (shader.shaderModules[i].shaderType == GN_FRAGMENT_SHADER_MODULE)
            graphicsPipeline.graphicsPipeline->renderPipelineDescriptor->setFragmentFunction(shader.shaderModules[i].shaderModule->shaderFunction);

        graphicsPipeline.graphicsPipeline->shaders.push_back({
            shader.shaderModules[i].shaderType,
            shader.shaderModules[i].shaderModule->uniformBufferOffset,
            shader.shaderModules[i].shaderModule->pushConstantOffset,
            shader.shaderModules[i].shaderModule->texturesSetBindings
        });
    }
}
GN_EXPORT void gnGraphicsPipelineSetRenderPassFn(gnGraphicsPipeline& graphicsPipeline, gnRenderPass& renderpass) {

}
GN_EXPORT void gnGraphicsPipelineAddUniformLayoutFn(gnGraphicsPipeline& graphicsPipeline, const gnUniformLayout& uniformLayout) {

}
GN_EXPORT void gnGraphicsPipelineAddPushConstantFn(gnGraphicsPipeline& graphicsPipeline, const gnPushConstant& pushConstant) {

}
GN_EXPORT gnReturnCode gnCreateGraphicsPipelineFn(gnGraphicsPipeline* graphicsPipeline, gnOutputDevice& outputDevice) {
    if (graphicsPipeline->graphicsPipeline == nullptr) graphicsPipeline->graphicsPipeline = new gnPlatformGraphicsPipeline();
    NS::Error* error = nullptr;
    graphicsPipeline->graphicsPipeline->renderPipelineDescriptor->colorAttachments()->object(0)->setPixelFormat(MTL::PixelFormatBGRA8Unorm);
    graphicsPipeline->graphicsPipeline->renderPipelineState = outputDevice.outputDevice->device->newRenderPipelineState(graphicsPipeline->graphicsPipeline->renderPipelineDescriptor, &error);
    if (!graphicsPipeline->graphicsPipeline->renderPipelineState)
        return gnReturnError(GN_FAILED_CREATE_GRAPHICS_PIPELINE, error->localizedDescription()->utf8String());

    graphicsPipeline->graphicsPipeline->depthStencilState = outputDevice.outputDevice->device->newDepthStencilState(graphicsPipeline->graphicsPipeline->depthStateDescriptor);
    graphicsPipeline->graphicsPipeline->outputDevice = &outputDevice;

    return GN_SUCCESS;
}
GN_EXPORT void gnDestroyGraphicsPipelineFn(gnGraphicsPipeline& graphicsPipeline) {

}
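A short sketch of how the setters above would typically be combined before baking the pipeline; this is a hedged example, the shader and vertex description objects are assumed to exist, and which setters a real caller uses depends on the frontend:

// hypothetical usage, not part of this commit
gnGraphicsPipeline pipeline{};
gnGraphicsPipelineSetCullModeFn(pipeline, GN_CULL_BACKFACE, GN_CLOCKWISE);
gnGraphicsPipelineEnableDepthTestFn(pipeline, true);           // builds the MTL::DepthStencilDescriptor
gnGraphicsPipelineSetColorBlendFn(pipeline, true);             // standard alpha blending on color attachment 0
gnGraphicsPipelineSetVertexDescriptionFn(pipeline, vertexDescription);
gnGraphicsPipelineBindShaderFn(pipeline, shader);              // vertex + fragment functions and binding offsets
gnReturnCode rc = gnCreateGraphicsPipelineFn(&pipeline, outputDevice); // bakes the MTL::RenderPipelineState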
@@ -1,37 +0,0 @@
#include <core/graphics_pipeline/gryphn_graphics_pipeline.h>
#include <Metal/Metal.hpp>

struct mtlShaderRepresentation {
    gnShaderModuleStage module;
    int uniformBufferBinding, pushConstantBinding;
    std::unordered_map<gnUInt2, gnUInt> textureBindings;
};

struct gnPlatformGraphicsPipeline {
    MTL::RenderPipelineState* renderPipelineState = nullptr;
    MTL::RenderPipelineDescriptor* renderPipelineDescriptor = nullptr;
    MTL::DepthStencilDescriptor* depthStateDescriptor = nullptr;
    gnOutputDevice* outputDevice;
    std::vector<mtlShaderRepresentation> shaders = {};
    MTL::DepthStencilState* depthStencilState;

    gnBool dynamicStatesEnabled = false;
    std::vector<gnDynamicState> dynamicStates = {};

    // for the viewport
    gnUInt2 position = { 0, 0 };
    gnUInt2 size = { 100, 100 };
    gnFloat minDepth = 0.0f, maxDepth = 1.0f;

    // for the stencil
    gnInt2 stencil_position;
    gnUInt2 stencil_size;

    gnBool enableDepthClamp;
    gnFillMode fillMode;
    gnFloat lineWidth;

    gnCullMode cullMode;
    gnFrontFaceDirection direction;
    gnBool colorBlending;
};
@@ -1,36 +0,0 @@
#include <core/graphics_pipeline/gryphn_render_pass.h>
#include <Metal/Metal.hpp>

// okay so my understanding is that there is no such thing as a MTL::Subpass or any alternative,
// so instead every render pass is going to be an array of render passes, and
// im going to find a way to implement error handling if the user attempts to access different
// resources or something like that
struct gnPlatformRenderPass {
    // MTL::RenderPassDescriptor* renderPassDescriptor = nullptr;
};
struct gnPlatformSubpass {};
struct gnPlatformRenderpassAttachment {};

// all this function is responsible for is making sure that your whole render pass will be valid when
// created at runtime, I fucking hate you metal more than vulkan, I wish everything was explicit like
// vulkan, can I just define everything at compile time please
GN_EXPORT gnReturnCode gnCreateRenderPassFn(gnRenderPass* renderPass, const gnOutputDevice& outputDevice) {
    // renderPass->renderpass = new gnPlatformRenderPass();
    // renderPass->renderpass->renderPassDescriptor = MTL::RenderPassDescriptor::alloc()->init();

    for (int i = 0; i < renderPass->attachmentCount; i++) {
        renderPass->attachments[i].renderpassAttachment = new gnPlatformRenderpassAttachment();
        if (renderPass->attachments[i].colorMode == GN_RGBA8) {}
        else if (renderPass->attachments[i].colorMode == GN_DEPTH8_STENCIL24) {}
        else {
            std::string return_code = "GN_RENDERPASS_ATTACHMENT_(" + std::to_string(i) + ")" + "_UNSUPPORTED_COLOR_MODE";
            return gnReturnError(GN_UNKNOWN_COLOR_FORMAT, return_code.c_str());
        }
    }

    return GN_SUCCESS;
}
GN_EXPORT void gnDestroyRenderPassFn(gnRenderPass& renderPass) {
    // renderPass.renderpass->renderPassDescriptor->release();
    return;
}
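One way to read the comment above about missing subpasses: each Gryphn subpass could get its own MTL::RenderPassDescriptor, loading the previous pass's output instead of clearing it. A rough illustration of that idea only, not what this file implements:

// hypothetical sketch, not part of this commit
std::vector<MTL::RenderPassDescriptor*> passes;
for (uint32_t i = 0; i < subpassCount; i++) {
    MTL::RenderPassDescriptor* desc = MTL::RenderPassDescriptor::alloc()->init();
    auto color = desc->colorAttachments()->object(0);
    color->setLoadAction(i == 0 ? MTL::LoadActionClear : MTL::LoadActionLoad);
    color->setStoreAction(MTL::StoreActionStore);
    passes.push_back(desc);
}
// each descriptor would then be encoded with its own render command encoder, and a later pass
// would sample the earlier pass's attachment texture where Vulkan would use an input attachment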
@@ -1,32 +0,0 @@
#include <core/graphics_pipeline/gryphn_render_pass_frame.h>
#include "metal_render_pass_frame.h"
#include <Metal/Metal.hpp>

gnPlatformRenderPassFrame::gnPlatformRenderPassFrame() {
    // renderPassDescriptor = MTL::RenderPassDescriptor::alloc()->init();
}

GN_EXPORT void gnRenderPassFrameSetRenderPassFn(gnRenderPassFrame& frame, const gnRenderPass& renderPass) {
    if (frame.renderPassFrame == nullptr) frame.renderPassFrame = new gnPlatformRenderPassFrame();
    // this is going to do nothing
}
GN_EXPORT void gnRenderPassFrameSetFramebufferFn(gnRenderPassFrame& frame, const gnFramebuffer& framebuffer) {
    if (frame.renderPassFrame == nullptr) frame.renderPassFrame = new gnPlatformRenderPassFrame();
    std::cout << "gnRenderPassFrameSetFramebufferFn on Metal is not implemented, I'll do this at some point\n";
    // this will do something with setting the attachment descriptions but im kinda lazy
    // and by lazy I mean gnFramebuffer_metal_impl has not been created yet
}
GN_EXPORT void gnRenderPassFrameSetOffsetFn(gnRenderPassFrame& frame, const gnUInt2& offset) {
    if (frame.renderPassFrame == nullptr) frame.renderPassFrame = new gnPlatformRenderPassFrame();
    // frame.renderPassFrame->renderPassDescriptor->
    if (offset.x != 0 || offset.y != 0) std::cout << "gnRenderPassFrameOffsetFn offset must be zero on metal\n";
}
GN_EXPORT void gnRenderPassFrameSetRenderAreaFn(gnRenderPassFrame& frame, const gnUInt2& area) {
    if (frame.renderPassFrame == nullptr) frame.renderPassFrame = new gnPlatformRenderPassFrame();
    // note: renderPassDescriptor is only allocated in the commented-out constructor above,
    // so as written these two calls dereference a null pointer
    frame.renderPassFrame->renderPassDescriptor->setRenderTargetWidth(area.x);
    frame.renderPassFrame->renderPassDescriptor->setRenderTargetHeight(area.y);
}
GN_EXPORT void gnRenderPassFrameSetClearColorFn(gnRenderPassFrame& frame, gnColor clearColor) {
    if (frame.renderPassFrame == nullptr) frame.renderPassFrame = new gnPlatformRenderPassFrame();
    std::cout << "gnRenderPassFrameSetClearColorFn does nothing on metal cuz imma bitch\n";
}
@@ -1,12 +0,0 @@
#include <core/graphics_pipeline/gryphn_render_pass_frame.h>
#include <Metal/Metal.hpp>

struct gnPlatformRenderPassFrame {
    // ive been told I can recreate this jazz every frame so im going to do that, screw you
    MTL::RenderPassDescriptor* renderPassDescriptor = nullptr;
    MTL::RenderCommandEncoder* renderCommandEncoder = nullptr;

    gnColor clearColor;

    gnPlatformRenderPassFrame();
};
@@ -1,120 +0,0 @@
#include "metal_shader_module.h"
#include "spirv_msl.hpp"
#include "core/devices/metal_output_devices.h"
#include "core/shaders/gryphn_shader.h"

static uint32_t* chars_to_uint32s(const char* chars, size_t num_chars) {
    if (chars == NULL || num_chars == 0) {
        return NULL;
    }

    // Determine the number of uint32_t elements needed.
    // Round up in case the number of chars isn't a multiple of 4.
    size_t num_uint32s = (num_chars + 3) / 4;

    // Allocate memory for the uint32_t array.
    uint32_t* uint32s = (uint32_t*)malloc(num_uint32s * sizeof(uint32_t));
    if (uint32s == NULL) {
        return NULL; // Allocation failed
    }

    // Initialize the uint32_t array to 0.
    for (size_t i = 0; i < num_uint32s; ++i) {
        uint32s[i] = 0;
    }

    // Iterate through the char array and build uint32_t values.
    for (size_t i = 0; i < num_chars; ++i) {
        size_t uint32_index = i / 4;
        int shift = 8 * (i % 4);
        uint32s[uint32_index] |= (uint32_t)(unsigned char)chars[i] << shift;
    }
    return uint32s;
}

GN_EXPORT gnReturnCode gnBuildShaderModuleFn(gnShaderModule* shaderModule, const gnOutputDevice& outputDevice) {
    if (shaderModule->shaderModule == nullptr) shaderModule->shaderModule = new gnPlatformShaderModule();

    spirv_cross::CompilerMSL::Options options;
    options.enable_decoration_binding = true;
    options.pad_argument_buffer_resources = true;

    std::string shaderSource;
    uint32_t* data = chars_to_uint32s(shaderModule->shaderData, shaderModule->codeSize);
    if (shaderModule->shaderType == GN_VERTEX_SHADER_MODULE) {
        spirv_cross::CompilerMSL vertexMSL(data, (shaderModule->codeSize + 3) / 4);
        vertexMSL.set_msl_options(options);

        spirv_cross::ShaderResources resources = vertexMSL.get_shader_resources();
        int largestBinding = 0;
        for (auto &resource : resources.uniform_buffers) {
            unsigned binding = vertexMSL.get_decoration(resource.id, spv::DecorationBinding) + 1;
            vertexMSL.unset_decoration(resource.id, spv::DecorationDescriptorSet);
            vertexMSL.set_decoration(resource.id, spv::DecorationBinding, binding);
            if (binding > largestBinding) largestBinding = binding;
        } // shift uniform buffer bindings up by one so metal's buffer(0) stays free for vertex data

        for (auto &resource : resources.push_constant_buffers) {
            unsigned binding = vertexMSL.get_decoration(resource.id, spv::DecorationBinding) + 1;
            vertexMSL.unset_decoration(resource.id, spv::DecorationDescriptorSet);
            vertexMSL.set_decoration(resource.id, spv::DecorationBinding, largestBinding + binding);
        } // move push constants past the last uniform buffer so they get their own metal buffer slot
        shaderSource = vertexMSL.compile();
        shaderModule->shaderModule->uniformBufferOffset = 1;
        shaderModule->shaderModule->pushConstantOffset = largestBinding + 1;
    } else if (shaderModule->shaderType == GN_FRAGMENT_SHADER_MODULE) {
        spirv_cross::CompilerMSL fragmentMSL(data, (shaderModule->codeSize + 3) / 4);
        fragmentMSL.set_msl_options(options);

        spirv_cross::ShaderResources resources = fragmentMSL.get_shader_resources();
        int largestBinding = 0;
        for (auto &resource : resources.uniform_buffers) {
            unsigned binding = fragmentMSL.get_decoration(resource.id, spv::DecorationBinding);
            if (binding > largestBinding) largestBinding = binding;
        }

        for (auto &resource : resources.push_constant_buffers) {
            unsigned binding = fragmentMSL.get_decoration(resource.id, spv::DecorationBinding);
            fragmentMSL.unset_decoration(resource.id, spv::DecorationDescriptorSet);
            fragmentMSL.set_decoration(resource.id, spv::DecorationBinding, (largestBinding + 1) + binding);
        } // move push constants past the last uniform buffer so they get their own metal buffer slot

        int bindingIndex = 0;
        for (auto &resource : resources.sampled_images) {
            unsigned binding = fragmentMSL.get_decoration(resource.id, spv::DecorationBinding);
            unsigned set = fragmentMSL.get_decoration(resource.id, spv::DecorationDescriptorSet);
            fragmentMSL.unset_decoration(resource.id, spv::DecorationDescriptorSet);
            fragmentMSL.set_decoration(resource.id, spv::DecorationBinding, bindingIndex);
            shaderModule->shaderModule->texturesSetBindings[{set, binding}] = bindingIndex;
            bindingIndex++;
        }

        shaderSource = fragmentMSL.compile();
        shaderModule->shaderModule->uniformBufferOffset = 0;
        shaderModule->shaderModule->pushConstantOffset = largestBinding + 1;
    } else {
        return gnReturnError(GN_UNKNOWN_SHADER_MODULE, "unknown shader module type (vertex and fragment are the only supported ones for now)");
    }

    // std::cout << shaderSource << "\n";

    NS::Error* error = nullptr;
    MTL::CompileOptions* mtloptions = nullptr;
    NS::String* sourceCode = NS::String::string(shaderSource.c_str(), NS::StringEncoding::UTF8StringEncoding);
    MTL::Library* shaderLib = outputDevice.outputDevice->device->newLibrary(sourceCode, mtloptions, &error);
    if (!shaderLib)
        return gnReturnError(GN_SHADER_FAILED_TO_COMPILE, error->localizedDescription()->utf8String());
    if (shaderLib->functionNames()->count() > 1)
        return gnReturnError(GN_SHADER_FAILED_TO_COMPILE, "More than one shader function in shader");

    shaderModule->shaderModule->shaderFunction = shaderLib->newFunction(reinterpret_cast<NS::String*>(shaderLib->functionNames()->object(0)));
    return GN_SUCCESS;
}

GN_EXPORT void gnDestroyShaderModuleFn(gnShaderModule& shaderModule) {
    shaderModule.shaderModule->shaderFunction->release();
}

GN_EXPORT gnReturnCode gnBuildShaderFn(gnShader* shader) {
    return GN_SUCCESS;
}
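To make the remapping concrete, a hedged example of what the vertex-stage pass above does to a typical binding layout; exact slots depend on the shader, and the offsets simply follow the code above:

// illustrative only, not part of this commit
//   GLSL: layout(set = 0, binding = 0) uniform Camera { ... };
//     -> rebound to MSL [[buffer(1)]]                    (binding + 1, so buffer(0) stays free for vertex data)
//   GLSL: layout(push_constant) uniform Push { ... };
//     -> rebound to MSL [[buffer(largestBinding + 1)]]   (pushed past the last uniform buffer slot)
// In the fragment stage, uniform buffers keep their bindings, push constants move past the
// largest uniform binding, and sampled images are packed into texture/sampler slots 0..N in
// declaration order, recorded in texturesSetBindings[{set, binding}] for use at bind time.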
@@ -1,4 +0,0 @@
#pragma once
#include <core/shaders/gryphn_shader.h>

struct gnPlatformShader {};
@@ -1,9 +0,0 @@
#pragma once
#include <core/shaders/gryphn_shader_module.h>
#include <Metal/Metal.hpp>

struct gnPlatformShaderModule {
    MTL::Function* shaderFunction;
    int uniformBufferOffset = 0, pushConstantOffset = 0;
    std::unordered_map<gnUInt2, gnUInt> texturesSetBindings;
};
@@ -1,34 +0,0 @@
#include <core/sync_objects/gryphn_fence.h>
#include <core/devices/metal_output_devices.h>
#include <Metal/Metal.hpp>

struct gnPlatformFence {
    MTL::SharedEvent* fence;
    MTL::SharedEventListener* listener;
    dispatch_semaphore_t semaphore;
};

GN_EXPORT gnReturnCode gnCreateFenceFn(gnFence* fence, const gnOutputDevice& device) {
    fence->fence = new gnPlatformFence();
    fence->fence->fence = device.outputDevice->device->newSharedEvent();
    fence->fence->listener = MTL::SharedEventListener::alloc()->init();
    fence->fence->semaphore = dispatch_semaphore_create(1);
    return GN_SUCCESS;
}
GN_EXPORT void gnWaitForFenceFn(const gnFence& fence) {
    dispatch_semaphore_wait(fence.fence->semaphore, DISPATCH_TIME_FOREVER);
}
GN_EXPORT void gnResetFenceFn(gnFence& fence) {
    dispatch_semaphore_signal(fence.fence->semaphore);
    fence.fence->fence->setSignaledValue(0);
    fence.fence->fence->notifyListener(
        fence.fence->listener,
        1,
        ^(MTL::SharedEvent* ev, uint64_t val) {
            dispatch_semaphore_signal(fence.fence->semaphore);
        }
    );
}
GN_EXPORT void gnDestroyFenceFn(gnFence& fence) {
    fence.fence->fence->release();
}
@@ -1,12 +0,0 @@
#include <core/sync_objects/gryphn_sync_semaphore.h>
#include "metal_semaphore.h"

GN_EXPORT gnReturnCode gnCreateSyncSemaphoreFn(gnSyncSemaphore* semaphore, const gnOutputDevice& device) {
    semaphore->semaphore = new gnPlatformSyncSemaphore();
    semaphore->semaphore->semaphore = dispatch_semaphore_create(0);
    // semaphore->semaphore->semaphore = device.outputDevice->device->newFence();
    return GN_SUCCESS;
}
GN_EXPORT void gnDestroySyncSemaphoreFn(const gnSyncSemaphore& semaphore) {
    // semaphore.semaphore->semaphore->release();
}
@@ -1,7 +0,0 @@
#pragma once
#include <Metal/Metal.hpp>

struct gnPlatformSyncSemaphore {
    // MTL::Fence* semaphore;
    dispatch_semaphore_t semaphore;
};
@@ -1,84 +0,0 @@
// #include "metal_texture.h"

// GN_EXPORT void gnTextureDataFn(gnTexture& texture, gnSize dataSize, const void* data) {
//     if (texture.texture == nullptr) texture.texture = new gnPlatformTexture();

//     MTL::Region region = MTL::Region(0, 0, 0, texture.textureExtent.x, texture.textureExtent.y, 1);
//     NS::UInteger bytesPerRow = texture.textureExtent.x;

//     if (texture.textureColorFormat == GN_RED)
//         bytesPerRow *= 1;
//     else if (texture.textureColorFormat == GN_RGB8)
//         bytesPerRow *= 3;
//     else if (texture.textureColorFormat == GN_RGBA8)
//         bytesPerRow *= 4;
//     else if (texture.textureColorFormat == GN_BGRA8)
//         bytesPerRow *= 4;
//     else if (texture.textureColorFormat == GN_DEPTH_STENCIL)
//         bytesPerRow *= 32; // this number is straight from my ass and may not work

//     texture.texture->texture->replaceRegion(region, 0, data, bytesPerRow);
// }
// GN_EXPORT void gnTextureCubeMapDataFn(gnTexture& texture, gnSize imageDataSize, void* face1, void* face2, void* face3, void* face4, void* face5, void* face6) {
//     NS::UInteger bytesPerRow = texture.textureExtent.x;

//     if (texture.textureColorFormat == GN_RED)
//         bytesPerRow *= 1;
//     else if (texture.textureColorFormat == GN_RGB8)
//         bytesPerRow *= 3;
//     else if (texture.textureColorFormat == GN_RGBA8)
//         bytesPerRow *= 4;
//     else if (texture.textureColorFormat == GN_BGRA8)
//         bytesPerRow *= 4;
//     else if (texture.textureColorFormat == GN_DEPTH_STENCIL)
//         bytesPerRow *= 32; // this number is straight from my ass and may not work

//     MTL::Region region = MTL::Region::Make2D(0, 0, texture.textureExtent.x, texture.textureExtent.y);
//     texture.texture->texture->replaceRegion(region, 0, 0, face1, bytesPerRow, imageDataSize);
//     texture.texture->texture->replaceRegion(region, 0, 1, face2, bytesPerRow, imageDataSize);
//     texture.texture->texture->replaceRegion(region, 0, 2, face3, bytesPerRow, imageDataSize);
//     texture.texture->texture->replaceRegion(region, 0, 3, face4, bytesPerRow, imageDataSize);
//     texture.texture->texture->replaceRegion(region, 0, 4, face5, bytesPerRow, imageDataSize);
//     texture.texture->texture->replaceRegion(region, 0, 5, face6, bytesPerRow, imageDataSize);
// }
// GN_EXPORT gnErrorCode gnCreateTextureFn(gnTexture* texture, const gnOutputDevice& outputDevice) {
//     if (texture->texture == nullptr) texture->texture = new gnPlatformTexture();

//     MTL::TextureDescriptor* textureDescriptor = MTL::TextureDescriptor::alloc()->init();
//     if (texture->textureType == GN_TEXTURE_CUBE_MAP) textureDescriptor->setTextureType(MTL::TextureType::TextureTypeCube);
//     if (texture->textureColorFormat == GN_RED)
//         textureDescriptor->setPixelFormat(MTL::PixelFormatR8Unorm);
//     else if (texture->textureColorFormat == GN_RGB8)
//         return gnReturnError(GN_UNSUPPORTED_COLOR_FORMAT, "GN_RGB8_UNSUPPORTED");
//     else if (texture->textureColorFormat == GN_RGBA8)
//         textureDescriptor->setPixelFormat(MTL::PixelFormatRGBA8Unorm);
//     else if (texture->textureColorFormat == GN_BGRA8)
//         textureDescriptor->setPixelFormat(MTL::PixelFormatBGRA8Unorm);
//     else if (texture->textureColorFormat == GN_DEPTH_STENCIL)
//         textureDescriptor->setPixelFormat(MTL::PixelFormatDepth32Float_Stencil8);
//     else return gnReturnError(GN_UNKNOWN_COLOR_FORMAT, "unknown pixel format");

//     textureDescriptor->setWidth(texture->textureExtent.x);
//     textureDescriptor->setHeight(texture->textureExtent.y);
//     // textureDescriptor->setUsage(MTL::TextureUsageRenderTarget | MTL::TextureUsageShaderRead);

//     texture->texture->texture = outputDevice.physicalOutputDevice->physicalOutputDevice->device->newTexture(textureDescriptor);
//     MTL::SamplerDescriptor* samplerDescriptor = MTL::SamplerDescriptor::alloc()->init();
//     if (texture->minFilter == GN_FILTER_LINEAR)
//         samplerDescriptor->setMinFilter(MTL::SamplerMinMagFilter::SamplerMinMagFilterLinear);
//     else
//         samplerDescriptor->setMinFilter(MTL::SamplerMinMagFilter::SamplerMinMagFilterNearest);

//     if (texture->magFilter == GN_FILTER_LINEAR)
//         samplerDescriptor->setMagFilter(MTL::SamplerMinMagFilter::SamplerMinMagFilterLinear);
//     else
//         samplerDescriptor->setMagFilter(MTL::SamplerMinMagFilter::SamplerMinMagFilterNearest);
//     texture->texture->sampler = outputDevice.outputDevice->device->newSamplerState(samplerDescriptor);

//     textureDescriptor->release();
//     samplerDescriptor->release();
//     return GN_SUCCESS;
// }
// GN_EXPORT void gnDestroyTextureFn(gnTexture& texture) {
//     texture.texture->texture->release();
// }
@@ -1,9 +0,0 @@
// #pragma once
// #include <core/textures/gryphn_texture.h>
// #include <core/devices/metal_output_devices.h>
// #include <Metal/Metal.hpp>

// struct gnPlatformTexture {
//     MTL::Texture* texture;
//     MTL::SamplerState* sampler;
// };
@@ -1,7 +0,0 @@
#include "core/uniform_descriptor/uniform_buffer/gryphn_uniform_buffer.h"

GN_EXPORT void gnUpdateBufferUniformFn(gnBufferUniform& uniformBuffer, const gnOutputDevice& outputDevice) {
    if (uniformBuffer.uniform->uniformLayout->bindings[uniformBuffer.binding].type != GN_UNIFORM_BUFFER_DESCRIPTOR) {
        std::cout << "uniform layout [" << uniformBuffer.binding << "] is not of type GN_UNIFORM_BUFFER_DESCRIPTOR but gnUpdateBufferUniform was called\n";
    }
}
@@ -1,7 +0,0 @@
#include "core/uniform_descriptor/sampler/gryphn_sampler.h"

GN_EXPORT void gnUpdateSamplerUniformFn(gnSamplerUniform& samplerUniform, const gnOutputDevice& outputDevice) {
    if (samplerUniform.uniform->uniformLayout->bindings[samplerUniform.binding].type != GN_SAMPLER_DESCRIPTOR) {
        std::cout << "uniform layout [" << samplerUniform.binding << "] is not of type GN_SAMPLER_DESCRIPTOR but gnUpdateSamplerUniform was called\n";
    }
}
@@ -1,8 +0,0 @@
#include "core/uniform_descriptor/gryphn_uniform.h"

GN_EXPORT gnReturnCode gnCreateUniformFn(gnUniform* uniform, gnOutputDevice& outputDevice) {
    return GN_SUCCESS;
}
GN_EXPORT void gnDestroyUniformFn(gnUniform& uniform) {

}
@@ -1,118 +0,0 @@
|
||||
#include "gryphn/gryphn_utils.h"
|
||||
#include "core/commands/gryphn_command.h"
|
||||
#include "vulkan_command_buffer.h"
|
||||
#include "../graphics_pipeline/vulkan_renderpass.h"
|
||||
#include "../graphics_pipeline/vulkan_graphics_pipeline.h"
|
||||
#include "../vertex_buffers/vulkan_buffers.h"
|
||||
#include "../push_constant/vulkan_push_constant.h"
|
||||
#include "core/shaders/gryphn_shader_module.h"
|
||||
#include "core/uniform_descriptor/uniform_buffer/gryphn_uniform_buffer.h"
|
||||
#include "../uniform_descriptor/vulkan_uniform.h"
|
||||
#include <vulkan/vulkan_core.h>
|
||||
#include "../framebuffers/vulkan_framebuffer.h"
|
||||
#include <array>
|
||||
|
||||
GN_EXPORT gnReturnCode gnCommandBufferStartFn(const gnCommandBuffer& commandBuffer) {
|
||||
VkCommandBufferBeginInfo beginInfo{};
|
||||
beginInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
|
||||
beginInfo.flags = 0; // Optional
|
||||
beginInfo.pInheritanceInfo = nullptr; // Optional
|
||||
|
||||
if (vkBeginCommandBuffer(commandBuffer.commandBuffer->commandBuffer, &beginInfo) != VK_SUCCESS) {
|
||||
return GN_FAILED;
|
||||
}
|
||||
|
||||
return GN_SUCCESS;
|
||||
}
|
||||
|
||||
GN_EXPORT void gnCommandBeginRenderPassFn(gnCommandBuffer& commandBuffer, const gnRenderPassFrame& frame) {
|
||||
gnRenderPassFrame* frameptr = const_cast<gnRenderPassFrame*>(&frame);
|
||||
if (frame.renderPassFrame == nullptr) frameptr->renderPassFrame = new gnPlatformRenderPassFrame();
|
||||
frameptr->renderPassFrame->renderPassInfo.sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO;
|
||||
frameptr->renderPassFrame->renderPassInfo.renderPass = frameptr->renderPass->renderpass->renderPass;
|
||||
frameptr->renderPassFrame->renderPassInfo.framebuffer = frameptr->framebuffer->framebuffer->framebuffer;
|
||||
frameptr->renderPassFrame->renderPassInfo.renderArea.offset = { (int)frameptr->offset.x, (int)frameptr->offset.y };
|
||||
frameptr->renderPassFrame->renderPassInfo.renderArea.extent = { frameptr->area.x, frameptr->area.y };
|
||||
|
||||
std::array<VkClearValue, 2> clearValues{};
|
||||
clearValues[0].color = {{frame.clearColor.r / 255.0f, frame.clearColor.g / 255.0f, frame.clearColor.b / 255.0f, frame.clearColor.a}};
|
||||
clearValues[1].depthStencil = {1.0f, 0};
|
||||
frame.renderPassFrame->renderPassInfo.clearValueCount = static_cast<uint32_t>(clearValues.size());
|
||||
frame.renderPassFrame->renderPassInfo.pClearValues = clearValues.data();
|
||||
|
||||
vkCmdBeginRenderPass(commandBuffer.commandBuffer->commandBuffer, &frame.renderPassFrame->renderPassInfo, VK_SUBPASS_CONTENTS_INLINE);
|
||||
}
|
||||
|
||||
GN_EXPORT void gnCommandSetGraphicsPipelineFn(const gnCommandBuffer& commandBuffer, const gnGraphicsPipeline& graphicsPipeline) {
|
||||
vkCmdBindPipeline(commandBuffer.commandBuffer->commandBuffer, VK_PIPELINE_BIND_POINT_GRAPHICS, graphicsPipeline.graphicsPipeline->graphicsPipeline);
|
||||
}
|
||||
|
||||
GN_EXPORT void gnCommandSetViewportFn(const gnCommandBuffer& commandBuffer, gnViewportDescriptionData data) {
|
||||
commandBuffer.commandBuffer->viewport.x = data.offset.x;
|
||||
commandBuffer.commandBuffer->viewport.y = data.offset.y;
|
||||
commandBuffer.commandBuffer->viewport.width = data.size.x;
|
||||
commandBuffer.commandBuffer->viewport.height = data.size.y;
|
||||
commandBuffer.commandBuffer->viewport.minDepth = data.depth.a;
|
||||
commandBuffer.commandBuffer->viewport.maxDepth = data.depth.b;
|
||||
|
||||
vkCmdSetViewport(commandBuffer.commandBuffer->commandBuffer, 0, 1, &commandBuffer.commandBuffer->viewport);
|
||||
}
|
||||
GN_EXPORT void gnCommandSetScissorFn(const gnCommandBuffer& commandBuffer, gnScissorDescriptionData data) {
|
||||
commandBuffer.commandBuffer->scissor.offset = {(int)data.offset.x, (int)data.offset.y};
|
||||
commandBuffer.commandBuffer->scissor.extent = { data.extent.x, data.extent.y };
|
||||
vkCmdSetScissor(commandBuffer.commandBuffer->commandBuffer, 0, 1, &commandBuffer.commandBuffer->scissor);
|
||||
}
|
||||
GN_EXPORT void gnCommandBindBufferFn(const gnCommandBuffer& commandBuffer, const gnBuffer& buffer) {
|
||||
if (buffer.bufferType == GN_VERTEX_BUFFER) {
|
||||
VkBuffer vertexBuffers[] = {buffer.buffer->buffer};
|
||||
VkDeviceSize offsets[] = {0};
|
||||
vkCmdBindVertexBuffers(commandBuffer.commandBuffer->commandBuffer, 0, 1, vertexBuffers, offsets);
|
||||
} else {
|
||||
VkIndexType type = VK_INDEX_TYPE_UINT32; // default so the index type is never left uninitialized
if (buffer.dataType == GN_UINT8) { /* TODO: needs VK_EXT_index_type_uint8 / vulkan 1.4 */ }
if (buffer.dataType == GN_UINT16) { type = VK_INDEX_TYPE_UINT16; }
if (buffer.dataType == GN_UINT32) { type = VK_INDEX_TYPE_UINT32; }

vkCmdBindIndexBuffer(commandBuffer.commandBuffer->commandBuffer, buffer.buffer->buffer, 0, type);
|
||||
}
|
||||
}
|
||||
|
||||
GN_EXPORT void gnCommandDrawFn(const gnCommandBuffer& commandBuffer, int vertexCount, int instanceCount, int firstVertex, int firstInstance) {
|
||||
vkCmdDraw(commandBuffer.commandBuffer->commandBuffer, vertexCount, instanceCount, firstVertex, firstInstance);
|
||||
}
|
||||
GN_EXPORT void gnCommandDrawIndexedFn(const gnCommandBuffer& commandBuffer, gnUInt indexCount, gnUInt instanceCount, gnUInt firstIndex, gnInt vertexOffset, gnUInt firstInstance) {
|
||||
vkCmdDrawIndexed(commandBuffer.commandBuffer->commandBuffer, indexCount, instanceCount, firstIndex, vertexOffset, firstInstance);
|
||||
}
|
||||
GN_EXPORT void gnCommandBindBufferUniformFn(const gnCommandBuffer& commandBuffer, gnGraphicsPipeline& graphicsPipeline, gnBufferUniform& uniformBuffer, gnInt set) {
|
||||
vkCmdBindDescriptorSets(
|
||||
commandBuffer.commandBuffer->commandBuffer, VK_PIPELINE_BIND_POINT_GRAPHICS,
|
||||
graphicsPipeline.graphicsPipeline->pipelineLayout, set, 1,
|
||||
&uniformBuffer.uniform->uniform->descriptorSets[uniformBuffer.index], 0, nullptr
|
||||
);
|
||||
}
|
||||
GN_EXPORT void gnCommandBindSamplerUniformFn(const gnCommandBuffer& commandBuffer, const gnGraphicsPipeline& graphicsPipeline, const gnSamplerUniform& sampler, gnInt set) {
|
||||
vkCmdBindDescriptorSets(
|
||||
commandBuffer.commandBuffer->commandBuffer, VK_PIPELINE_BIND_POINT_GRAPHICS,
|
||||
graphicsPipeline.graphicsPipeline->pipelineLayout, set, 1,
|
||||
&sampler.uniform->uniform->descriptorSets[sampler.index], 0, nullptr
|
||||
);
|
||||
}
|
||||
GN_EXPORT void gnCommandPushConstantFn(gnCommandBuffer& commandBuffer, const gnGraphicsPipeline& graphicsPipeline, const gnPushConstant& pushConstant, void* data) {
|
||||
int stageBit = 0;
|
||||
|
||||
if (gnContainsShaderStage(pushConstant.stage, GN_VERTEX_SHADER_MODULE)) stageBit |= VK_SHADER_STAGE_VERTEX_BIT;
|
||||
if (gnContainsShaderStage(pushConstant.stage, GN_FRAGMENT_SHADER_MODULE)) stageBit |= VK_SHADER_STAGE_FRAGMENT_BIT;
|
||||
|
||||
vkCmdPushConstants(commandBuffer.commandBuffer->commandBuffer,
|
||||
graphicsPipeline.graphicsPipeline->pipelineLayout,
|
||||
stageBit, pushConstant.offset, pushConstant.size, data);
|
||||
}
|
||||
GN_EXPORT void gnCommandEndRenderPassFn(const gnCommandBuffer& commandBuffer) {
|
||||
vkCmdEndRenderPass(commandBuffer.commandBuffer->commandBuffer);
|
||||
}
|
||||
GN_EXPORT gnReturnCode gnCommandBufferEndFn(const gnCommandBuffer& commandBuffer) {
|
||||
if (vkEndCommandBuffer(commandBuffer.commandBuffer->commandBuffer) != VK_SUCCESS) {
|
||||
return GN_FAILED;
|
||||
}
|
||||
return GN_SUCCESS;
|
||||
}
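// Rough caller-side sketch (assumed usage; the wrapper names without the Fn suffix are hypothetical
// front-end entry points, not taken from this file):
//   gnCommandBufferStart(cmd);
//   gnCommandBeginRenderPass(cmd, frame);
//   gnCommandSetGraphicsPipeline(cmd, pipeline);
//   gnCommandSetViewport(cmd, viewportData); gnCommandSetScissor(cmd, scissorData);
//   gnCommandBindBuffer(cmd, vertexBuffer); gnCommandBindBuffer(cmd, indexBuffer);
//   gnCommandDrawIndexed(cmd, indexCount, 1, 0, 0, 0);
//   gnCommandEndRenderPass(cmd);
//   gnCommandBufferEnd(cmd);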
|
@@ -1,143 +0,0 @@
|
||||
#include "gryphn/gryphn_utils.h"
|
||||
#include "core/commands/gryphn_command_buffer.h"
|
||||
#include "../graphics_pipeline/vulkan_renderpass.h"
|
||||
#include "../presentation_queue/vulkan_queue_families.h"
|
||||
#include "../instance/vulkan_instance.h"
|
||||
#include "vulkan_command_buffer.h"
|
||||
#include "../graphics_pipeline/vulkan_graphics_pipeline.h"
|
||||
|
||||
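// Helper for one-off GPU work (copies, layout transitions): allocate a primary command buffer from the
// device's command pool and begin it with ONE_TIME_SUBMIT; endSingleTimeCommands() then submits it,
// waits for the graphics queue to go idle, and frees the buffer.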
VkCommandBuffer beginSingleTimeCommands(const gnOutputDevice& outputDevice) {
|
||||
VkCommandBufferAllocateInfo allocInfo{};
|
||||
allocInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO;
|
||||
allocInfo.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY;
|
||||
allocInfo.commandPool = outputDevice.outputDevice->commandPool;
|
||||
allocInfo.commandBufferCount = 1;
|
||||
|
||||
VkCommandBuffer commandBuffer;
|
||||
vkAllocateCommandBuffers(outputDevice.outputDevice->device, &allocInfo, &commandBuffer);
|
||||
|
||||
VkCommandBufferBeginInfo beginInfo{};
|
||||
beginInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
|
||||
beginInfo.flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT;
|
||||
|
||||
vkBeginCommandBuffer(commandBuffer, &beginInfo);
|
||||
|
||||
return commandBuffer;
|
||||
}
|
||||
|
||||
|
||||
void endSingleTimeCommands(VkCommandBuffer commandBuffer, const gnOutputDevice& outputDevice) {
|
||||
vkEndCommandBuffer(commandBuffer);
|
||||
|
||||
VkSubmitInfo submitInfo{};
|
||||
submitInfo.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
|
||||
submitInfo.commandBufferCount = 1;
|
||||
submitInfo.pCommandBuffers = &commandBuffer;
|
||||
|
||||
vkQueueSubmit(outputDevice.outputDevice->graphicsQueue, 1, &submitInfo, VK_NULL_HANDLE);
|
||||
vkQueueWaitIdle(outputDevice.outputDevice->graphicsQueue);
|
||||
|
||||
vkFreeCommandBuffers(outputDevice.outputDevice->device, outputDevice.outputDevice->commandPool, 1, &commandBuffer);
|
||||
}
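// Example (sketch, assumed usage of the helpers above for a one-off buffer copy):
//   VkCommandBuffer cmd = beginSingleTimeCommands(outputDevice);
//   VkBufferCopy region{ .srcOffset = 0, .dstOffset = 0, .size = size };
//   vkCmdCopyBuffer(cmd, srcBuffer, dstBuffer, 1, &region);
//   endSingleTimeCommands(cmd, outputDevice);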
|
||||
|
||||
// GN_EXPORT gnReturnCode _gnCreateCommandBuffersFn(gnList<gnCommandBuffer> *commandBuffers, const gnGraphicsPipeline& pipeline) {
|
||||
// std::vector<VkCommandBuffer> commandBufferList;
|
||||
// for (int i = 0; i < gnListLength(*commandBuffers); i++) {
|
||||
// if ((*commandBuffers)[i].commandBuffer == nullptr) (*commandBuffers)[i].commandBuffer = new gnPlatformCommandBuffer();
|
||||
|
||||
// (*commandBuffers)[i].commandBuffer->outputDevice = pipeline.renderPass->renderpass->outputDevice;
|
||||
// commandBufferList.push_back((*commandBuffers)[i].commandBuffer->commandBuffer);
|
||||
// }
|
||||
|
||||
// VkCommandBufferAllocateInfo allocInfo{};
|
||||
// allocInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO;
|
||||
// allocInfo.commandPool = *pipeline.graphicsPipeline->commandPool;
|
||||
// allocInfo.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY;
|
||||
// allocInfo.commandBufferCount = (uint32_t) gnListLength(*commandBuffers);
|
||||
|
||||
// if (vkAllocateCommandBuffers(pipeline.renderPass->renderpass->outputDevice->outputDevice->device, &allocInfo, commandBufferList.data()) != VK_SUCCESS) {
|
||||
// return GN_FAILED;
|
||||
// }
|
||||
|
||||
// for (int i = 0; i < gnListLength(*commandBuffers); i++) {
|
||||
// (*commandBuffers)[i].commandBuffer->commandBuffer = commandBufferList[i];
|
||||
// }
|
||||
|
||||
// return GN_SUCCESS;
|
||||
// }
|
||||
|
||||
// GN_EXPORT gnReturnCode _gnCreateCommandBuffersFn(std::vector<gnCommandBuffer>* commandBuffers, const gnGraphicsPipeline &pipeline) {
|
||||
// std::vector<VkCommandBuffer> commandBufferList;
|
||||
// for (int i = 0; i < commandBuffers->size(); i++) {
|
||||
// (*commandBuffers)[i].commandBuffer->outputDevice = pipeline.renderPass->renderpass->outputDevice;
|
||||
// commandBufferList.push_back((*commandBuffers)[i].commandBuffer->commandBuffer);
|
||||
// }
|
||||
|
||||
// VkCommandBufferAllocateInfo allocInfo{};
|
||||
// allocInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO;
|
||||
// allocInfo.commandPool = *pipeline.graphicsPipeline->commandPool;
|
||||
// allocInfo.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY;
|
||||
// allocInfo.commandBufferCount = (uint32_t) commandBuffers->size();
|
||||
|
||||
// if (vkAllocateCommandBuffers(pipeline.renderPass->renderpass->outputDevice->outputDevice->device, &allocInfo, commandBufferList.data()) != VK_SUCCESS) {
|
||||
// return GN_FAILED;
|
||||
// }
|
||||
|
||||
// for (int i = 0; i < commandBuffers->size(); i++) {
|
||||
// (*commandBuffers)[i].commandBuffer->commandBuffer = commandBufferList[i];
|
||||
// }
|
||||
|
||||
// return GN_SUCCESS;
|
||||
// }
|
||||
|
||||
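// Batch variant: allocates commandBufferCount primary command buffers with a single
// vkAllocateCommandBuffers call and hands each VkCommandBuffer back to its gnCommandBuffer wrapper.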
GN_EXPORT gnReturnCode _gnCreateCommandBuffersFn(gnCommandBuffer* commandBuffers, gnUInt commandBufferCount, const gnOutputDevice& outputDevice) {
|
||||
std::vector<VkCommandBuffer> commandBufferList;
|
||||
for (int i = 0; i < commandBufferCount; i++) {
|
||||
commandBuffers[i].commandBuffer = new gnPlatformCommandBuffer();
|
||||
commandBuffers[i].commandBuffer->outputDevice = const_cast<gnOutputDevice*>(&outputDevice);
|
||||
commandBufferList.push_back(commandBuffers[i].commandBuffer->commandBuffer);
|
||||
}
|
||||
|
||||
VkCommandBufferAllocateInfo allocInfo{};
|
||||
allocInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO;
|
||||
allocInfo.commandPool = outputDevice.outputDevice->commandPool;
|
||||
allocInfo.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY;
|
||||
allocInfo.commandBufferCount = (uint32_t) commandBufferCount;
|
||||
|
||||
if (vkAllocateCommandBuffers(outputDevice.outputDevice->device, &allocInfo, commandBufferList.data()) != VK_SUCCESS) {
|
||||
return GN_FAILED;
|
||||
}
|
||||
|
||||
for (int i = 0; i < commandBufferCount; i++) {
|
||||
commandBuffers[i].commandBuffer->commandBuffer = commandBufferList[i];
|
||||
}
|
||||
|
||||
return GN_SUCCESS;
|
||||
}
|
||||
|
||||
|
||||
GN_EXPORT gnReturnCode gnCreateCommandBufferFn(gnCommandBuffer* commandBuffer, const gnOutputDevice& device) {
|
||||
commandBuffer->commandBuffer->outputDevice = const_cast<gnOutputDevice*>(&device);
|
||||
|
||||
{ // create the command buffer
|
||||
VkCommandBufferAllocateInfo allocInfo{};
|
||||
allocInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO;
|
||||
allocInfo.commandPool = device.outputDevice->commandPool;
|
||||
allocInfo.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY;
|
||||
allocInfo.commandBufferCount = 1;
|
||||
|
||||
if (vkAllocateCommandBuffers(device.outputDevice->device, &allocInfo, &commandBuffer->commandBuffer->commandBuffer) != VK_SUCCESS) {
|
||||
return GN_FAILED;
|
||||
}
|
||||
}
|
||||
|
||||
return GN_SUCCESS;
|
||||
}
|
||||
|
||||
GN_EXPORT void gnCommandBufferResetFn(const gnCommandBuffer& commandBuffer) {
|
||||
vkResetCommandBuffer(commandBuffer.commandBuffer->commandBuffer, 0);
|
||||
}
|
||||
|
||||
GN_EXPORT void gnDestroyCommandBufferFn(const gnCommandBuffer& commandBuffer) {
|
||||
// intentionally a no-op; this entry point is archaic (command buffers are freed with the command pool)
|
||||
}
|
@@ -1,15 +0,0 @@
|
||||
#pragma once
|
||||
#include "core/graphics_pipeline/gryphn_graphics_pipeline.h"
|
||||
#include <vulkan/vulkan.h>
|
||||
|
||||
struct gnPlatformCommandBuffer {
|
||||
VkCommandBuffer commandBuffer;
|
||||
|
||||
gnOutputDevice* outputDevice;
|
||||
|
||||
VkViewport viewport{};
|
||||
VkRect2D scissor{};
|
||||
};
|
||||
|
||||
VkCommandBuffer beginSingleTimeCommands(const gnOutputDevice& outputDevice);
|
||||
void endSingleTimeCommands(VkCommandBuffer commandBuffer, const gnOutputDevice& outputDevice);
|
@@ -1,55 +0,0 @@
|
||||
#include "core/commands/present_command/gryphn_command_present.h"
|
||||
#include "../presentation_queue/vulkan_presentation_queue.h"
|
||||
#include "../sync_objects/vulkan_sync_semaphore.h"
|
||||
#include "../output_device/vulkan_output_devices.h"
|
||||
|
||||
struct gnPlatformCommandPresentData {
|
||||
VkPresentInfoKHR presentInfo{
|
||||
.sType = VK_STRUCTURE_TYPE_PRESENT_INFO_KHR
|
||||
};
|
||||
gnPresentationQueue* presentationQueue;
|
||||
VkResult result;
|
||||
};
|
||||
|
||||
void gnCommandPresentDataSetSignalSemaphoreFn(gnCommandPresentData& presentCommandData, const gnSyncSemaphore& semaphore) {
|
||||
presentCommandData.commandPresentData->presentInfo.waitSemaphoreCount = 1;
|
||||
presentCommandData.commandPresentData->presentInfo.pWaitSemaphores = &semaphore.semaphore->semaphore;
|
||||
}
|
||||
void gnCommandPresentDataSetPresentationQueueFn(gnCommandPresentData& presentCommandData, const gnPresentationQueue& presentationQueue) {
|
||||
presentCommandData.commandPresentData->presentInfo.swapchainCount = 1;
|
||||
presentCommandData.commandPresentData->presentInfo.pSwapchains = &presentationQueue.presentationQueue->swapChain;
|
||||
|
||||
presentCommandData.commandPresentData->presentationQueue = const_cast<gnPresentationQueue*>(&presentationQueue);
|
||||
}
|
||||
|
||||
void gnCommandPresentDataSetImageIndexFn(gnCommandPresentData& presentCommandData, gnUInt* imageIndex) {
|
||||
presentCommandData.commandPresentData->presentInfo.pImageIndices = imageIndex;
|
||||
}
|
||||
GN_EXPORT gnPresentationQueueState gnCommandPresentGetValidPresentationQueueFn(gnCommandPresentData& presentCommandData) {
|
||||
if (presentCommandData.commandPresentData->result == VK_ERROR_OUT_OF_DATE_KHR) {
|
||||
return GN_OUT_OF_DATE;
|
||||
} else if (presentCommandData.commandPresentData->result == VK_SUBOPTIMAL_KHR) {
|
||||
return GN_SUBOPTIMAL;
|
||||
}
|
||||
else if (presentCommandData.commandPresentData->result == VK_SUCCESS) {
|
||||
return GN_VALID;
|
||||
}
|
||||
return GN_VALID;
|
||||
}
|
||||
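// Present path: fills VkPresentInfoKHR from the wait semaphore, swapchain and image index, then calls
// vkQueuePresentKHR on the device's present queue; the raw VkResult is kept so
// gnCommandPresentGetValidPresentationQueueFn can report OUT_OF_DATE / SUBOPTIMAL afterwards.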
GN_EXPORT gnReturnCode gnCommandPresentFn(gnCommandPresentData& presentCommandData) {
|
||||
if (presentCommandData.commandPresentData == nullptr) presentCommandData.commandPresentData = new gnPlatformCommandPresentData();
|
||||
|
||||
gnCommandPresentDataSetSignalSemaphoreFn(presentCommandData, *presentCommandData.semaphore);
|
||||
gnCommandPresentDataSetPresentationQueueFn(presentCommandData, *presentCommandData.presentationQueue);
|
||||
gnCommandPresentDataSetImageIndexFn(presentCommandData, presentCommandData.imageIndex);
|
||||
|
||||
uint32_t imageIndex = *presentCommandData.imageIndex;
|
||||
presentCommandData.commandPresentData->presentInfo.pImageIndices = &imageIndex;
|
||||
|
||||
presentCommandData.commandPresentData->result =
|
||||
vkQueuePresentKHR(presentCommandData.commandPresentData->presentationQueue->presentationQueue->outputDevice->outputDevice->presentQueue, &presentCommandData.commandPresentData->presentInfo);
|
||||
if (presentCommandData.commandPresentData->result != VK_SUCCESS) {
|
||||
return GN_FAILED;
|
||||
}
|
||||
return GN_SUCCESS;
|
||||
}
|
@@ -1,56 +0,0 @@
|
||||
#include <gryphn/gryphn_utils.h>
|
||||
#include "core/commands/submit_command/gryphn_command_submit.h"
|
||||
#include "../sync_objects/vulkan_sync_semaphore.h"
|
||||
#include "vulkan_command_buffer.h"
|
||||
#include "../sync_objects/vulkan_fence.h"
|
||||
|
||||
struct gnPlatformCommandSubmitData {
|
||||
VkSubmitInfo submitInfo{
|
||||
.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO
|
||||
};
|
||||
VkResult result;
|
||||
};
|
||||
|
||||
VkPipelineStageFlags waitStages[] = {VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT};
|
||||
|
||||
void gnCommandSubmitDataSetWaitSemaphoreFn(gnCommandSubmitData& data, const gnSyncSemaphore& semaphore) {
|
||||
data.commandSubmitData->submitInfo.waitSemaphoreCount = 1;
|
||||
data.commandSubmitData->submitInfo.pWaitSemaphores = &semaphore.semaphore->semaphore;
|
||||
data.commandSubmitData->submitInfo.pWaitDstStageMask = waitStages;
|
||||
}
|
||||
|
||||
void gnCommandSubmitDataSetCommandBufferFn(gnCommandSubmitData& data, const gnCommandBuffer& commandBuffer) {
|
||||
data.commandSubmitData->submitInfo.commandBufferCount = 1;
|
||||
data.commandSubmitData->submitInfo.pCommandBuffers = &commandBuffer.commandBuffer->commandBuffer;
|
||||
}
|
||||
|
||||
void gnCommandSubmitDataSetSignalSemaphoreFn(gnCommandSubmitData& data, const gnSyncSemaphore& semaphore) {
|
||||
data.commandSubmitData->submitInfo.signalSemaphoreCount = 1;
|
||||
data.commandSubmitData->submitInfo.pSignalSemaphores = &semaphore.semaphore->semaphore;
|
||||
}
|
||||
GN_EXPORT gnPresentationQueueState gnCommandSubmitGetValidPresentationQueueFn(gnCommandSubmitData& data) {
|
||||
if (data.commandSubmitData->result == VK_ERROR_OUT_OF_DATE_KHR) {
|
||||
return GN_OUT_OF_DATE;
|
||||
} else if (data.commandSubmitData->result == VK_SUBOPTIMAL_KHR) {
|
||||
return GN_SUBOPTIMAL;
|
||||
}
|
||||
else if (data.commandSubmitData->result == VK_SUCCESS) {
|
||||
return GN_VALID;
|
||||
}
|
||||
return GN_VALID;
|
||||
}
|
||||
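// Submit path: one wait semaphore (at the color-attachment-output stage), one command buffer and one
// signal semaphore per submit; the fence passed in is signalled when the GPU finishes the work.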
GN_EXPORT gnReturnCode gnCommandSubmitFn(gnCommandSubmitData& data, const gnFence& fence) {
|
||||
if (data.commandSubmitData == nullptr) data.commandSubmitData = new gnPlatformCommandSubmitData();
|
||||
|
||||
gnCommandSubmitDataSetWaitSemaphoreFn(data, *data.waitSemaphore);
|
||||
gnCommandSubmitDataSetCommandBufferFn(data, *data.commandBuffer);
|
||||
gnCommandSubmitDataSetSignalSemaphoreFn(data, *data.signalSemaphore);
|
||||
|
||||
data.commandSubmitData->result = vkQueueSubmit(fence.fence->device->outputDevice->graphicsQueue, 1, &data.commandSubmitData->submitInfo, fence.fence->fence);
|
||||
|
||||
if (data.commandSubmitData->result != VK_SUCCESS) {
|
||||
return GN_FAILED;
|
||||
}
|
||||
|
||||
return GN_SUCCESS;
|
||||
}
|
@@ -1,77 +0,0 @@
|
||||
#include "core/framebuffers/gryphn_framebuffer.h"
|
||||
#include <vulkan/vulkan.h>
|
||||
#include "../output_device/vulkan_output_devices.h"
|
||||
#include "../presentation_queue/vulkan_presentation_queue.h"
|
||||
#include "../graphics_pipeline/vulkan_renderpass.h"
|
||||
#include "vulkan_framebuffer.h"
|
||||
#include "../textures/vulkan_texture.h"
|
||||
|
||||
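// Builds a VkFramebuffer from the image views of the gryphn framebuffer attachments; width/height come
// from framebuffer->size and the render pass supplies the compatible VkRenderPass.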
GN_EXPORT gnReturnCode gnCreateFramebufferFn(gnFramebuffer* framebuffer, const gnRenderPass& renderpass) {
|
||||
if (framebuffer->framebuffer == nullptr) framebuffer->framebuffer = new gnPlatformFramebuffer();
|
||||
std::vector<VkImageView> attachments = {};
|
||||
|
||||
for (int i = 0; i < gnListLength(framebuffer->framebufferAttachments); i++) {
|
||||
if (framebuffer->framebufferAttachments[i].texture->texture == nullptr) framebuffer->framebufferAttachments[i].texture->texture = new gnPlatformTexture();
|
||||
framebuffer->framebufferAttachments[i].texture->texture->outputDevice = renderpass.renderpass->outputDevice;
|
||||
attachments.push_back(framebuffer->framebufferAttachments[i].texture->texture->textureImageView);
|
||||
};
|
||||
|
||||
VkFramebufferCreateInfo framebufferInfo{};
|
||||
framebufferInfo.sType = VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO;
|
||||
framebufferInfo.renderPass = renderpass.renderpass->renderPass;
|
||||
framebufferInfo.attachmentCount = static_cast<uint32_t>(attachments.size());
|
||||
framebufferInfo.pAttachments = attachments.data();
|
||||
framebufferInfo.width = framebuffer->size.x;
|
||||
framebufferInfo.height = framebuffer->size.y;
|
||||
framebufferInfo.layers = 1;
|
||||
|
||||
// framebuffer->framebuffer->pipeline = const_cast<gnGraphicsPipeline*>(&pipeline);
|
||||
framebuffer->framebuffer->outputDevice = renderpass.renderpass->outputDevice;
|
||||
|
||||
|
||||
if (vkCreateFramebuffer(renderpass.renderpass->outputDevice->outputDevice->device, &framebufferInfo, nullptr, &framebuffer->framebuffer->framebuffer) != VK_SUCCESS) {
|
||||
return gnReturnError(GN_FAILED_TO_CREATE_FRAMEBUFFER, "vkCreateFramebuffer failed");
|
||||
}
|
||||
|
||||
return GN_SUCCESS;
|
||||
}
|
||||
|
||||
GN_EXPORT gnReturnCode gnCreateFramebufferAttachmentFn(gnFramebufferAttachment* attachment, gnPresentationQueue& queue) {
|
||||
if (attachment->framebufferAttachment == nullptr) attachment->framebufferAttachment = new gnPlatformFramebufferAttachment();
|
||||
if (attachment->colorMode == GN_RGBA8) {
|
||||
attachment->framebufferAttachment->attachment.format = queue.presentationQueue->swapchainDetails.surfaceFormat.format;
|
||||
attachment->framebufferAttachment->attachment.samples = VK_SAMPLE_COUNT_1_BIT;
|
||||
attachment->framebufferAttachment->attachment.loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR;
|
||||
attachment->framebufferAttachment->attachment.storeOp = VK_ATTACHMENT_STORE_OP_STORE;
|
||||
attachment->framebufferAttachment->attachment.stencilLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
|
||||
attachment->framebufferAttachment->attachment.stencilStoreOp = VK_ATTACHMENT_STORE_OP_DONT_CARE;
|
||||
attachment->framebufferAttachment->attachment.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
|
||||
attachment->framebufferAttachment->attachment.finalLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR;
|
||||
|
||||
attachment->framebufferAttachment->attachmentRef.attachment = 0;
|
||||
attachment->framebufferAttachment->attachmentRef.layout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
|
||||
} else if (attachment->colorMode == GN_DEPTH_STENCIL) {
|
||||
VkFormat format;
|
||||
gnReturnCode depthFormatReturnError = findDepthFormat(*queue.presentationQueue->outputDevice, format);
|
||||
if (depthFormatReturnError != GN_SUCCESS) return depthFormatReturnError;
|
||||
|
||||
attachment->framebufferAttachment->attachment.format = format;
|
||||
attachment->framebufferAttachment->attachment.samples = VK_SAMPLE_COUNT_1_BIT;
|
||||
attachment->framebufferAttachment->attachment.loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR;
|
||||
attachment->framebufferAttachment->attachment.storeOp = VK_ATTACHMENT_STORE_OP_DONT_CARE;
|
||||
attachment->framebufferAttachment->attachment.stencilLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
|
||||
attachment->framebufferAttachment->attachment.stencilStoreOp = VK_ATTACHMENT_STORE_OP_DONT_CARE;
|
||||
attachment->framebufferAttachment->attachment.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
|
||||
attachment->framebufferAttachment->attachment.finalLayout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL;
|
||||
|
||||
}
|
||||
|
||||
|
||||
return GN_SUCCESS;
|
||||
}
|
||||
|
||||
// texture
|
||||
|
||||
GN_EXPORT void gnDestroyFramebufferFn(const gnFramebuffer& framebuffer) {
|
||||
vkDestroyFramebuffer(framebuffer.framebuffer->outputDevice->outputDevice->device, framebuffer.framebuffer->framebuffer, nullptr);
|
||||
}
|
@@ -1,15 +0,0 @@
|
||||
#pragma once
|
||||
#include <vulkan/vulkan.h>
|
||||
#include "core/graphics_pipeline/gryphn_graphics_pipeline.h"
|
||||
#include <core/framebuffers/gryphn_framebuffer.h>
|
||||
|
||||
struct gnPlatformFramebuffer {
|
||||
VkFramebuffer framebuffer;
|
||||
gnRenderPass* renderpass;
|
||||
gnOutputDevice* outputDevice;
|
||||
};
|
||||
|
||||
struct gnPlatformFramebufferAttachment {
|
||||
VkAttachmentDescription attachment{};
|
||||
VkAttachmentReference attachmentRef{};
|
||||
};
|
@@ -1,267 +0,0 @@
|
||||
#include "vulkan_graphics_pipeline.h"
|
||||
#include "../shaders/vulkan_shader_module.h"
|
||||
#include "vulkan_renderpass.h"
|
||||
#include "../vertex_buffers/vertex_descriptions/vulkan_vertex_description.h"
|
||||
#include "../uniform_descriptor/vulkan_uniform_layout.h"
|
||||
#include "../push_constant/vulkan_push_constant.h"
|
||||
|
||||
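// Lazily allocates the platform pipeline state and stamps the sType of every create-info struct; every
// setter below calls this first so the setters can run in any order before gnCreateGraphicsPipelineFn.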
void vulkanCreateGraphicsPipeline(gnGraphicsPipeline* pipeline) {
|
||||
if (pipeline->graphicsPipeline == nullptr) pipeline->graphicsPipeline = new gnPlatformGraphicsPipeline();
|
||||
|
||||
pipeline->graphicsPipeline->inputAssembly.sType = VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO;
|
||||
pipeline->graphicsPipeline->rasterizer.sType = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO;
|
||||
pipeline->graphicsPipeline->rasterizer.rasterizerDiscardEnable = VK_FALSE;
|
||||
pipeline->graphicsPipeline->dynamicState.sType = VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO;
|
||||
pipeline->graphicsPipeline->viewportState.sType = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO;
|
||||
pipeline->graphicsPipeline->multisampling.sType = VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO;
|
||||
pipeline->graphicsPipeline->colorBlending.sType = VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO;
|
||||
pipeline->graphicsPipeline->colorBlending.logicOpEnable = VK_FALSE;
|
||||
pipeline->graphicsPipeline->colorBlending.attachmentCount = 1;
|
||||
pipeline->graphicsPipeline->colorBlending.blendConstants[0] = 0.0f;
|
||||
pipeline->graphicsPipeline->colorBlending.blendConstants[1] = 0.0f;
|
||||
pipeline->graphicsPipeline->colorBlending.blendConstants[2] = 0.0f;
|
||||
pipeline->graphicsPipeline->colorBlending.blendConstants[3] = 0.0f;
|
||||
pipeline->graphicsPipeline->depthStencil.sType = VK_STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO;
|
||||
pipeline->graphicsPipeline->pipelineInfo.sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO;
|
||||
}
|
||||
|
||||
GN_EXPORT void gnGraphicsPipelineSetPrimativeFn(gnGraphicsPipeline& pipeline, gnPrimative primative) {
|
||||
vulkanCreateGraphicsPipeline(&pipeline);
|
||||
pipeline.primative = primative;
|
||||
|
||||
if (primative == GN_POINTS) pipeline.graphicsPipeline->inputAssembly.topology = VK_PRIMITIVE_TOPOLOGY_POINT_LIST;
|
||||
else if (primative == GN_LINES) pipeline.graphicsPipeline->inputAssembly.topology = VK_PRIMITIVE_TOPOLOGY_LINE_LIST;
|
||||
else if (primative == GN_LINE_STRIP) pipeline.graphicsPipeline->inputAssembly.topology = VK_PRIMITIVE_TOPOLOGY_LINE_STRIP;
|
||||
else if (primative == GN_TRIANGLES) pipeline.graphicsPipeline->inputAssembly.topology = VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST;
|
||||
else { /* TODO: throw debugger runtime errors */ }
|
||||
|
||||
pipeline.graphicsPipeline->inputAssembly.primitiveRestartEnable = VK_FALSE;
|
||||
}
|
||||
|
||||
GN_EXPORT void gnGraphicsPipelineEnableDynamicStatesFn(gnGraphicsPipeline& pipeline, const gnBool enable) {
|
||||
vulkanCreateGraphicsPipeline(&pipeline);
|
||||
pipeline.graphicsPipeline->dynamicStatesEnabled = enable;
|
||||
|
||||
pipeline.graphicsPipeline->dynamicState.dynamicStateCount = static_cast<uint32_t>(pipeline.graphicsPipeline->dynamicStates.size());
|
||||
pipeline.graphicsPipeline->dynamicState.pDynamicStates = pipeline.graphicsPipeline->dynamicStates.data();
|
||||
}
|
||||
|
||||
GN_EXPORT void gnGraphicsPipelineEnableDynamicStateFn(gnGraphicsPipeline& pipeline, const gnDynamicState state) {
|
||||
vulkanCreateGraphicsPipeline(&pipeline);
|
||||
VkDynamicState dynamicState;
if (state == GN_DYNAMIC_STATE_VIEWPORT) dynamicState = VK_DYNAMIC_STATE_VIEWPORT;
else if (state == GN_DYNAMIC_STATE_SCISSOR) dynamicState = VK_DYNAMIC_STATE_SCISSOR;
else return; // unknown state: don't push an uninitialized value
pipeline.graphicsPipeline->dynamicStates.push_back(dynamicState);
|
||||
}
|
||||
|
||||
GN_EXPORT void _gnGraphicsPipelineSetViewportFn(gnGraphicsPipeline& pipeline, gnUInt2 position, gnUInt2 size, gnFloat minDepth, gnFloat maxDepth) {
|
||||
vulkanCreateGraphicsPipeline(&pipeline);
|
||||
pipeline.graphicsPipeline->viewport.x = position.x;
|
||||
pipeline.graphicsPipeline->viewport.y = position.y;
|
||||
pipeline.graphicsPipeline->viewport.width = (float)size.x;
|
||||
pipeline.graphicsPipeline->viewport.height = (float)size.y;
|
||||
pipeline.graphicsPipeline->viewport.minDepth = minDepth;
|
||||
pipeline.graphicsPipeline->viewport.maxDepth = maxDepth;
|
||||
pipeline.graphicsPipeline->viewportState.viewportCount = 1;
|
||||
|
||||
if (!pipeline.graphicsPipeline->dynamicStateEnabled(VK_DYNAMIC_STATE_VIEWPORT)) {
|
||||
pipeline.graphicsPipeline->viewportState.pViewports = &pipeline.graphicsPipeline->viewport;
|
||||
}
|
||||
}
|
||||
GN_EXPORT void gnGraphicsPipelineSetCropFn(gnGraphicsPipeline& pipeline, gnInt2 position, gnUInt2 size) {
|
||||
vulkanCreateGraphicsPipeline(&pipeline);
|
||||
pipeline.graphicsPipeline->scissor.offset = { position.x, position.y };
|
||||
pipeline.graphicsPipeline->scissor.extent = {size.x, size.y};
|
||||
pipeline.graphicsPipeline->viewportState.scissorCount = 1;
|
||||
|
||||
if (!pipeline.graphicsPipeline->dynamicStateEnabled(VK_DYNAMIC_STATE_SCISSOR)) {
|
||||
pipeline.graphicsPipeline->viewportState.pScissors = &pipeline.graphicsPipeline->scissor;
|
||||
}
|
||||
}
|
||||
GN_EXPORT void gnGraphicsPipelineSetDepthClampFn(gnGraphicsPipeline& pipeline, gnBool enableDepthClamp) {
|
||||
vulkanCreateGraphicsPipeline(&pipeline);
|
||||
pipeline.graphicsPipeline->rasterizer.depthClampEnable = (enableDepthClamp == true) ? VK_TRUE : VK_FALSE;
|
||||
}
|
||||
GN_EXPORT void gnGraphicsPipelineSetFillModeFn(gnGraphicsPipeline& pipeline, gnFillMode fillMode) {
|
||||
vulkanCreateGraphicsPipeline(&pipeline);
|
||||
VkPolygonMode polygonMode = VK_POLYGON_MODE_FILL;
if (fillMode == GN_POLYGON_FILLMODE_FILL) polygonMode = VK_POLYGON_MODE_FILL;
if (fillMode == GN_POLYGON_FILLMODE_LINES) polygonMode = VK_POLYGON_MODE_LINE;
if (fillMode == GN_POLYGON_FILLMODE_POINTS) polygonMode = VK_POLYGON_MODE_POINT;

pipeline.graphicsPipeline->rasterizer.polygonMode = polygonMode;
|
||||
}
|
||||
GN_EXPORT void gnGraphicsPipelineSetLineWidthFn(gnGraphicsPipeline& pipeline, gnFloat lineWidth) {
|
||||
vulkanCreateGraphicsPipeline(&pipeline);
|
||||
pipeline.graphicsPipeline->rasterizer.lineWidth = lineWidth;
|
||||
}
|
||||
GN_EXPORT void gnGraphicsPipelineSetCullModeFn(gnGraphicsPipeline& pipeline, gnCullMode cullMode, gnFrontFaceDirection direction) {
|
||||
vulkanCreateGraphicsPipeline(&pipeline);
|
||||
if(cullMode == GN_CULL_NONE) pipeline.graphicsPipeline->rasterizer.cullMode = VK_CULL_MODE_NONE;
else if(cullMode == GN_CULL_BACKFACE) pipeline.graphicsPipeline->rasterizer.cullMode = VK_CULL_MODE_BACK_BIT;
else if(cullMode == GN_CULL_FRONTFACE) pipeline.graphicsPipeline->rasterizer.cullMode = VK_CULL_MODE_FRONT_BIT;
|
||||
// else if(cullMode == GN_CULL_ALL) pipeline.graphicsPipeline->rasterizer.cullMode = VK_CULL_MODE_FRONT_AND_BACK;
|
||||
|
||||
pipeline.graphicsPipeline->rasterizer.frontFace = (direction == GN_CLOCKWISE) ? VK_FRONT_FACE_CLOCKWISE : VK_FRONT_FACE_COUNTER_CLOCKWISE;
|
||||
}
|
||||
GN_EXPORT void gnGraphicsPipelineSetMultisamplingFn(gnGraphicsPipeline& pipeline, gnBool enableMultisampling) {
|
||||
vulkanCreateGraphicsPipeline(&pipeline);
|
||||
if (enableMultisampling) { /* TODO: multisampling is not supported yet; the debugger should report an error here */ return; }
|
||||
|
||||
pipeline.graphicsPipeline->multisampling.sampleShadingEnable = VK_FALSE;
|
||||
pipeline.graphicsPipeline->multisampling.rasterizationSamples = VK_SAMPLE_COUNT_1_BIT;
|
||||
}
|
||||
|
||||
|
||||
GN_EXPORT void gnGraphicsPipelineSetColorBlendFn(gnGraphicsPipeline& pipeline, gnBool colorBlend) {
|
||||
vulkanCreateGraphicsPipeline(&pipeline);
|
||||
// if (colorBlend) { /* TODO: Debugger needs to throw some errors */ return; }
|
||||
|
||||
pipeline.graphicsPipeline->colorBlendAttachment.colorWriteMask = VK_COLOR_COMPONENT_R_BIT | VK_COLOR_COMPONENT_G_BIT | VK_COLOR_COMPONENT_B_BIT | VK_COLOR_COMPONENT_A_BIT;
|
||||
pipeline.graphicsPipeline->colorBlendAttachment.blendEnable = VK_TRUE;
|
||||
|
||||
pipeline.graphicsPipeline->colorBlendAttachment.colorBlendOp = VK_BLEND_OP_ADD;
|
||||
pipeline.graphicsPipeline->colorBlendAttachment.alphaBlendOp = VK_BLEND_OP_ADD;
|
||||
pipeline.graphicsPipeline->colorBlendAttachment.srcColorBlendFactor = VK_BLEND_FACTOR_SRC_ALPHA;
|
||||
pipeline.graphicsPipeline->colorBlendAttachment.srcAlphaBlendFactor = VK_BLEND_FACTOR_SRC_ALPHA;
|
||||
pipeline.graphicsPipeline->colorBlendAttachment.dstColorBlendFactor = VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA;
|
||||
pipeline.graphicsPipeline->colorBlendAttachment.dstAlphaBlendFactor = VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA;
|
||||
|
||||
pipeline.graphicsPipeline->colorBlending.pAttachments = &pipeline.graphicsPipeline->colorBlendAttachment;
|
||||
|
||||
}
|
||||
GN_EXPORT void gnGraphicsPipelineSetRenderPassFn(gnGraphicsPipeline& pipeline, gnRenderPass& renderpass) {
|
||||
vulkanCreateGraphicsPipeline(&pipeline);
|
||||
pipeline.renderPass = &renderpass;
|
||||
|
||||
pipeline.graphicsPipeline->pipelineInfo.renderPass = renderpass.renderpass->renderPass;
|
||||
pipeline.graphicsPipeline->pipelineInfo.subpass = 0;
|
||||
}
|
||||
|
||||
GN_EXPORT void gnGraphicsPipelineBindShaderFn(gnGraphicsPipeline& pipeline, const gnShader& shader) {
|
||||
vulkanCreateGraphicsPipeline(&pipeline);
|
||||
pipeline.graphicsPipeline->pipelineInfo.stageCount = gnListLength(shader.shaderModules);
|
||||
|
||||
for (int i = 0; i < gnListLength(shader.shaderModules); i++) {
|
||||
pipeline.graphicsPipeline->shaderStages.push_back(shader.shaderModules[i].shaderModule->stageCreateInfo);
|
||||
}
|
||||
|
||||
|
||||
pipeline.graphicsPipeline->pipelineInfo.pStages = pipeline.graphicsPipeline->shaderStages.data();
|
||||
}
|
||||
|
||||
GN_EXPORT void gnGraphicsPipelineSetVertexDescriptionFn(gnGraphicsPipeline& pipeline, const gnVertexDescription& vertexDescription) {
|
||||
vulkanCreateGraphicsPipeline(&pipeline);
|
||||
pipeline.graphicsPipeline->vertexInputInfo.sType = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO;
|
||||
pipeline.graphicsPipeline->vertexInputInfo.vertexBindingDescriptionCount = 1;
|
||||
pipeline.graphicsPipeline->vertexInputInfo.vertexAttributeDescriptionCount = vertexDescription.vertexDescription->attributeDescriptionCount;
|
||||
pipeline.graphicsPipeline->vertexInputInfo.pVertexBindingDescriptions = &vertexDescription.vertexDescription->bindingDescription;
|
||||
pipeline.graphicsPipeline->vertexInputInfo.pVertexAttributeDescriptions = vertexDescription.vertexDescription->attributeDescriptions;
|
||||
}
|
||||
|
||||
GN_EXPORT void gnGraphicsPipelineEnableDepthTestFn(gnGraphicsPipeline& pipeline, gnBool depthTest) {
|
||||
vulkanCreateGraphicsPipeline(&pipeline);
|
||||
pipeline.graphicsPipeline->depthStencil.depthTestEnable = (depthTest == true) ? VK_TRUE : VK_FALSE;
pipeline.graphicsPipeline->depthStencil.depthWriteEnable = (depthTest == true) ? VK_TRUE : VK_FALSE;
|
||||
pipeline.graphicsPipeline->depthStencil.depthCompareOp = VK_COMPARE_OP_LESS;
|
||||
pipeline.graphicsPipeline->depthStencil.depthBoundsTestEnable = VK_FALSE;
|
||||
pipeline.graphicsPipeline->depthStencil.minDepthBounds = 0.0f; // Optional
|
||||
pipeline.graphicsPipeline->depthStencil.maxDepthBounds = 1.0f; // Optional
|
||||
pipeline.graphicsPipeline->depthStencil.stencilTestEnable = VK_FALSE;
|
||||
pipeline.graphicsPipeline->depthStencil.front = {}; // Optional
|
||||
pipeline.graphicsPipeline->depthStencil.back = {}; // Optional
|
||||
}
|
||||
|
||||
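// Final build step: creates the VkPipelineLayout from the registered uniform layouts and push-constant
// ranges, then wires all the cached create-info structs into VkGraphicsPipelineCreateInfo and calls
// vkCreateGraphicsPipelines.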
GN_EXPORT gnReturnCode gnCreateGraphicsPipelineFn(gnGraphicsPipeline* pipeline, gnOutputDevice& outputDevice) {
|
||||
vulkanCreateGraphicsPipeline(pipeline);
|
||||
pipeline->graphicsPipeline->commandPool = &outputDevice.outputDevice->commandPool;
|
||||
|
||||
std::vector<VkDescriptorSetLayout> descriptorLayouts;
|
||||
for (int i = 0; i < pipeline->uniformLayouts.size(); i++) {
|
||||
descriptorLayouts.push_back(pipeline->uniformLayouts[i]->uniformLayout->setLayout);
|
||||
}
|
||||
|
||||
VkPipelineLayoutCreateInfo pipelineLayoutInfo{};
|
||||
pipelineLayoutInfo.sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO;
|
||||
pipelineLayoutInfo.setLayoutCount = static_cast<uint32_t>(pipeline->uniformLayouts.size());
|
||||
pipelineLayoutInfo.pSetLayouts = descriptorLayouts.data();
|
||||
|
||||
std::vector<VkPushConstantRange> pushConstantRanges = {};
|
||||
for (int i = 0; i < pipeline->pushConstants.size(); i++ ) {
|
||||
VkPushConstantRange range;
|
||||
|
||||
range.offset = pipeline->pushConstants[i]->offset;
|
||||
range.size = pipeline->pushConstants[i]->size;
|
||||
|
||||
int stageBit = 0;
|
||||
|
||||
if (gnContainsShaderStage(pipeline->pushConstants[i]->stage, GN_VERTEX_SHADER_MODULE)) stageBit |= VK_SHADER_STAGE_VERTEX_BIT;
|
||||
if (gnContainsShaderStage(pipeline->pushConstants[i]->stage, GN_FRAGMENT_SHADER_MODULE)) stageBit |= VK_SHADER_STAGE_FRAGMENT_BIT;
|
||||
|
||||
range.stageFlags = stageBit;
|
||||
|
||||
pushConstantRanges.push_back(range);
|
||||
}
|
||||
|
||||
pipelineLayoutInfo.pPushConstantRanges = pushConstantRanges.data();
|
||||
pipelineLayoutInfo.pushConstantRangeCount = pipeline->pushConstants.size();
|
||||
|
||||
if (vkCreatePipelineLayout(outputDevice.outputDevice->device, &pipelineLayoutInfo, nullptr, &pipeline->graphicsPipeline->pipelineLayout) != VK_SUCCESS) {
|
||||
return GN_FAILED;
|
||||
}
|
||||
|
||||
for (int i = 0; i < pipeline->pushConstants.size(); i++ ) {
|
||||
if (pipeline->pushConstants[i]->pushConstant == nullptr) { pipeline->pushConstants[i]->pushConstant = new gnPlatformPushConstant(); }
|
||||
pipeline->pushConstants[i]->pushConstant->pushConstantRange = pushConstantRanges[i];
|
||||
pipeline->pushConstants[i]->pushConstant->graphicsPipeline = pipeline;
|
||||
}
|
||||
|
||||
pipeline->graphicsPipeline->outputDevice = &outputDevice;
|
||||
|
||||
{
|
||||
pipeline->graphicsPipeline->pipelineInfo.pVertexInputState = &pipeline->graphicsPipeline->vertexInputInfo;
|
||||
pipeline->graphicsPipeline->pipelineInfo.pInputAssemblyState = &pipeline->graphicsPipeline->inputAssembly;
|
||||
pipeline->graphicsPipeline->pipelineInfo.pViewportState = &pipeline->graphicsPipeline->viewportState;
|
||||
pipeline->graphicsPipeline->pipelineInfo.pRasterizationState = &pipeline->graphicsPipeline->rasterizer;
|
||||
pipeline->graphicsPipeline->pipelineInfo.pMultisampleState = &pipeline->graphicsPipeline->multisampling;
|
||||
pipeline->graphicsPipeline->pipelineInfo.pColorBlendState = &pipeline->graphicsPipeline->colorBlending;
|
||||
pipeline->graphicsPipeline->pipelineInfo.pDynamicState = &pipeline->graphicsPipeline->dynamicState;
|
||||
pipeline->graphicsPipeline->pipelineInfo.pDepthStencilState = &pipeline->graphicsPipeline->depthStencil;
|
||||
|
||||
pipeline->graphicsPipeline->pipelineInfo.layout = pipeline->graphicsPipeline->pipelineLayout;
|
||||
|
||||
// for (int i = 0; i < pipeline->descriptorSet->descriptorSet->descriptorSets.size(); i++) {
|
||||
// (*pipeline->uniformBufferDescriptors)[i].bufferDescription->descriptorSet = pipeline->descriptorSet->descriptorSet->descriptorSets[i];
|
||||
// }
|
||||
|
||||
if (vkCreateGraphicsPipelines(outputDevice.outputDevice->device, VK_NULL_HANDLE, 1, &pipeline->graphicsPipeline->pipelineInfo, nullptr, &pipeline->graphicsPipeline->graphicsPipeline) != VK_SUCCESS) {
|
||||
return GN_FAILED;
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
return GN_SUCCESS;
|
||||
}
|
||||
GN_EXPORT void gnGraphicsPipelineAddPushConstantFn(gnGraphicsPipeline& pipeline, const gnPushConstant& pushConstant) {
|
||||
vulkanCreateGraphicsPipeline(&pipeline);
|
||||
pipeline.pushConstants.push_back(const_cast<gnPushConstant*>(&pushConstant));
|
||||
}
|
||||
GN_EXPORT void gnGraphicsPipelineSetUniformBufferDescriptorsFn(gnGraphicsPipeline& pipeline, const std::vector<gnBufferDescription>& bufferDescriptions) {
|
||||
vulkanCreateGraphicsPipeline(&pipeline);
|
||||
//graphicsPipeline.uniformBufferDescriptors = const_cast<std::vector<gnBufferDescription>*>(&bufferDescriptions);
|
||||
}
|
||||
GN_EXPORT void gnGraphicsPipelineAddUniformLayoutFn(gnGraphicsPipeline& pipeline, const gnUniformLayout& uniformLayout) {
|
||||
vulkanCreateGraphicsPipeline(&pipeline);
|
||||
pipeline.uniformLayouts.push_back(const_cast<gnUniformLayout*>(&uniformLayout));
|
||||
}
|
||||
GN_EXPORT void gnDestroyGraphicsPipelineFn(gnGraphicsPipeline& pipeline) {
|
||||
vulkanCreateGraphicsPipeline(&pipeline);
|
||||
vkDestroyPipeline(pipeline.graphicsPipeline->outputDevice->outputDevice->device, pipeline.graphicsPipeline->graphicsPipeline, nullptr);
|
||||
vkDestroyPipelineLayout(pipeline.graphicsPipeline->outputDevice->outputDevice->device, pipeline.graphicsPipeline->pipelineLayout, nullptr);
|
||||
}
|
@@ -1,47 +0,0 @@
|
||||
#pragma once
|
||||
#include "vulkan/vulkan.h"
|
||||
#include "core/graphics_pipeline/gryphn_graphics_pipeline.h"
|
||||
#include "core/output_device/gryphn_output_device.h"
|
||||
#include "../output_device/vulkan_output_devices.h"
|
||||
#include "vector"
|
||||
|
||||
struct gnPlatformGraphicsPipeline {
|
||||
VkPipelineDynamicStateCreateInfo dynamicState{};
|
||||
gnBool dynamicStatesEnabled = false;
|
||||
std::vector<VkDynamicState> dynamicStates = {};
|
||||
|
||||
VkPipelineVertexInputStateCreateInfo vertexInputInfo{};
|
||||
|
||||
VkPipelineInputAssemblyStateCreateInfo inputAssembly{};
|
||||
VkViewport viewport{};
|
||||
VkRect2D scissor{};
|
||||
VkPipelineRasterizationStateCreateInfo rasterizer{};
|
||||
|
||||
bool createdDynamicViewportState = false;
|
||||
VkPipelineViewportStateCreateInfo viewportState{};
|
||||
|
||||
VkPipelineMultisampleStateCreateInfo multisampling{};
|
||||
VkPipelineColorBlendAttachmentState colorBlendAttachment{};
|
||||
VkPipelineColorBlendStateCreateInfo colorBlending{};
|
||||
VkPipelineDepthStencilStateCreateInfo depthStencil{};
|
||||
|
||||
VkGraphicsPipelineCreateInfo pipelineInfo{};
|
||||
|
||||
std::vector<VkDynamicState> vulkanDynamicStates = {};
|
||||
std::vector<VkPipelineShaderStageCreateInfo> shaderStages = {};
|
||||
|
||||
bool dynamicStateEnabled(VkDynamicState dynamicState) {
|
||||
for (int i = 0; i < dynamicStates.size(); i++) {
|
||||
if (dynamicStates[i] == dynamicState) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
VkPipelineLayout pipelineLayout;
|
||||
VkPipeline graphicsPipeline;
|
||||
gnOutputDevice* outputDevice;
|
||||
|
||||
VkCommandPool* commandPool;
|
||||
};
|
@@ -1,115 +0,0 @@
|
||||
#include "vulkan_renderpass.h"
|
||||
#include "core/graphics_pipeline/gryphn_render_pass.h"
|
||||
#include "core/output_device/gryphn_output_device.h"
|
||||
#include <vulkan/vulkan.h>
|
||||
#include "vector"
|
||||
#include "../textures/vulkan_texture.h"
|
||||
#include "../presentation_queue/vulkan_presentation_queue.h"
|
||||
|
||||
struct gnPlatformRenderpassAttachment {
|
||||
VkAttachmentDescription attachment{};
|
||||
VkAttachmentReference attachmentRef{};
|
||||
};
|
||||
|
||||
void vulkanCreateSubpass(gnSubpass* subpass) {
|
||||
if (subpass->subpass == nullptr) subpass->subpass = new gnPlatformSubpass();
|
||||
|
||||
subpass->subpass->subpass.pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS;
|
||||
|
||||
subpass->subpass->dependency.srcSubpass = VK_SUBPASS_EXTERNAL;
|
||||
subpass->subpass->dependency.dstSubpass = 0;
|
||||
|
||||
subpass->subpass->dependency.srcStageMask = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
|
||||
subpass->subpass->dependency.srcAccessMask = 0;
|
||||
|
||||
subpass->subpass->dependency.dstStageMask = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
|
||||
subpass->subpass->dependency.dstAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
|
||||
|
||||
}
|
||||
|
||||
void vulkanCreateRenderpass(gnRenderPass* renderpass) {
|
||||
if (renderpass->renderpass == nullptr) renderpass->renderpass = new gnPlatformRenderPass();
|
||||
}
|
||||
|
||||
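// Translates the gryphn attachment list into VkAttachmentDescription / VkAttachmentReference pairs:
// GN_RGBA8 attachments use the swapchain surface format, GN_DEPTH_STENCIL picks a supported depth
// format, and a single subpass consumes the resulting references.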
GN_EXPORT gnReturnCode gnCreateRenderPassFn(gnRenderPass* renderPass, const gnOutputDevice& device) {
|
||||
vulkanCreateRenderpass(renderPass);
|
||||
renderPass->renderpass->outputDevice = const_cast<gnOutputDevice*>(&device);
|
||||
|
||||
std::vector<VkAttachmentDescription> attachments;
|
||||
std::vector<VkAttachmentReference> attachmentRefs;
|
||||
|
||||
for (int i = 0; i < renderPass->attachmentCount; i++) {
|
||||
VkAttachmentDescription attachment{};
|
||||
attachment.samples = VK_SAMPLE_COUNT_1_BIT;
|
||||
attachment.loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR;
|
||||
attachment.storeOp = VK_ATTACHMENT_STORE_OP_STORE;
|
||||
attachment.stencilLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
|
||||
attachment.stencilStoreOp = VK_ATTACHMENT_STORE_OP_DONT_CARE;
|
||||
attachment.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
|
||||
|
||||
|
||||
// attachment->renderpassAttachment->attachment.samples = VK_SAMPLE_COUNT_1_BIT;
|
||||
// attachment->renderpassAttachment->attachment.loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR;
|
||||
// attachment->renderpassAttachment->attachment.storeOp = VK_ATTACHMENT_STORE_OP_DONT_CARE;
|
||||
// attachment->renderpassAttachment->attachment.stencilLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
|
||||
// attachment->renderpassAttachment->attachment.stencilStoreOp = VK_ATTACHMENT_STORE_OP_DONT_CARE;
|
||||
// attachment->renderpassAttachment->attachment.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
|
||||
// attachment->renderpassAttachment->attachment.finalLayout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL;
|
||||
|
||||
VkAttachmentReference colorAttachmentRef{};
|
||||
colorAttachmentRef.attachment = i;
|
||||
|
||||
if (renderPass->presentationQueue == nullptr) {
|
||||
return gnReturnError(GN_FAILED_CREATE_RENDERPASS, "the presentation queue has not been set");
|
||||
}
|
||||
|
||||
if (renderPass->attachments[i].colorMode == GN_RGBA8) {
|
||||
attachment.format = renderPass->presentationQueue->presentationQueue->swapchainDetails.surfaceFormat.format;
|
||||
attachment.finalLayout = (renderPass->target == GN_SHADER_READ) ? VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL : VK_IMAGE_LAYOUT_PRESENT_SRC_KHR;
|
||||
colorAttachmentRef.layout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
|
||||
} else if (renderPass->attachments[i].colorMode == GN_DEPTH_STENCIL) {
|
||||
VkFormat format;
|
||||
findDepthFormat(device, format);
|
||||
|
||||
attachment.format = format;
|
||||
attachment.finalLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR;
|
||||
colorAttachmentRef.layout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL;
|
||||
}
|
||||
|
||||
attachments.push_back(attachment);
|
||||
attachmentRefs.push_back(colorAttachmentRef);
|
||||
}
|
||||
|
||||
VkSubpassDescription subpass{};
|
||||
subpass.pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS;
|
||||
subpass.colorAttachmentCount = 1;
|
||||
subpass.pColorAttachments = &attachmentRefs[0];
|
||||
subpass.pDepthStencilAttachment = &attachmentRefs[1];
|
||||
|
||||
VkSubpassDependency dependency{};
|
||||
dependency.srcSubpass = VK_SUBPASS_EXTERNAL;
|
||||
dependency.dstSubpass = 0;
|
||||
dependency.srcStageMask = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT | VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT;
|
||||
dependency.srcAccessMask = 0;
|
||||
dependency.dstStageMask = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT | VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT;
|
||||
dependency.dstAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT | VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
|
||||
|
||||
VkRenderPassCreateInfo renderPassInfo{};
|
||||
renderPassInfo.sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO;
|
||||
renderPassInfo.attachmentCount = static_cast<uint32_t>(attachments.size());
|
||||
renderPassInfo.pAttachments = attachments.data();
|
||||
renderPassInfo.subpassCount = 1;
|
||||
renderPassInfo.pSubpasses = &subpass;
|
||||
renderPassInfo.dependencyCount = 1;
|
||||
renderPassInfo.pDependencies = &dependency;
|
||||
|
||||
if (vkCreateRenderPass(device.outputDevice->device, &renderPassInfo, nullptr, &renderPass->renderpass->renderPass) != VK_SUCCESS) {
|
||||
return GN_FAILED;
|
||||
}
|
||||
|
||||
return GN_SUCCESS;
|
||||
}
|
||||
|
||||
GN_EXPORT void gnDestroyRenderPassFn(gnRenderPass& renderPass) {
|
||||
vkDestroyRenderPass(renderPass.renderpass->outputDevice->outputDevice->device, renderPass.renderpass->renderPass, nullptr);
|
||||
}
|
@@ -1,17 +0,0 @@
|
||||
#pragma once
|
||||
#include "vulkan/vulkan.h"
|
||||
#include "../output_device/vulkan_output_devices.h"
|
||||
|
||||
struct gnPlatformSubpass {
|
||||
VkSubpassDescription subpass{};
|
||||
VkSubpassDependency dependency{};
|
||||
};
|
||||
|
||||
struct gnPlatformRenderPass {
|
||||
VkRenderPass renderPass;
|
||||
gnOutputDevice* outputDevice;
|
||||
};
|
||||
|
||||
struct gnPlatformRenderPassFrame {
|
||||
VkRenderPassBeginInfo renderPassInfo{};
|
||||
};
|
@@ -1,18 +0,0 @@
|
||||
#include "core/graphics_pipeline/gryphn_render_pass_frame.h"
|
||||
#include "../framebuffers/vulkan_framebuffer.h"
|
||||
#include "vulkan_renderpass.h"
|
||||
|
||||
// void vkRenderPassFrame(gnRenderPassFrame* renderPassFrame) {
|
||||
// if (renderPassFrame->renderPassFrame == nullptr) renderPassFrame->renderPassFrame = new gnPlatformRenderPassFrame();
|
||||
// renderPassFrame->renderPassFrame->renderPassInfo.sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO;
|
||||
// }
|
||||
|
||||
// std::array<VkClearValue, 2> clearValues{};
|
||||
|
||||
// void gnRenderPassFrameSetClearColor(gnRenderPassFrame& frame, gnColor clearColor) {
|
||||
// clearValues[0].color = {{clearColor.r / 255.0f, clearColor.g / 255.0f, clearColor.b / 255.0f, clearColor.a}};
|
||||
// clearValues[1].depthStencil = {1.0f, 0};
|
||||
|
||||
// frame.renderPassFrame->renderPassInfo.clearValueCount = static_cast<uint32_t>(clearValues.size());
|
||||
// frame.renderPassFrame->renderPassInfo.pClearValues = clearValues.data();
|
||||
// }
|
@@ -1,4 +1,4 @@
|
||||
#include "vulkan_presentation_queue.h"
|
||||
#include <presentation_queue/vulkan_presentation_queue.h>
|
||||
#include "vulkan_swapchain_support.h"
|
||||
#include <output_device/vulkan_physical_device.h>
|
||||
#include "vulkan_surface/vulkan_surface.h"
|
||||
|
@@ -21,3 +21,15 @@ struct vkSwapchainSupportDetails_t vkGetSwapchainSupport(
|
||||
|
||||
return details;
|
||||
}
|
||||
|
||||
struct vkSwapchainDetails_t vkGetSwapchainDetails(
|
||||
const struct vkSwapchainSupportDetails_t supportDetails
|
||||
) {
|
||||
struct vkSwapchainDetails_t details;
|
||||
|
||||
// pick the surface format to use (sketch: assumes the support struct exposes a `formats` array
// alongside formatCount; prefer SRGB BGRA8, otherwise fall back to the first supported entry)
details.surfaceFormat = supportDetails.formats[0];
for (int i = 0; i < supportDetails.formatCount; i++) {
    if (supportDetails.formats[i].format == VK_FORMAT_B8G8R8A8_SRGB &&
        supportDetails.formats[i].colorSpace == VK_COLOR_SPACE_SRGB_NONLINEAR_KHR) {
        details.surfaceFormat = supportDetails.formats[i];
    }
}
|
||||
|
||||
return details;
|
||||
}
|
||||
|
@@ -11,7 +11,15 @@ typedef struct vkSwapchainSupportDetails_t {
|
||||
VkPresentModeKHR* presentModes;
|
||||
} vkSwapchainSupportDetails;
|
||||
|
||||
typedef struct vkSwapchainDetails_t {
|
||||
VkSurfaceFormatKHR surfaceFormat;
|
||||
} vkSwapchainDetails;
|
||||
|
||||
struct vkSwapchainSupportDetails_t vkGetSwapchainSupport(
|
||||
const VkPhysicalDevice device,
|
||||
const VkSurfaceKHR surface
|
||||
);
|
||||
|
||||
struct vkSwapchainDetails_t vkGetSwapchainDetails(
|
||||
const struct vkSwapchainSupportDetails_t supportDetails
|
||||
);
|
||||
|
@@ -1,9 +0,0 @@
|
||||
#pragma once
|
||||
#include <core/push_constant/gryphn_push_constant.h>
|
||||
#include <vulkan/vulkan.h>
|
||||
struct gnGraphicsPipeline;
|
||||
|
||||
struct gnPlatformPushConstant {
|
||||
VkPushConstantRange pushConstantRange;
|
||||
gnGraphicsPipeline* graphicsPipeline;
|
||||
};
|
@@ -1,35 +0,0 @@
|
||||
#include <vulkan/vulkan.h>
|
||||
#include "core/shaders/gryphn_shader.h"
|
||||
#include "vulkan_shader_module.h"
|
||||
|
||||
VkShaderStageFlagBits vulkanShaderModuleType(gnShaderModuleStage name) {
|
||||
if (name == GN_VERTEX_SHADER_MODULE) return VK_SHADER_STAGE_VERTEX_BIT;
|
||||
if (name == GN_FRAGMENT_SHADER_MODULE) return VK_SHADER_STAGE_FRAGMENT_BIT;
|
||||
return VK_SHADER_STAGE_VERTEX_BIT; // fall back to the vertex stage for unrecognised module stages
|
||||
}
|
||||
|
||||
GN_EXPORT gnReturnCode gnBuildShaderFn(gnShader* shader) {
|
||||
for (int i = 0; i < gnListLength(shader->shaderModules); i++) {
|
||||
gnShaderModule* module = gnListGetPtr(shader->shaderModules, i);
|
||||
// std::cout << "Building ";
|
||||
// if (module->shaderType == GN_VERTEX_SHADER_MODULE) std::cout << "GN_VERTEX_SHADER_MODULE";
|
||||
// if (module->shaderType == GN_FRAGMENT_SHADER_MODULE) std::cout << "GN_FRAGMENT_SHADER_MODULE";
|
||||
// std::cout << " shader\n";
|
||||
|
||||
|
||||
module->shaderModule->stageCreateInfo = {};
|
||||
module->shaderModule->stageCreateInfo.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO;
|
||||
module->shaderModule->stageCreateInfo.stage = vulkanShaderModuleType(module->shaderType);
|
||||
module->shaderModule->stageCreateInfo.module = module->shaderModule->module;
|
||||
module->shaderModule->stageCreateInfo.pName = "main";
|
||||
}
|
||||
|
||||
return GN_SUCCESS;
|
||||
}
|
||||
|
||||
GN_EXPORT void gnShaderDestroyModulesFn(gnShader& shader) {
|
||||
for (int i = 0; i < gnListLength(shader.shaderModules); i++) {
|
||||
gnShaderModule* module = gnListGetPtr(shader.shaderModules, i);
|
||||
gnDestroyShaderModule(*module);
|
||||
}
|
||||
}
|
@@ -1,6 +0,0 @@
|
||||
#pragma once
|
||||
#include "vulkan_shader_module.h"
|
||||
|
||||
struct gnPlatformShader {
|
||||
|
||||
};
|
@@ -1,34 +0,0 @@
|
||||
#include "core/shaders/gryphn_shader_module.h"
|
||||
#include <vulkan/vulkan.h>
|
||||
#include "vulkan_shader_module.h"
|
||||
#include "../output_device/vulkan_output_devices.h"
|
||||
|
||||
void vulkanShaderModule(gnShaderModule* shaderModule) {
|
||||
if (shaderModule->shaderModule == nullptr) shaderModule->shaderModule = new gnPlatformShaderModule();
|
||||
}
|
||||
|
||||
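// Wraps the SPIR-V bytes in a VkShaderModule for graphics-pipeline use; codeSize is in bytes and pCode
// must point at 4-byte-aligned SPIR-V words.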
GN_EXPORT gnReturnCode gnBuildShaderModuleFn(gnShaderModule* module, const gnOutputDevice& outputDevice) {
|
||||
vulkanShaderModule(module);
|
||||
if (module->codeSize == 0 || module->shaderData == nullptr) {
|
||||
// TODO: add in error codes so that I can pick up on these errors and not just return that the creation failed
|
||||
return GN_FAILED;
|
||||
}
|
||||
|
||||
if (module->shaderUse == GN_GRAPHICS_PIPELINE) {
|
||||
VkShaderModuleCreateInfo createInfo{};
|
||||
createInfo.sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO;
|
||||
createInfo.codeSize = module->codeSize;
|
||||
createInfo.pCode = reinterpret_cast<const uint32_t*>(module->shaderData);
|
||||
|
||||
if (vkCreateShaderModule(outputDevice.outputDevice->device, &createInfo, nullptr, &module->shaderModule->module) != VK_SUCCESS) {
|
||||
return GN_FAILED;
|
||||
}
|
||||
}
|
||||
|
||||
module->shaderModule->device = &outputDevice;
|
||||
return GN_SUCCESS;
|
||||
}
|
||||
|
||||
GN_EXPORT void gnDestroyShaderModuleFn(gnShaderModule& module) {
|
||||
vkDestroyShaderModule(const_cast<gnOutputDevice*>(module.shaderModule->device)->outputDevice->device, module.shaderModule->module, nullptr);
|
||||
}
|
@@ -1,10 +0,0 @@
|
||||
#include <vulkan/vulkan.h>
|
||||
#include "core/output_device/gryphn_output_device.h"
|
||||
#include <vulkan/vulkan_core.h>
|
||||
|
||||
struct gnPlatformShaderModule {
|
||||
VkShaderModule module;
|
||||
VkShaderEXT shader;
|
||||
VkPipelineShaderStageCreateInfo stageCreateInfo;
|
||||
const gnOutputDevice* device;
|
||||
};
|
@@ -1,27 +0,0 @@
|
||||
#include "vulkan_fence.h"
|
||||
#include "../output_device/vulkan_output_devices.h"
|
||||
|
||||
GN_EXPORT gnReturnCode gnCreateFenceFn(gnFence* fence, const gnOutputDevice& device) {
|
||||
if (fence->fence == nullptr) fence->fence = new gnPlatformFence();
|
||||
|
||||
VkFenceCreateInfo fenceInfo{};
|
||||
fenceInfo.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;
|
||||
fenceInfo.flags = VK_FENCE_CREATE_SIGNALED_BIT;
|
||||
|
||||
if (vkCreateFence(device.outputDevice->device, &fenceInfo, nullptr, &fence->fence->fence) != VK_SUCCESS) {
|
||||
return GN_FAILED;
|
||||
}
|
||||
|
||||
fence->fence->device = const_cast<gnOutputDevice*>(&device);
|
||||
|
||||
return GN_SUCCESS;
|
||||
}
|
||||
GN_EXPORT void gnWaitForFenceFn(const gnFence& fence) {
|
||||
vkWaitForFences(fence.fence->device->outputDevice->device, 1, &fence.fence->fence, VK_TRUE, UINT64_MAX);
|
||||
}
|
||||
GN_EXPORT void gnResetFenceFn(const gnFence& fence) {
|
||||
vkResetFences(fence.fence->device->outputDevice->device, 1, &fence.fence->fence);
|
||||
}
|
||||
GN_EXPORT void gnDestroyFenceFn(const gnFence& fence) {
|
||||
vkDestroyFence(fence.fence->device->outputDevice->device, fence.fence->fence, nullptr);
|
||||
}
|
@@ -1,8 +0,0 @@
|
||||
#include <gryphn/gryphn_utils.h>
|
||||
#include <vulkan/vulkan.h>
|
||||
#include <core/sync_objects/gryphn_fence.h>
|
||||
|
||||
struct gnPlatformFence {
|
||||
VkFence fence;
|
||||
gnOutputDevice* device;
|
||||
};
|
@@ -1,20 +0,0 @@
|
||||
#include "vulkan_sync_semaphore.h"
|
||||
|
||||
GN_EXPORT gnReturnCode gnCreateSyncSemaphoreFn(gnSyncSemaphore* semaphore, const gnOutputDevice& device) {
|
||||
if (semaphore->semaphore == nullptr) semaphore->semaphore = new gnPlatformSyncSemaphore();
|
||||
|
||||
VkSemaphoreCreateInfo semaphoreInfo{};
|
||||
semaphoreInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
|
||||
|
||||
if (vkCreateSemaphore(device.outputDevice->device, &semaphoreInfo, nullptr, &semaphore->semaphore->semaphore) != VK_SUCCESS) {
|
||||
return GN_FAILED;
|
||||
}
|
||||
|
||||
semaphore->semaphore->device = const_cast<gnOutputDevice*>(&device);
|
||||
|
||||
return GN_SUCCESS;
|
||||
}
|
||||
|
||||
GN_EXPORT void gnDestroySyncSemaphoreFn(const gnSyncSemaphore& semaphore) {
|
||||
vkDestroySemaphore(semaphore.semaphore->device->outputDevice->device, semaphore.semaphore->semaphore, nullptr);
|
||||
}
|
@@ -1,9 +0,0 @@
|
||||
#include <gryphn/gryphn_utils.h>
|
||||
#include <vulkan/vulkan.h>
|
||||
#include "../output_device/vulkan_output_devices.h"
|
||||
#include "core/sync_objects/gryphn_sync_semaphore.h"
|
||||
|
||||
struct gnPlatformSyncSemaphore {
|
||||
VkSemaphore semaphore;
|
||||
gnOutputDevice* device;
|
||||
};
@@ -1,238 +0,0 @@
#include <vulkan/vulkan.h>
#include "gryphn/gryphn_utils.h"
#include "vulkan_texture.h"
#include "core/textures/gryphn_texture.h"
#include "../output_device/vulkan_output_devices.h"
#include "../vertex_buffers/vulkan_buffers.h"

void vulkanTexture(gnTexture* texture) {
    if (texture->texture == nullptr) texture->texture = new gnPlatformTexture();
}

gnReturnCode findDepthFormat(const gnOutputDevice& outputDevice, VkFormat& format) {
    return findSupportedFormat(outputDevice,
        {VK_FORMAT_D32_SFLOAT, VK_FORMAT_D32_SFLOAT_S8_UINT, VK_FORMAT_D24_UNORM_S8_UINT},
        VK_IMAGE_TILING_OPTIMAL,
        VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT, format
    );
}

VkFormat findDepthFormat(const gnOutputDevice& outputDevice) {
    VkFormat format;
    findSupportedFormat(outputDevice,
        {VK_FORMAT_D32_SFLOAT, VK_FORMAT_D32_SFLOAT_S8_UINT, VK_FORMAT_D24_UNORM_S8_UINT},
        VK_IMAGE_TILING_OPTIMAL,
        VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT, format
    );
    return format;
}

VkFormat vulkanFormatFromGryphnFormat(const gnOutputDevice& outputDevice, gnColorMode colorMode) {
    switch (colorMode) {
        case GN_RED: return VK_FORMAT_R8_UNORM;
        case GN_RGB8: return VK_FORMAT_R8G8B8A8_SRGB;
        case GN_RGBA8: return VK_FORMAT_R8G8B8A8_SRGB;
        case GN_BGRA8: return VK_FORMAT_B8G8R8A8_SRGB;
        case GN_DEPTH_STENCIL: return findDepthFormat(outputDevice);
    }
    return VK_FORMAT_R8_UNORM;
}

GN_EXPORT void gnTextureDataFn(const gnTexture& texture, gnSize dataSize, const void* inputData) {
    texture.texture->size = dataSize;
    texture.texture->data = inputData;

    vulkanCreateBuffer(
        *texture.texture->outputDevice, texture.texture->size,
        VK_BUFFER_USAGE_TRANSFER_SRC_BIT, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT,
        texture.texture->stagingBuffer, texture.texture->stagingBufferMemory
    );

    void* data;
    vkMapMemory(texture.texture->outputDevice->outputDevice->device, texture.texture->stagingBufferMemory, 0, texture.texture->size, 0, &data);
    memcpy(data, texture.texture->data, texture.texture->size);
    vkUnmapMemory(texture.texture->outputDevice->outputDevice->device, texture.texture->stagingBufferMemory);

    VkFormat format = vulkanFormatFromGryphnFormat(*texture.texture->outputDevice, texture.textureColorFormat);

    transitionImageLayout(*texture.texture->outputDevice, texture.texture->textureImage, format, VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);
    copyBufferToImage(*texture.texture->outputDevice, texture.texture->stagingBuffer, texture.texture->textureImage, texture.textureExtent.x, texture.textureExtent.y);
    transitionImageLayout(*texture.texture->outputDevice, texture.texture->textureImage, format, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL);

    vkDestroyBuffer(texture.texture->outputDevice->outputDevice->device, texture.texture->stagingBuffer, nullptr);
    vkFreeMemory(texture.texture->outputDevice->outputDevice->device, texture.texture->stagingBufferMemory, nullptr);
}
GN_EXPORT void gnTextureCubeMapDataFn(const gnTexture& texture, gnSize imageDataSize, void* face1, void* face2, void* face3, void* face4, void* face5, void* face6) {
    texture.texture->size = imageDataSize;
    uint32_t faceSize = imageDataSize / 6;
    if (vulkanCreateBuffer(
        *texture.texture->outputDevice, texture.texture->size,
        VK_BUFFER_USAGE_TRANSFER_SRC_BIT, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT,
        texture.texture->stagingBuffer, texture.texture->stagingBufferMemory
    ) != GN_SUCCESS) {
        // return GN_FAILED;
    }

    void* data;
    vkMapMemory(texture.texture->outputDevice->outputDevice->device, texture.texture->stagingBufferMemory, 0, texture.texture->size, 0, &data);

    memcpy((char*)data + (faceSize * 0), face1, faceSize);
    memcpy((char*)data + (faceSize * 1), face2, faceSize);
    memcpy((char*)data + (faceSize * 2), face3, faceSize);
    memcpy((char*)data + (faceSize * 3), face4, faceSize);
    memcpy((char*)data + (faceSize * 4), face5, faceSize);
    memcpy((char*)data + (faceSize * 5), face6, faceSize);

    vkUnmapMemory(texture.texture->outputDevice->outputDevice->device, texture.texture->stagingBufferMemory);

    VkFormat format = vulkanFormatFromGryphnFormat(*texture.texture->outputDevice, texture.textureColorFormat);
    transitionImageLayout(*texture.texture->outputDevice, texture.texture->textureImage, format, VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 6);
    copyBufferToImage(*texture.texture->outputDevice, texture.texture->stagingBuffer, texture.texture->textureImage, texture.textureExtent.x, texture.textureExtent.y, 6);
    transitionImageLayout(*texture.texture->outputDevice, texture.texture->textureImage, format, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL, 6);

    vkDestroyBuffer(texture.texture->outputDevice->outputDevice->device, texture.texture->stagingBuffer, nullptr);
    vkFreeMemory(texture.texture->outputDevice->outputDevice->device, texture.texture->stagingBufferMemory, nullptr);
}

void gnTextureSubData(const gnTexture& texture, gnSize offset, gnSize dataSize, void* inputData) {
    texture.texture->size = texture.dataSize;

    if (vulkanCreateBuffer(
        *texture.texture->outputDevice, texture.texture->size,
        VK_BUFFER_USAGE_TRANSFER_SRC_BIT, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT,
        texture.texture->stagingBuffer, texture.texture->stagingBufferMemory
    ) != GN_SUCCESS) {
        // return GN_FAILED;
    }

    void* data;
    vkMapMemory(texture.texture->outputDevice->outputDevice->device, texture.texture->stagingBufferMemory, 0, texture.texture->size, 0, &data);
    memcpy((char*)data + offset, inputData, dataSize);
    vkUnmapMemory(texture.texture->outputDevice->outputDevice->device, texture.texture->stagingBufferMemory);

    VkFormat format = vulkanFormatFromGryphnFormat(*texture.texture->outputDevice, texture.textureColorFormat);
    transitionImageLayout(*texture.texture->outputDevice, texture.texture->textureImage, format, VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);
    copyBufferToImage(*texture.texture->outputDevice, texture.texture->stagingBuffer, texture.texture->textureImage, texture.textureExtent.x, texture.textureExtent.y);
    transitionImageLayout(*texture.texture->outputDevice, texture.texture->textureImage, format, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL);

    vkDestroyBuffer(texture.texture->outputDevice->outputDevice->device, texture.texture->stagingBuffer, nullptr);
    vkFreeMemory(texture.texture->outputDevice->outputDevice->device, texture.texture->stagingBufferMemory, nullptr);
}

GN_EXPORT gnReturnCode gnCreateTextureFn(gnTexture* texture, const gnOutputDevice& outputDevice) {
    if (texture->texture == nullptr) texture->texture = new gnPlatformTexture();
    texture->texture->outputDevice = const_cast<gnOutputDevice*>(&outputDevice);

    // VkFormat textureFormat

    if (texture->textureType == GN_TEXTURE_2D) {
        if (texture->textureColorFormat == GN_RGBA8) {
            gnReturnCode res = vulkanCreateImage(outputDevice, texture->textureExtent.x, texture->textureExtent.y,
                VK_FORMAT_R8G8B8A8_SRGB, VK_IMAGE_TILING_OPTIMAL, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_SAMPLED_BIT, VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT,
                texture->texture->textureImage, texture->texture->textureImageMemory);
            if (res != GN_SUCCESS) return res;

            gnReturnCode errorCode = createImageView(outputDevice, texture->texture->textureImage, VK_FORMAT_R8G8B8A8_SRGB, VK_IMAGE_ASPECT_COLOR_BIT, &texture->texture->textureImageView);
            if (errorCode != GN_SUCCESS) return errorCode;
        } else if (texture->textureColorFormat == GN_RGB8) {
            VkFormat textureFormat = VK_FORMAT_R8G8B8A8_SRGB;

            // if (formatSupported(outputDevice,
            //     VK_FORMAT_R8G8B8_SRGB,
            //     VK_IMAGE_TILING_OPTIMAL,
            //     VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT
            // )) {
            //     textureFormat = VK_FORMAT_R8G8B8_SRGB;
            // }

            if (vulkanCreateImage(outputDevice, texture->textureExtent.x, texture->textureExtent.y,
                textureFormat, VK_IMAGE_TILING_LINEAR, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_SAMPLED_BIT, VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT,
                texture->texture->textureImage, texture->texture->textureImageMemory) != GN_SUCCESS) {
                return GN_FAILED;
            }

            gnReturnCode errorCode = createImageView(outputDevice, texture->texture->textureImage, textureFormat, VK_IMAGE_ASPECT_COLOR_BIT, &texture->texture->textureImageView);
            if (errorCode != GN_SUCCESS) return errorCode;
        } else if (texture->textureColorFormat == GN_RED) {
            if (vulkanCreateImage(outputDevice, texture->textureExtent.x, texture->textureExtent.y,
                VK_FORMAT_R8_UNORM, VK_IMAGE_TILING_OPTIMAL, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_SAMPLED_BIT, VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT,
                texture->texture->textureImage, texture->texture->textureImageMemory) != GN_SUCCESS) {
                return GN_FAILED;
            }

            gnReturnCode errorCode = createImageView(outputDevice, texture->texture->textureImage, VK_FORMAT_R8_UNORM, VK_IMAGE_ASPECT_COLOR_BIT, &texture->texture->textureImageView);
            if (errorCode != GN_SUCCESS) return errorCode;
        } else if (texture->textureColorFormat == GN_BGRA8) {
            if (vulkanCreateImage(outputDevice, texture->textureExtent.x, texture->textureExtent.y,
                VK_FORMAT_B8G8R8A8_SRGB, VK_IMAGE_TILING_OPTIMAL, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_SAMPLED_BIT, VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT,
                texture->texture->textureImage, texture->texture->textureImageMemory, VK_IMAGE_LAYOUT_UNDEFINED) != GN_SUCCESS) {
                return GN_FAILED;
            }

            gnReturnCode errorCode = createImageView(outputDevice, texture->texture->textureImage, VK_FORMAT_B8G8R8A8_SRGB, VK_IMAGE_ASPECT_COLOR_BIT, &texture->texture->textureImageView);
            if (errorCode != GN_SUCCESS) return errorCode;
            //GN_ERROR_CODE code = transitionImageLayout(*texture.texture->outputDevice, texture.texture->textureImage, VK_FORMAT_R8G8B8A8_SRGB, VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);

            gnReturnCode transitionErrorCode = transitionImageLayout(outputDevice, texture->texture->textureImage, VK_FORMAT_B8G8R8A8_SRGB, VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL);
            if (transitionErrorCode != GN_SUCCESS) return transitionErrorCode;
        } else if (texture->textureColorFormat == GN_DEPTH_STENCIL) {
            VkFormat depthFormat;
            gnReturnCode depthFormatReturnCode = findDepthFormat(outputDevice, depthFormat);
            if (depthFormatReturnCode != GN_SUCCESS) return depthFormatReturnCode;

            if (vulkanCreateImage(outputDevice, texture->textureExtent.x, texture->textureExtent.y,
                depthFormat, VK_IMAGE_TILING_OPTIMAL, VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT, VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT,
                texture->texture->textureImage, texture->texture->textureImageMemory) != GN_SUCCESS) {
                return gnReturnError(GN_FAILED_TO_CREATE_IMAGE, "Failed to create depth image");
            }

            gnReturnCode errorCode = createImageView(outputDevice, texture->texture->textureImage, depthFormat, VK_IMAGE_ASPECT_DEPTH_BIT, &texture->texture->textureImageView);
            if (errorCode != GN_SUCCESS) return errorCode;
            if (transitionImageLayout(outputDevice, texture->texture->textureImage, depthFormat, VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL) != GN_SUCCESS) return gnReturnError(GN_FAILED_TO_CREATE_IMAGE, "Failed to transition image layout");
        }
    } else {
        if (vulkanCreateCubeMap(outputDevice, texture->textureExtent.x, texture->textureExtent.y, texture->texture->textureImage, texture->texture->textureImageMemory) != GN_SUCCESS)
            return GN_FAILED;
        if (vulkanCreateCubeMapImageView(outputDevice, texture->texture->textureImage, &texture->texture->textureImageView) != GN_SUCCESS)
            return GN_FAILED;
    }

    {
        VkSamplerCreateInfo samplerInfo{};
        samplerInfo.sType = VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO;
        samplerInfo.magFilter = (texture->magFilter == GN_FILTER_LINEAR) ? VK_FILTER_LINEAR : VK_FILTER_NEAREST;
        samplerInfo.minFilter = (texture->minFilter == GN_FILTER_LINEAR) ? VK_FILTER_LINEAR : VK_FILTER_NEAREST;

        samplerInfo.addressModeU = VK_SAMPLER_ADDRESS_MODE_REPEAT;
        samplerInfo.addressModeV = VK_SAMPLER_ADDRESS_MODE_REPEAT;
        samplerInfo.addressModeW = VK_SAMPLER_ADDRESS_MODE_REPEAT;

        samplerInfo.anisotropyEnable = VK_TRUE;

        VkPhysicalDeviceProperties properties{};
        vkGetPhysicalDeviceProperties(outputDevice.physicalOutputDevice->physicalOutputDevice->device, &properties);
        samplerInfo.maxAnisotropy = properties.limits.maxSamplerAnisotropy;
        samplerInfo.borderColor = VK_BORDER_COLOR_INT_OPAQUE_BLACK;
        samplerInfo.unnormalizedCoordinates = VK_FALSE;
        samplerInfo.compareEnable = VK_FALSE;
        samplerInfo.compareOp = VK_COMPARE_OP_ALWAYS;
        samplerInfo.mipmapMode = VK_SAMPLER_MIPMAP_MODE_LINEAR;
        samplerInfo.mipLodBias = 0.0f;
        samplerInfo.minLod = 0.0f;
        samplerInfo.maxLod = 0.0f;

        if (vkCreateSampler(outputDevice.outputDevice->device, &samplerInfo, nullptr, &texture->texture->textureSampler) != VK_SUCCESS)
            return gnReturnError(GN_FAILED_TO_CREATE_IMAGE, "Failed to create texture sampler");
    }

    return GN_SUCCESS;
}

GN_EXPORT void gnDestroyTextureFn(const gnTexture& texture) {
    vkDestroySampler(texture.texture->outputDevice->outputDevice->device, texture.texture->textureSampler, nullptr);
    vkDestroyImageView(texture.texture->outputDevice->outputDevice->device, texture.texture->textureImageView, nullptr);

    vkDestroyImage(texture.texture->outputDevice->outputDevice->device, texture.texture->textureImage, nullptr);
    vkFreeMemory(texture.texture->outputDevice->outputDevice->device, texture.texture->textureImageMemory, nullptr);
}
@@ -1,38 +0,0 @@
#pragma once
#include <vulkan/vulkan.h>
#include <vector>
#include "gryphn/gryphn_utils.h"

struct gnOutputDevice;

struct gnPlatformTexture {
    VkBuffer stagingBuffer;
    VkDeviceMemory stagingBufferMemory;

    VkImage textureImage;
    VkImageView textureImageView;
    VkDeviceMemory textureImageMemory;
    VkSampler textureSampler;

    gnSize size;
    const void* data;

    gnOutputDevice* outputDevice;

    bool swapchainImage = false;
};

gnReturnCode vulkanCreateImage(const gnOutputDevice& device,
    uint32_t width, uint32_t height, VkFormat format, VkImageTiling tiling, VkImageUsageFlags usage, VkMemoryPropertyFlags properties, VkImage& image, VkDeviceMemory& imageMemory
);
gnReturnCode vulkanCreateImage(const gnOutputDevice& device,
    uint32_t width, uint32_t height, VkFormat format, VkImageTiling tiling, VkImageUsageFlags usage, VkMemoryPropertyFlags properties, VkImage& image, VkDeviceMemory& imageMemory, VkImageLayout imageLayout
);
gnReturnCode vulkanCreateCubeMap(const gnOutputDevice& device, uint32_t width, uint32_t height, VkImage& image, VkDeviceMemory& imageMemory);
gnReturnCode vulkanCreateCubeMapImageView(const gnOutputDevice& outputDevice, VkImage image, VkImageView* imageView);

gnReturnCode createImageView(const gnOutputDevice& outputDevice, VkImage image, VkFormat format, VkImageAspectFlags aspectFlags, VkImageView* imageView);
gnReturnCode findSupportedFormat(const gnOutputDevice& outputDevice, const std::vector<VkFormat>& candidates, VkImageTiling tiling, VkFormatFeatureFlags features, VkFormat& outFormat);
gnReturnCode transitionImageLayout(const gnOutputDevice& outputDevice, VkImage image, VkFormat format, VkImageLayout oldLayout, VkImageLayout newLayout, int level_count = 1);
void copyBufferToImage(const gnOutputDevice& outputDevice, VkBuffer buffer, VkImage image, uint32_t width, uint32_t height, int level_count = 1);
gnReturnCode findDepthFormat(const gnOutputDevice& outputDevice, VkFormat& format);
bool formatSupported(const gnOutputDevice& outputDevice, const VkFormat format, VkImageTiling tiling, VkFormatFeatureFlags features);
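These helpers are meant to compose in a fixed order when bringing up a sampled color image: create the VkImage and its memory, give it a view, then move it into the layout the shaders expect. A rough sketch under the assumption that device is a valid gnOutputDevice (the variable names below are illustrative, not part of this diff):

    VkImage image;
    VkDeviceMemory imageMemory;
    VkImageView imageView;
    if (vulkanCreateImage(device, 512, 512, VK_FORMAT_R8G8B8A8_SRGB, VK_IMAGE_TILING_OPTIMAL,
            VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_SAMPLED_BIT,
            VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT, image, imageMemory) == GN_SUCCESS &&
        createImageView(device, image, VK_FORMAT_R8G8B8A8_SRGB, VK_IMAGE_ASPECT_COLOR_BIT, &imageView) == GN_SUCCESS) {
        // leave the image ready for sampling; uploads would transition to TRANSFER_DST first
        transitionImageLayout(device, image, VK_FORMAT_R8G8B8A8_SRGB,
            VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL);
    }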
@@ -1,284 +0,0 @@
#include <vulkan/vulkan.h>
#include "gryphn/gryphn_utils.h"
#include "vulkan_texture.h"
#include "core/textures/gryphn_texture.h"
#include "../vertex_buffers/vulkan_buffers.h"
#include "../output_device/vulkan_output_devices.h"
#include "../commands/vulkan_command_buffer.h"

void copyBufferToImage(const gnOutputDevice& outputDevice, VkBuffer buffer, VkImage image, uint32_t width, uint32_t height, int level_count) {
    VkCommandBuffer commandBuffer = beginSingleTimeCommands(outputDevice);

    VkBufferImageCopy region{};
    region.bufferOffset = 0;
    region.bufferRowLength = 0;
    region.bufferImageHeight = 0;

    region.imageSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
    region.imageSubresource.mipLevel = 0;
    region.imageSubresource.baseArrayLayer = 0;
    region.imageSubresource.layerCount = level_count;

    region.imageOffset = {0, 0, 0};
    region.imageExtent = { width, height, 1 };

    vkCmdCopyBufferToImage(
        commandBuffer,
        buffer,
        image,
        VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
        1,
        &region
    );

    endSingleTimeCommands(commandBuffer, outputDevice);
}

bool hasStencilComponent(VkFormat format) {
    return format == VK_FORMAT_D32_SFLOAT_S8_UINT || format == VK_FORMAT_D24_UNORM_S8_UINT;
}

gnReturnCode transitionImageLayout(const gnOutputDevice& outputDevice, VkImage image, VkFormat format, VkImageLayout oldLayout, VkImageLayout newLayout, int level_count) {
    VkCommandBuffer commandBuffer = beginSingleTimeCommands(outputDevice);

    VkImageMemoryBarrier barrier{};
    barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
    barrier.oldLayout = oldLayout;
    barrier.newLayout = newLayout;
    barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
    barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
    barrier.image = image;
    barrier.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
    barrier.subresourceRange.baseMipLevel = 0;
    barrier.subresourceRange.levelCount = 1;
    barrier.subresourceRange.baseArrayLayer = 0;
    barrier.subresourceRange.layerCount = level_count;

    VkPipelineStageFlags sourceStage;
    VkPipelineStageFlags destinationStage;

    if (oldLayout == VK_IMAGE_LAYOUT_UNDEFINED && newLayout == VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL) {
        barrier.srcAccessMask = 0;
        barrier.dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;

        sourceStage = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
        destinationStage = VK_PIPELINE_STAGE_TRANSFER_BIT;
    } else if (oldLayout == VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL && newLayout == VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL) {
        barrier.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
        barrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT;

        sourceStage = VK_PIPELINE_STAGE_TRANSFER_BIT;
        destinationStage = VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
    } else if (oldLayout == VK_IMAGE_LAYOUT_UNDEFINED && newLayout == VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL) {
        barrier.srcAccessMask = 0;
        barrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT;

        sourceStage = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
        destinationStage = VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
    } else if (oldLayout == VK_IMAGE_LAYOUT_UNDEFINED && newLayout == VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL) {
        barrier.srcAccessMask = 0;
        barrier.dstAccessMask = VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT | VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;

        sourceStage = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
        destinationStage = VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT;
    } else {
        return GN_FAILED;
    }

    if (newLayout == VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL) {
        barrier.subresourceRange.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT;

        if (hasStencilComponent(format)) {
            barrier.subresourceRange.aspectMask |= VK_IMAGE_ASPECT_STENCIL_BIT;
        }
    } else {
        barrier.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
    }

    vkCmdPipelineBarrier(
        commandBuffer,
        sourceStage, destinationStage,
        0,
        0, nullptr,
        0, nullptr,
        1, &barrier
    );

    endSingleTimeCommands(commandBuffer, outputDevice);

    return GN_SUCCESS;
}

gnReturnCode vulkanCreateCubeMap(const gnOutputDevice& outputDevice, uint32_t width, uint32_t height, VkImage& image, VkDeviceMemory& imageMemory) {
    VkImageCreateInfo imageInfo{};
    imageInfo.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
    imageInfo.imageType = VK_IMAGE_TYPE_2D;
    imageInfo.format = VK_FORMAT_R8G8B8A8_SRGB;
    imageInfo.mipLevels = 1;
    imageInfo.samples = VK_SAMPLE_COUNT_1_BIT;
    imageInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
    imageInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
    imageInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
    imageInfo.extent = { width, height, 1 };
    imageInfo.usage = VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_SAMPLED_BIT;
    imageInfo.arrayLayers = 6;
    imageInfo.flags = VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT; // Optional

    if (vkCreateImage(outputDevice.outputDevice->device, &imageInfo, nullptr, &image) != VK_SUCCESS) {
        return GN_FAILED;
    }

    VkMemoryRequirements memRequirements;
    vkGetImageMemoryRequirements(outputDevice.outputDevice->device, image, &memRequirements);

    VkMemoryAllocateInfo allocInfo{};
    allocInfo.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
    allocInfo.allocationSize = memRequirements.size;

    uint32_t memoryTypeIndex;
    if (findMemoryType(
        outputDevice.physicalOutputDevice->physicalOutputDevice->device,
        memRequirements.memoryTypeBits, VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT,
        &memoryTypeIndex) != GN_SUCCESS) {
        return GN_FAILED;
    }
    allocInfo.memoryTypeIndex = memoryTypeIndex;

    if (vkAllocateMemory(outputDevice.outputDevice->device, &allocInfo, nullptr, &imageMemory) != VK_SUCCESS) {
        return GN_FAILED;
    }

    vkBindImageMemory(outputDevice.outputDevice->device, image, imageMemory, 0);

    return GN_SUCCESS;
}

gnReturnCode vulkanCreateImage(const gnOutputDevice& outputDevice,
    uint32_t width, uint32_t height,
    VkFormat format, VkImageTiling tiling, VkImageUsageFlags usage, VkMemoryPropertyFlags properties,
    VkImage& image, VkDeviceMemory& imageMemory, VkImageLayout imageLayout
) {
    VkImageCreateInfo imageInfo{};
    imageInfo.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
    imageInfo.imageType = VK_IMAGE_TYPE_2D;
    imageInfo.extent.width = static_cast<uint32_t>(width);
    imageInfo.extent.height = static_cast<uint32_t>(height);
    imageInfo.extent.depth = 1;
    imageInfo.mipLevels = 1;
    imageInfo.arrayLayers = 1;

    imageInfo.format = format;
    imageInfo.tiling = tiling;
    imageInfo.initialLayout = imageLayout;
    imageInfo.usage = usage;
    imageInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
    imageInfo.samples = VK_SAMPLE_COUNT_1_BIT;
    imageInfo.flags = 0; // Optional

    VkResult result = vkCreateImage(outputDevice.outputDevice->device, &imageInfo, nullptr, &image);
    if (result != VK_SUCCESS) {
        return gnReturnError(GN_FAILED_TO_CREATE_IMAGE, std::to_string(result).c_str());
    }

    VkMemoryRequirements memRequirements;
    vkGetImageMemoryRequirements(outputDevice.outputDevice->device, image, &memRequirements);

    VkMemoryAllocateInfo allocInfo{};
    allocInfo.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
    allocInfo.allocationSize = memRequirements.size;

    uint32_t memoryTypeIndex;
    if (findMemoryType(
        outputDevice.physicalOutputDevice->physicalOutputDevice->device,
        memRequirements.memoryTypeBits, properties,
        &memoryTypeIndex) != GN_SUCCESS) {
        return gnReturnError(GN_FAILED_TO_CREATE_IMAGE, "Failed to find memory type");
    }
    allocInfo.memoryTypeIndex = memoryTypeIndex;

    if (vkAllocateMemory(outputDevice.outputDevice->device, &allocInfo, nullptr, &imageMemory) != VK_SUCCESS) {
        return gnReturnError(GN_FAILED_TO_CREATE_IMAGE, "Failed to allocate memory");
    }

    vkBindImageMemory(outputDevice.outputDevice->device, image, imageMemory, 0);

    return GN_SUCCESS;
}

gnReturnCode vulkanCreateImage(const gnOutputDevice& outputDevice,
    uint32_t width, uint32_t height, VkFormat format, VkImageTiling tiling, VkImageUsageFlags usage, VkMemoryPropertyFlags properties, VkImage& image, VkDeviceMemory& imageMemory
) {
    return vulkanCreateImage(outputDevice, width, height, format, tiling, usage, properties, image, imageMemory, VK_IMAGE_LAYOUT_UNDEFINED);
}

gnReturnCode vulkanCreateCubeMapImageView(const gnOutputDevice& outputDevice, VkImage image, VkImageView* imageView) {
    VkImageViewCreateInfo viewInfo{};
    viewInfo.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
    viewInfo.image = image;
    viewInfo.viewType = VK_IMAGE_VIEW_TYPE_CUBE;
    viewInfo.format = VK_FORMAT_R8G8B8A8_SRGB;
    viewInfo.subresourceRange = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1 };
    viewInfo.subresourceRange.layerCount = 6;
    viewInfo.subresourceRange.levelCount = 1;

    if (vkCreateImageView(outputDevice.outputDevice->device, &viewInfo, nullptr, imageView) != VK_SUCCESS) {
        return GN_FAILED;
    }

    return GN_SUCCESS;
}

gnReturnCode createImageView(const gnOutputDevice& outputDevice, VkImage image, VkFormat format, VkImageAspectFlags aspectFlags, VkImageView* imageView) {
    VkImageViewCreateInfo viewInfo{};
    viewInfo.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
    viewInfo.image = image;
    viewInfo.viewType = VK_IMAGE_VIEW_TYPE_2D;
    viewInfo.format = format;
    viewInfo.subresourceRange.aspectMask = aspectFlags;
    viewInfo.subresourceRange.baseMipLevel = 0;
    viewInfo.subresourceRange.levelCount = 1;
    viewInfo.subresourceRange.baseArrayLayer = 0;
    viewInfo.subresourceRange.layerCount = 1;

    VkResult result = vkCreateImageView(outputDevice.outputDevice->device, &viewInfo, nullptr, imageView);
    // std::cout << "Result:" << result << "\n";
    if (result != VK_SUCCESS) {
        return GN_FAILED;
    }

    return GN_SUCCESS;
}

gnReturnCode findSupportedFormat(const gnOutputDevice& outputDevice, const std::vector<VkFormat>& candidates, VkImageTiling tiling, VkFormatFeatureFlags features, VkFormat& outFormat) {
    for (VkFormat format : candidates) {
        VkFormatProperties props;
        vkGetPhysicalDeviceFormatProperties(outputDevice.physicalOutputDevice->physicalOutputDevice->device, format, &props);

        if (tiling == VK_IMAGE_TILING_LINEAR && (props.linearTilingFeatures & features) == features) {
            outFormat = format;
            return GN_SUCCESS;
        } else if (tiling == VK_IMAGE_TILING_OPTIMAL && (props.optimalTilingFeatures & features) == features) {
            outFormat = format;
            return GN_SUCCESS;
        }
    }

    return GN_FAILED;
}

bool formatSupported(const gnOutputDevice& outputDevice, const VkFormat format, VkImageTiling tiling, VkFormatFeatureFlags features) {
    VkFormatProperties props;
    vkGetPhysicalDeviceFormatProperties(outputDevice.physicalOutputDevice->physicalOutputDevice->device, format, &props);

    if (tiling == VK_IMAGE_TILING_LINEAR && (props.linearTilingFeatures & features) == features) {
        return true;
    } else if (tiling == VK_IMAGE_TILING_OPTIMAL && (props.optimalTilingFeatures & features) == features) {
        return true;
    }
    return false;
}
@@ -1,26 +0,0 @@
#include "vulkan/vulkan.h"
#include "gryphn/gryphn_utils.h"
#include "core/textures/gryphn_texture.h"
#include "core/uniform_descriptor/sampler/gryphn_sampler.h"
#include "../../textures/vulkan_texture.h"
#include "../../output_device/vulkan_output_devices.h"
#include "../vulkan_uniform.h"

GN_EXPORT void gnUpdateSamplerUniformFn(gnSamplerUniform& sampler, const gnOutputDevice& outputDevice) {
    VkWriteDescriptorSet descriptorWrite{};
    descriptorWrite.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
    descriptorWrite.dstSet = sampler.uniform->uniform->descriptorSets[sampler.index];
    descriptorWrite.dstBinding = sampler.binding;
    descriptorWrite.dstArrayElement = 0;

    descriptorWrite.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
    descriptorWrite.descriptorCount = 1;

    VkDescriptorImageInfo imageInfo{};
    imageInfo.imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
    imageInfo.imageView = sampler.texture->texture->textureImageView;
    imageInfo.sampler = sampler.texture->texture->textureSampler;
    descriptorWrite.pImageInfo = &imageInfo;

    vkUpdateDescriptorSets(outputDevice.outputDevice->device, 1, &descriptorWrite, 0, nullptr);
}
@@ -1,65 +0,0 @@
#include <vulkan/vulkan.h>
#include <gryphn/gryphn_utils.h>
#include "core/buffers/gryphn_buffer.h"
#include "core/uniform_descriptor/uniform_buffer/gryphn_uniform_buffer.h"
#include "../../output_device/vulkan_output_devices.h"
#include "../vulkan_uniform.h"
#include "../../vertex_buffers/vulkan_buffers.h"

GN_EXPORT void gnUpdateBufferUniformFn(gnBufferUniform& uniformBuffer, const gnOutputDevice& outputDevice) {
    VkDescriptorBufferInfo bufferInfo{};
    bufferInfo.buffer = uniformBuffer.buffer->buffer->buffer;
    bufferInfo.offset = 0;
    bufferInfo.range = uniformBuffer.size;

    VkWriteDescriptorSet descriptorWrite{};
    descriptorWrite.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
    descriptorWrite.dstSet = uniformBuffer.uniform->uniform->descriptorSets[uniformBuffer.index];
    descriptorWrite.dstBinding = uniformBuffer.binding;
    descriptorWrite.dstArrayElement = 0;

    descriptorWrite.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
    descriptorWrite.descriptorCount = 1;

    descriptorWrite.pBufferInfo = &bufferInfo;

    vkUpdateDescriptorSets(outputDevice.outputDevice->device, 1, &descriptorWrite, 0, nullptr);
}

// #include "vulkan/vulkan.h"
// #include "src/uniform_descriptor/uniform_buffer/gryphn_uniform_buffer.h"
// #include "vertex_buffers/vulkan_buffers.h"
// #include "vertex_buffers/buffer_descriptor/vulkan_buffer_description.h"
// #include "output_device/vulkan_output_devices.h"
// #include "uniform_descriptor/vulkan_descriptor_set.h"

// struct gnPlatformUniformBuffer {
// };

// gnUniformBuffer::gnUniformBuffer() {
//     this->uniformBuffer = new gnPlatformUniformBuffer();
// }

// void gnUpdateUniformDescriptor(const gnUniformBuffer& uniformBuffer, const gnOutputDevice& outputDevice) {
//     VkDescriptorBufferInfo bufferInfo{};
//     bufferInfo.buffer = uniformBuffer.buffer->buffer->buffer;
//     bufferInfo.offset = 0;
//     bufferInfo.range = uniformBuffer.size;

//     VkWriteDescriptorSet descriptorWrite{};
//     descriptorWrite.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
//     descriptorWrite.dstSet = uniformBuffer.descriptorSet->descriptorSet->descriptorSets[uniformBuffer.index];
//     descriptorWrite.dstBinding = uniformBuffer.binding;
//     descriptorWrite.dstArrayElement = 0;

//     descriptorWrite.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
//     descriptorWrite.descriptorCount = 1;

//     descriptorWrite.pBufferInfo = &bufferInfo;
//     descriptorWrite.pImageInfo = nullptr; // Optional
//     descriptorWrite.pTexelBufferView = nullptr; // Optional

//     vkUpdateDescriptorSets(outputDevice.outputDevice->device, 1, &descriptorWrite, 0, nullptr);
// }
@@ -1,52 +0,0 @@
#include "vulkan_uniform.h"
#include "output_device/vulkan_output_devices.h"
#include "vulkan_uniform_layout.h"

void gnUniformSetCount(gnUniform& uniform, gnUInt count) {
    uniform.descriptorCount = count;
}
void gnUniformSetLayout(gnUniform& uniform, gnUniformLayout* uniformLayout) {
    uniform.uniformLayout = uniformLayout;
}
GN_EXPORT gnReturnCode gnCreateUniformFn(gnUniform* uniform, gnOutputDevice& outputDevice) {
    uniform->uniform = new gnPlatformUniform();
    uniform->uniform->outputDevice = &outputDevice;

    std::vector<VkDescriptorPoolSize> poolSizes;
    poolSizes.resize(uniform->uniformLayout->bindings.size());

    for (int i = 0; i < uniform->uniformLayout->bindings.size(); i++) {
        poolSizes[i].type = (uniform->uniformLayout->bindings[i].type == GN_UNIFORM_BUFFER_DESCRIPTOR) ? VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER : VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
        poolSizes[i].descriptorCount = static_cast<uint32_t>(uniform->descriptorCount);
    }

    VkDescriptorPoolCreateInfo poolInfo{};
    poolInfo.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO;
    poolInfo.poolSizeCount = static_cast<uint32_t>(poolSizes.size());
    poolInfo.pPoolSizes = poolSizes.data();
    poolInfo.maxSets = static_cast<uint32_t>(uniform->descriptorCount);

    if (vkCreateDescriptorPool(outputDevice.outputDevice->device, &poolInfo, nullptr, &uniform->uniform->descriptorPool) != VK_SUCCESS) {
        return GN_FAILED;
    }

    std::vector<VkDescriptorSetLayout> layouts(uniform->descriptorCount, uniform->uniformLayout->uniformLayout->setLayout);
    VkDescriptorSetAllocateInfo allocInfo{};
    allocInfo.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO;
    allocInfo.descriptorPool = uniform->uniform->descriptorPool;
    allocInfo.descriptorSetCount = static_cast<uint32_t>(uniform->descriptorCount);
    allocInfo.pSetLayouts = layouts.data();

    uniform->uniform->descriptorSets.resize(uniform->descriptorCount);

    if (vkAllocateDescriptorSets(outputDevice.outputDevice->device, &allocInfo, &uniform->uniform->descriptorSets[0]) != VK_SUCCESS) {
        return GN_FAILED;
    }

    return GN_SUCCESS;
}

GN_EXPORT void gnDestroyUniformFn(gnUniform& uniform) {
    vkDestroyDescriptorPool(uniform.uniform->outputDevice->outputDevice->device, uniform.uniform->descriptorPool, nullptr);
}
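How these uniform pieces fit together, as a sketch only: the frontend fills the layout's bindings, then the pool and one descriptor set per frame in flight are created. Assumes device is a created gnOutputDevice and that gnUniformLayout/gnUniform value-initialize cleanly; direct Fn calls stand in for the dispatch layer.

    gnUniformLayout layout{};
    // layout.bindings is populated by the caller before this point
    if (gnCreateUniformLayoutFn(&layout, device) == GN_SUCCESS) {
        gnUniform uniform{};
        gnUniformSetLayout(uniform, &layout);
        gnUniformSetCount(uniform, 2);              // one descriptor set per frame in flight
        if (gnCreateUniformFn(&uniform, device) == GN_SUCCESS) {
            // gnUpdateBufferUniformFn / gnUpdateSamplerUniformFn then point the sets at real resources
            gnDestroyUniformFn(uniform);
        }
        gnDestroyUniformLayoutFn(layout);
    }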
@@ -1,12 +0,0 @@
#pragma once
#include <vulkan/vulkan.h>
#include <vector>
#include <core/uniform_descriptor/gryphn_uniform.h>

struct gnOutputDevice;

struct gnPlatformUniform {
    VkDescriptorPool descriptorPool;
    gnOutputDevice* outputDevice;
    std::vector<VkDescriptorSet> descriptorSets;
};
@@ -1,40 +0,0 @@
#include "vulkan_uniform_layout.h"
#include "output_device/vulkan_output_devices.h"

GN_EXPORT gnReturnCode gnCreateUniformLayoutFn(gnUniformLayout* uniformLayout, gnOutputDevice& device) {
    uniformLayout->uniformLayout = new gnPlatformUniformLayout();

    uniformLayout->uniformLayout->outputDevice = &device;
    std::vector<VkDescriptorSetLayoutBinding> bindings(uniformLayout->bindings.size());

    for (int i = 0; i < uniformLayout->bindings.size(); i++) {
        bindings[i] = {};
        bindings[i].binding = uniformLayout->bindings[i].binding;
        bindings[i].descriptorType = (uniformLayout->bindings[i].type == GN_UNIFORM_BUFFER_DESCRIPTOR) ? VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER : VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
        bindings[i].descriptorCount = 1;

        int stageBit = 0;

        if (gnContainsShaderStage(uniformLayout->bindings[i].stage, GN_VERTEX_SHADER_MODULE)) stageBit |= VK_SHADER_STAGE_VERTEX_BIT;
        if (gnContainsShaderStage(uniformLayout->bindings[i].stage, GN_FRAGMENT_SHADER_MODULE)) stageBit |= VK_SHADER_STAGE_FRAGMENT_BIT;

        bindings[i].stageFlags = stageBit;
        bindings[i].pImmutableSamplers = nullptr;
    }

    VkDescriptorSetLayoutCreateInfo layoutInfo{};
    layoutInfo.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO;
    layoutInfo.bindingCount = static_cast<uint32_t>(uniformLayout->bindings.size());
    layoutInfo.pBindings = bindings.data();

    if (vkCreateDescriptorSetLayout(device.outputDevice->device, &layoutInfo, nullptr, &uniformLayout->uniformLayout->setLayout) != VK_SUCCESS) {
        return GN_FAILED;
    }

    return GN_SUCCESS;
}

GN_EXPORT void gnDestroyUniformLayoutFn(gnUniformLayout& uniformLayout) {
    vkDestroyDescriptorSetLayout(uniformLayout.uniformLayout->outputDevice->outputDevice->device, uniformLayout.uniformLayout->setLayout, nullptr);
}
@@ -1,12 +0,0 @@
#pragma once
#include "gryphn/gryphn.h"
#include <vulkan/vulkan.h>

struct gnPlatformUniformLayout {
    gnOutputDevice* outputDevice;
    VkDescriptorSetLayout setLayout;
};

struct gnPlatformUniformLayoutBinding {
    // nothing here
};
@@ -1,8 +0,0 @@
#pragma once
#include "core/buffers/uniform_buffer_descriptor/gryphn_buffer_description.h"
#include <vulkan/vulkan.h>

struct gnPlatformBufferDescription {
    VkDescriptorSet descriptorSet;
    gnGraphicsPipeline* graphicsPipeline;
};
@@ -1,9 +0,0 @@
#include <vulkan/vulkan.h>
#include "vulkan_buffer_description.h"

GN_EXPORT gnReturnCode gnCreateBufferDescriptionFn(gnBufferDescription* bufferDescription, const gnGraphicsPipeline& graphicsPipeline) {
    if (bufferDescription->bufferDescription == nullptr) bufferDescription->bufferDescription = new gnPlatformBufferDescription();

    bufferDescription->bufferDescription->graphicsPipeline = const_cast<gnGraphicsPipeline*>(&graphicsPipeline);
    return GN_SUCCESS;
}
@@ -1,38 +0,0 @@
#include <gryphn/gryphn_utils.h>
#include "core/buffers/vertex_descriptions/gryphn_binding_description.h"
#include "core/buffers/vertex_descriptions/gryphn_vertex_description.h"
#include "vulkan_vertex_description.h"

GN_EXPORT void gnVertexDescriptionSetPropertiesCountFn(gnVertexDescription& vertexDescription, int count) {
    if (vertexDescription.vertexDescription == nullptr) vertexDescription.vertexDescription = new gnPlatformVertexDescription();

    // A plain malloc'd array keeps this a simple C-style buffer; note there is currently no matching free in this backend.
    vertexDescription.vertexDescription->attributeDescriptions = (VkVertexInputAttributeDescription*)malloc(sizeof(VkVertexInputAttributeDescription) * count);
    vertexDescription.vertexDescription->attributeDescriptionCount = count;
}

GN_EXPORT void gnVertexDescriptionSetPropertyFn(gnVertexDescription& vertexDescription, int index, const gnVertexProperty& property) {
    if (vertexDescription.vertexDescription == nullptr) vertexDescription.vertexDescription = new gnPlatformVertexDescription();
    vertexDescription.vertexDescription->attributeDescriptions[index].binding = property.binding;
    vertexDescription.vertexDescription->attributeDescriptions[index].location = property.location;

    if (property.format == GN_FLOAT) vertexDescription.vertexDescription->attributeDescriptions[index].format = VK_FORMAT_R32_SFLOAT;
    else if (property.format == GN_FLOAT2) vertexDescription.vertexDescription->attributeDescriptions[index].format = VK_FORMAT_R32G32_SFLOAT;
    else if (property.format == GN_FLOAT3) vertexDescription.vertexDescription->attributeDescriptions[index].format = VK_FORMAT_R32G32B32_SFLOAT;
    else if (property.format == GN_FLOAT4) vertexDescription.vertexDescription->attributeDescriptions[index].format = VK_FORMAT_R32G32B32A32_SFLOAT;

    else if (property.format == GN_UINT) vertexDescription.vertexDescription->attributeDescriptions[index].format = VK_FORMAT_R32_UINT;

    vertexDescription.vertexDescription->attributeDescriptions[index].offset = property.offset;
}

GN_EXPORT void gnVertexDescriptionSetBindingDescriptionFn(gnVertexDescription& vertexDescription, const gnBindingDescription& description) {
    if (vertexDescription.vertexDescription == nullptr) vertexDescription.vertexDescription = new gnPlatformVertexDescription();
    vertexDescription.vertexDescription->bindingDescription.binding = description.binding;
    vertexDescription.vertexDescription->bindingDescription.stride = description.stride;
    vertexDescription.vertexDescription->bindingDescription.inputRate = VK_VERTEX_INPUT_RATE_VERTEX;
}

// gnVertexDescription::gnVertexDescription() {
//     vertexDescription = new gnPlatformVertexDescription();
// }
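A sketch of how a frontend might describe an interleaved position/uv vertex with these entry points. The Vertex struct, the direct Fn calls, and offsetof (from <cstddef>) are illustrative assumptions; only the field names binding, location, format, and offset come from the code above.

    struct Vertex { float position[2]; float uv[3]; };

    gnVertexDescription description{};
    gnBindingDescription binding{};
    binding.binding = 0;
    binding.stride = sizeof(Vertex);
    gnVertexDescriptionSetBindingDescriptionFn(description, binding);

    gnVertexDescriptionSetPropertiesCountFn(description, 2);

    gnVertexProperty position{};
    position.binding = 0; position.location = 0;
    position.format = GN_FLOAT2; position.offset = offsetof(Vertex, position);
    gnVertexDescriptionSetPropertyFn(description, 0, position);

    gnVertexProperty uv{};
    uv.binding = 0; uv.location = 1;
    uv.format = GN_FLOAT3; uv.offset = offsetof(Vertex, uv);
    gnVertexDescriptionSetPropertyFn(description, 1, uv);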
@@ -1,9 +0,0 @@
#pragma once
#include "vulkan/vulkan.h"

struct gnPlatformVertexDescription {
    VkVertexInputBindingDescription bindingDescription{};
    VkVertexInputAttributeDescription* attributeDescriptions;

    uint32_t attributeDescriptionCount;
};
@@ -1,157 +0,0 @@
#include <gryphn/gryphn.h>
#include <vulkan/vulkan.h>
#include "core/buffers/gryphn_buffer.h"
#include "output_device/vulkan_output_devices.h"
#include "vulkan_buffers.h"
#include "commands/vulkan_command_buffer.h"

gnReturnCode findMemoryType(VkPhysicalDevice physicalDevice, uint32_t typeFilter, VkMemoryPropertyFlags properties, uint32_t* memoryType) {
    VkPhysicalDeviceMemoryProperties memProperties;
    vkGetPhysicalDeviceMemoryProperties(physicalDevice, &memProperties);

    for (uint32_t i = 0; i < memProperties.memoryTypeCount; i++) {
        if ((typeFilter & (1 << i)) && (memProperties.memoryTypes[i].propertyFlags & properties) == properties) {
            *memoryType = i;
            return GN_SUCCESS;
        }
    }

    return GN_FAILED;
}

void vkCopyBuffer(const gnOutputDevice& outputDevice, VkBuffer srcBuffer, VkBuffer dstBuffer, VkDeviceSize size) {
    VkCommandBuffer commandBuffer = beginSingleTimeCommands(outputDevice);

    VkBufferCopy copyRegion{};
    copyRegion.srcOffset = 0; // Optional
    copyRegion.dstOffset = 0; // Optional
    copyRegion.size = size;
    vkCmdCopyBuffer(commandBuffer, srcBuffer, dstBuffer, 1, &copyRegion);

    endSingleTimeCommands(commandBuffer, outputDevice);
}

gnReturnCode vulkanCreateBuffer(const gnOutputDevice& outputDevice, gnSize size, VkBufferUsageFlags usage, VkMemoryPropertyFlags properties, VkBuffer& buffer, VkDeviceMemory& bufferMemory) {
    VkBufferCreateInfo bufferInfo{};
    bufferInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
    bufferInfo.size = size;
    bufferInfo.usage = usage;
    bufferInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;

    if (vkCreateBuffer(outputDevice.outputDevice->device, &bufferInfo, nullptr, &buffer) != VK_SUCCESS) {
        return GN_FAILED;
    }

    VkMemoryRequirements memRequirements;
    vkGetBufferMemoryRequirements(outputDevice.outputDevice->device, buffer, &memRequirements);

    VkMemoryAllocateInfo allocInfo{};
    allocInfo.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
    allocInfo.allocationSize = memRequirements.size;
    uint32_t memoryTypeIndex;
    if (findMemoryType(
        outputDevice.physicalOutputDevice->physicalOutputDevice->device,
        memRequirements.memoryTypeBits, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT,
        &memoryTypeIndex) != GN_SUCCESS) {
        return GN_FAILED;
    }
    allocInfo.memoryTypeIndex = memoryTypeIndex;

    if (vkAllocateMemory(outputDevice.outputDevice->device, &allocInfo, nullptr, &bufferMemory) != VK_SUCCESS) {
        return GN_FAILED;
    }

    vkBindBufferMemory(outputDevice.outputDevice->device, buffer, bufferMemory, 0);

    return GN_SUCCESS;
}

GN_EXPORT gnReturnCode gnCreateBufferFn(gnBuffer* buffer, const gnOutputDevice& outputDevice) {
    if (buffer->buffer == nullptr) buffer->buffer = new gnPlatformBuffer();
    buffer->buffer->device = const_cast<gnOutputDevice*>(&outputDevice);

    vulkanCreateBuffer(
        *buffer->buffer->device, buffer->size,
        VK_BUFFER_USAGE_TRANSFER_SRC_BIT, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT,
        buffer->buffer->stagingBuffer, buffer->buffer->stagingBufferMemory
    );

    if (buffer->bufferType == GN_VERTEX_BUFFER) {
        return vulkanCreateBuffer(
            outputDevice, buffer->size,
            VK_BUFFER_USAGE_TRANSFER_DST_BIT | VK_BUFFER_USAGE_VERTEX_BUFFER_BIT, VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT,
            buffer->buffer->buffer, buffer->buffer->bufferMemory
        );
    } else if (buffer->bufferType == GN_INDEX_BUFFER) {
        return vulkanCreateBuffer(
            outputDevice, buffer->size,
            VK_BUFFER_USAGE_TRANSFER_DST_BIT | VK_BUFFER_USAGE_INDEX_BUFFER_BIT, VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT,
            buffer->buffer->buffer, buffer->buffer->bufferMemory
        );
    } else if (buffer->bufferType == GN_UNIFORM_BUFFER) {
        return vulkanCreateBuffer(
            outputDevice, buffer->size,
            VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT,
            buffer->buffer->buffer, buffer->buffer->bufferMemory
        );
    }

    return GN_FAILED;
}

GN_EXPORT void gnBufferDataFn(const gnBuffer& buffer, void* inputData) {
    // The old direct-map path below never worked for device-local buffers, so data is
    // written into the host-visible staging buffer and then copied on the GPU instead.
    // void* data2;
    // vkMapMemory(buffer.buffer->device->outputDevice->device, buffer.buffer->bufferMemory, 0, buffer.size, 0, &data2);
    // memcpy(data2, data, buffer.size);
    // vkUnmapMemory(buffer.buffer->device->outputDevice->device, buffer.buffer->bufferMemory);

    void* data;
    vkMapMemory(buffer.buffer->device->outputDevice->device, buffer.buffer->stagingBufferMemory, 0, buffer.size, 0, &data);
    memcpy(data, inputData, buffer.size);
    vkUnmapMemory(buffer.buffer->device->outputDevice->device, buffer.buffer->stagingBufferMemory);

    //const gnOutputDevice& outputDevice, VkBuffer srcBuffer, VkBuffer dstBuffer, VkDeviceSize size
    vkCopyBuffer(*buffer.buffer->device, buffer.buffer->stagingBuffer, buffer.buffer->buffer, buffer.size);
}

GN_EXPORT void gnBufferSubDataFn(const gnBuffer& buffer, gnSize offset, gnSize size, void* inputData) {
    void* data;
    vkMapMemory(buffer.buffer->device->outputDevice->device, buffer.buffer->bufferMemory, 0, buffer.size, 0, &data);
    memcpy((char*)data + offset, inputData, size);
    vkUnmapMemory(buffer.buffer->device->outputDevice->device, buffer.buffer->bufferMemory);

    // void* data;
    // vkMapMemory(buffer.buffer->device->outputDevice->device, buffer.buffer->stagingBufferMemory, 0, buffer.size, 0, &data);
    // memcpy((char*)data + offset, inputData, size);
    // vkUnmapMemory(buffer.buffer->device->outputDevice->device, buffer.buffer->stagingBufferMemory);

    // //const gnOutputDevice& outputDevice, VkBuffer srcBuffer, VkBuffer dstBuffer, VkDeviceSize size
    // vkCopyBuffer(*buffer.buffer->device, buffer.buffer->stagingBuffer, buffer.buffer->buffer, buffer.size);
}
GN_EXPORT void gnBufferClearDataFn(const gnBuffer& buffer) {
    void* data;
    vkMapMemory(buffer.buffer->device->outputDevice->device, buffer.buffer->bufferMemory, 0, buffer.size, 0, &data);
    memset(data, 0, buffer.size);
    vkUnmapMemory(buffer.buffer->device->outputDevice->device, buffer.buffer->bufferMemory);

    // void* data;
    // vkMapMemory(buffer.buffer->device->outputDevice->device, buffer.buffer->stagingBufferMemory, 0, buffer.size, 0, &data);
    // memset(data, 0, buffer.size);
    // vkUnmapMemory(buffer.buffer->device->outputDevice->device, buffer.buffer->stagingBufferMemory);

    // //const gnOutputDevice& outputDevice, VkBuffer srcBuffer, VkBuffer dstBuffer, VkDeviceSize size
    // vkCopyBuffer(*buffer.buffer->device, buffer.buffer->stagingBuffer, buffer.buffer->buffer, buffer.size);
}
GN_EXPORT void gnBufferMapDataFn(const gnBuffer& buffer, void** data) {
    vkMapMemory(buffer.buffer->device->outputDevice->device, buffer.buffer->bufferMemory, 0, buffer.size, 0, data);
}
GN_EXPORT void gnDestroyBufferFn(const gnBuffer& buffer) {
    vkDestroyBuffer(buffer.buffer->device->outputDevice->device, buffer.buffer->buffer, nullptr);
    vkFreeMemory(buffer.buffer->device->outputDevice->device, buffer.buffer->bufferMemory, nullptr);

    vkDestroyBuffer(buffer.buffer->device->outputDevice->device, buffer.buffer->stagingBuffer, nullptr);
    vkFreeMemory(buffer.buffer->device->outputDevice->device, buffer.buffer->stagingBufferMemory, nullptr);
}
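The intended call sequence for a device-local vertex buffer, sketched under the assumption that gnBuffer exposes the size and bufferType fields used above and that device is a created gnOutputDevice (illustrative only):

    float vertices[] = { 0.0f, 0.0f,  1.0f, 0.0f,  0.0f, 1.0f };

    gnBuffer vertexBuffer{};
    vertexBuffer.bufferType = GN_VERTEX_BUFFER;
    vertexBuffer.size = sizeof(vertices);
    if (gnCreateBufferFn(&vertexBuffer, device) == GN_SUCCESS) {
        gnBufferDataFn(vertexBuffer, vertices);   // stages the data, then copies into the device-local buffer
        // ... record draws that bind vertexBuffer ...
        gnDestroyBufferFn(vertexBuffer);
    }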
@@ -1,20 +0,0 @@
#pragma once
#include "vulkan/vulkan.h"
#include "gryphn/gryphn_utils.h"
struct gnOutputDevice;

struct gnPlatformBuffer {
    // main buffer used for rendering
    VkBuffer buffer;
    VkDeviceMemory bufferMemory;

    // host-visible staging buffer used for uploads
    VkBuffer stagingBuffer;
    VkDeviceMemory stagingBufferMemory;

    gnOutputDevice* device;
};

gnReturnCode vulkanCreateBuffer(const gnOutputDevice& outputDevice, gnSize size, VkBufferUsageFlags usage, VkMemoryPropertyFlags properties, VkBuffer& buffer, VkDeviceMemory& bufferMemory);
gnReturnCode findMemoryType(VkPhysicalDevice physicalDevice, uint32_t typeFilter, VkMemoryPropertyFlags properties, uint32_t* memoryType);
@@ -1,7 +0,0 @@
#include "core/gryphn_support.h"

GN_EXPORT gnBool gnAPISupportsFn(gnFeature feature) {
    if (feature == GN_DYNAMIC_STATES) return true;
    if (feature == GN_SYNC_OBJECTS) return true;
    return false;
}
@@ -1,5 +1,7 @@
#include <instance/vulkan_instance.h>
#include "vulkan_surface.h"
#include <output_device/vulkan_physical_device.h>
#include <stdio.h>

#ifdef GN_PLATFORM_LINUX
#ifdef GN_WINDOW_X11
@@ -56,3 +58,33 @@ gnReturnCode gnCreateWin32WindowSurface(struct gnWindowSurface_t* windowSurface,
void gnDestroyWindowSurfaceFn(struct gnWindowSurface_t* windowSurface) {
    vkDestroySurfaceKHR(windowSurface->instance->instance->vk_instance, windowSurface->windowSurface->surface, NULL);
}

struct gnSurfaceFormat_t* gnGetSupportedSurfaceFormatsFn(
    struct gnWindowSurface_t* windowSurface,
    struct gnPhysicalDevice_t device,
    uint32_t* formatCount
) {
    struct gnSurfaceFormat_t* formats = NULL;

    vkGetPhysicalDeviceSurfaceFormatsKHR(device.physicalDevice->device, windowSurface->windowSurface->surface, formatCount, NULL);
    formats = malloc(sizeof(struct gnSurfaceFormat_t) * *formatCount);
    VkSurfaceFormatKHR* vkFormats = malloc(sizeof(VkSurfaceFormatKHR) * *formatCount);

    if (*formatCount > 0) {
        vkGetPhysicalDeviceSurfaceFormatsKHR(device.physicalDevice->device, windowSurface->windowSurface->surface, formatCount, vkFormats);
        for (uint32_t i = 0; i < *formatCount; i++) {
            switch (vkFormats[i].format) {
                case VK_FORMAT_B8G8R8A8_SRGB: { formats[i].format = GN_FORMAT_BGRA8_SRGB; break; }
                default: break;
            }

            switch (vkFormats[i].colorSpace) {
                case VK_COLOR_SPACE_SRGB_NONLINEAR_KHR: { formats[i].colorSpace = GN_COLOR_SPACE_SRGB_NONLINEAR; break; }
                default: break;
            }
        }
    }

    free(vkFormats); // the temporary Vulkan-side array is only needed for the conversion above
    return formats;
}
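The caller owns the returned array. A rough usage sketch, assuming windowSurface and physicalDevice came from the existing create/enumerate paths (not shown in this hunk):

    uint32_t formatCount = 0;
    struct gnSurfaceFormat_t* formats = gnGetSupportedSurfaceFormatsFn(windowSurface, physicalDevice, &formatCount);
    for (uint32_t i = 0; i < formatCount; i++) {
        if (formats[i].format == GN_FORMAT_BGRA8_SRGB &&
            formats[i].colorSpace == GN_COLOR_SPACE_SRGB_NONLINEAR) {
            /* prefer this format for the swapchain */
        }
    }
    free(formats);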