2016-11-19 09:28:01 +00:00
# include "graphics/graphics.h"
2022-05-09 18:47:06 +00:00
# include "data/blob.h"
2022-04-30 03:38:34 +00:00
# include "data/image.h"
2022-07-04 00:26:31 +00:00
# include "data/modelData.h"
2022-06-19 00:43:12 +00:00
# include "data/rasterizer.h"
2022-11-08 06:45:10 +00:00
# include "event/event.h"
2022-06-06 03:38:14 +00:00
# include "headset/headset.h"
2022-05-11 19:51:13 +00:00
# include "math/math.h"
2022-04-21 07:27:13 +00:00
# include "core/gpu.h"
2022-05-07 00:26:38 +00:00
# include "core/maf.h"
2022-05-22 22:09:09 +00:00
# include "core/spv.h"
2022-04-27 05:51:24 +00:00
# include "core/os.h"
2022-04-21 07:27:13 +00:00
# include "util.h"
2022-06-22 03:05:57 +00:00
# include "monkey.h"
2022-05-28 03:47:07 +00:00
# include "shaders.h"
2022-04-30 03:38:34 +00:00
# include <math.h>
2023-11-24 01:07:44 +00:00
# include <stdatomic.h>
2022-08-09 03:36:22 +00:00
# include <limits.h>
2023-08-17 22:02:53 +00:00
# include <stdio.h>
2022-04-26 22:32:54 +00:00
# include <stdlib.h>
2022-04-20 07:38:21 +00:00
# include <string.h>
2022-05-09 18:47:06 +00:00
# ifdef LOVR_USE_GLSLANG
# include "glslang_c_interface.h"
# include "resource_limits_c.h"
# endif
2019-06-28 05:17:50 +00:00
2023-11-30 08:14:06 +00:00
// Hard limits and fixed stack sizes for the graphics module
#define MAX_PIPELINES 65536        // Capacity of the virtual-memory pipeline array (state.pipelines)
#define MAX_TALLIES 256
#define TRANSFORM_STACK_SIZE 16
#define PIPELINE_STACK_SIZE 4

#define MAX_SHADER_RESOURCES 32

#define MAX_CUSTOM_ATTRIBUTES 10

// Fixed indices of the built-in descriptor set layouts in state.layouts;
// lovrGraphicsInit creates them in exactly this order and asserts the indices
#define LAYOUT_BUILTINS 0
#define LAYOUT_MATERIAL 1
#define LAYOUT_UNIFORMS 2

// Reinterpret a float's bit pattern as uint32_t (union type punning; well-defined in C)
#define FLOAT_BITS(f) ((union { float f; uint32_t u; }) { f }).u
2022-04-27 05:44:44 +00:00
2023-04-28 02:48:12 +00:00
// A refcounted chunk of GPU buffer memory that suballocations are carved from.
// Blocks are chained through `next` when sitting on a freelist.
typedef struct {
  void* next;          // Next block in the allocator's freelist
  void* pointer;       // Mapped CPU pointer (assumed NULL for non-mappable memory -- TODO confirm)
  gpu_buffer* handle;
  uint32_t tick;       // GPU tick the block was last used, for safe recycling
  uint32_t size;
  uint32_t ref;        // Refcount; freed via freeBlock when it reaches zero
} BufferBlock;

// Bump allocator over BufferBlocks: suballocates from `current`, recycling
// retired blocks through `freelist`.
typedef struct {
  BufferBlock* freelist;
  BufferBlock* current;
  uint32_t cursor;     // Byte offset of the next suballocation within `current`
} BufferAllocator;

// A suballocated range of a BufferBlock, plus its mapped pointer when available.
typedef struct {
  BufferBlock* block;
  gpu_buffer* buffer;
  uint32_t offset;
  uint32_t extent;
  void* pointer;
} BufferView;
2023-04-28 02:48:12 +00:00
2023-12-30 22:17:20 +00:00
// Per-resource synchronization state used to compute GPU barriers between
// passes and transfers.
typedef struct {
  gpu_phase readPhase;
  gpu_phase writePhase;
  gpu_cache pendingReads;
  gpu_cache pendingWrite;
  uint32_t lastTransferRead;   // Tick of the last transfer read (see syncTransfer)
  uint32_t lastTransferWrite;  // Tick of the last transfer write
  gpu_barrier* barrier;
} Sync;

struct Buffer {
  uint32_t ref;          // Refcount
  uint32_t base;         // Byte offset of this buffer within its backing block
  Sync sync;
  gpu_buffer* gpu;
  BufferBlock* block;    // Backing suballocation block (shares its refcount)
  bool complexFormat;    // NOTE(review): presumably set for nested/array formats -- confirm
  BufferInfo info;
};
2022-04-30 03:38:34 +00:00
struct Texture {
  uint32_t ref;              // Refcount
  bool xrAcquired;           // True while acquired from the XR swapchain
  Sync sync;
  gpu_texture* gpu;
  gpu_texture* renderView;   // View used as a render target attachment
  gpu_texture* storageView;  // View used for storage (image load/store) access
  Material* material;        // Material wrapping this texture (presumably created on demand -- verify)
  Texture* root;             // Root texture when this is a view -- TODO confirm NULL for non-views
  uint32_t baseLayer;        // First layer/level of the view within the root texture
  uint32_t baseLevel;
  TextureInfo info;
};

struct Sampler {
  uint32_t ref;       // Refcount
  gpu_sampler* gpu;
  SamplerInfo info;
};
2024-01-03 19:18:58 +00:00
// Shader stage bits (see Shader.stageMask)
enum {
  FLAG_VERTEX = (1 << 0),
  FLAG_FRAGMENT = (1 << 1),
  FLAG_COMPUTE = (1 << 2)
};

// A resource binding (buffer/texture/sampler) declared by a shader.
typedef struct {
  uint32_t hash;       // Name hash, for lookup by name (see findShaderResource)
  uint32_t binding;    // Binding slot index
  gpu_slot_type type;
  gpu_phase phase;     // Pipeline phases that access the resource, for sync
  gpu_cache cache;     // Cache usage of the access, for sync
  uint32_t fieldCount;
  DataField* format;   // Buffer field layout (assumed unused for non-buffer slots -- confirm)
} ShaderResource;

// A vertex attribute declared by a shader, addressable by name hash.
typedef struct {
  uint32_t location;
  uint32_t hash;
} ShaderAttribute;
2022-05-09 18:47:06 +00:00
struct Shader {
  uint32_t ref;                   // Refcount
  Shader* parent;                 // NOTE(review): presumably set for flag-override clones -- confirm
  gpu_shader* gpu;
  gpu_pipeline* computePipeline;  // Cached pipeline for compute shaders
  ShaderInfo info;
  size_t layout;                  // Index of the shader's resource layout in state.layouts
  uint32_t workgroupSize[3];      // Compute workgroup dimensions
  bool hasCustomAttributes;
  uint32_t attributeCount;
  uint32_t resourceCount;
  uint32_t bufferMask;            // Bitmasks over binding slots, by slot kind
  uint32_t textureMask;
  uint32_t samplerMask;
  uint32_t storageMask;
  uint32_t uniformSize;           // Size of the default-uniform block, in bytes
  uint32_t uniformCount;
  uint32_t stageMask;             // FLAG_VERTEX / FLAG_FRAGMENT / FLAG_COMPUTE bits
  ShaderAttribute* attributes;
  ShaderResource* resources;
  DataField* uniforms;
  DataField* fields;
  uint32_t flagCount;
  uint32_t overrideCount;
  gpu_shader_flag* flags;
  uint32_t* flagLookup;           // Name hashes for flags, parallel to `flags`
  char* names;                    // Backing storage for name strings referenced above
};
2022-06-17 06:49:09 +00:00
// Materials are slots inside MaterialBlocks: `block`/`index` locate the slot,
// `next` links free slots, `tick` is the last GPU tick the material was used.
struct Material {
  uint32_t ref;
  uint32_t next;
  uint32_t tick;
  uint16_t index;
  uint16_t block;
  gpu_bundle* bundle;        // Descriptor bundle with the material data + textures
  MaterialInfo info;
  bool hasWritableTexture;   // Some bound texture needs sync tracking -- TODO confirm exact meaning
};
2022-06-19 00:43:12 +00:00
// A single rasterized glyph in the font atlas.
typedef struct {
  uint32_t codepoint;
  float advance;
  uint16_t x, y;     // Position in the atlas, in pixels
  uint16_t uv[4];    // Atlas texture coordinates (packed un16)
  float box[4];      // Glyph bounding box
} Glyph;

struct Font {
  uint32_t ref;          // Refcount
  FontInfo info;
  Material* material;
  arr_t(Glyph) glyphs;
  map_t glyphLookup;     // codepoint -> index into `glyphs`
  map_t kerning;         // codepoint pair -> kerning adjustment -- TODO confirm key packing
  float pixelDensity;
  float lineSpacing;
  uint32_t padding;      // Pixels of padding around each glyph in the atlas
  Texture* atlas;
  uint32_t atlasWidth;
  uint32_t atlasHeight;
  uint32_t rowHeight;    // Shelf-packing state: current row height and cursor
  uint32_t atlasX;
  uint32_t atlasY;
};
2023-06-11 05:06:29 +00:00
struct Mesh {
  uint32_t ref;                // Refcount
  MeshStorage storage;
  Buffer* vertexBuffer;
  Buffer* indexBuffer;
  uint32_t indexCount;
  uint32_t dirtyVertices[2];   // Vertex range needing re-upload -- TODO confirm [min, max) convention
  bool dirtyIndices;
  void* vertices;              // CPU-side copies of the data (when storage mode keeps them)
  void* indices;
  float bounds[6];
  bool hasBounds;
  DrawMode mode;
  uint32_t drawStart;          // Default draw range and base vertex
  uint32_t drawCount;
  uint32_t baseVertex;
  Material* material;
};
2022-05-30 22:36:31 +00:00
// Per-draw data uploaded to the DrawData uniform buffer.
typedef struct {
  float transform[12];   // 3x4 transform -- TODO confirm row/column layout
  float color[4];
} DrawData;

// Built-in vertex formats, indexing state.vertexFormats.
typedef enum {
  VERTEX_SHAPE,
  VERTEX_POINT,
  VERTEX_GLYPH,
  VERTEX_MODEL,
  VERTEX_EMPTY,
  VERTEX_FORMAT_COUNT
} VertexFormat;
2022-06-12 02:07:46 +00:00
// Parameters describing one draw, consumed by the Pass draw recorder.
typedef struct {
  uint64_t hash;         // Geometry hash used to dedupe shapes (see Pass.geocache)
  DrawMode mode;
  DefaultShader shader;
  Material* material;
  float* transform;      // Optional transform -- assumed NULL means identity; verify caller
  float* bounds;         // Optional bounding box used for view culling
  struct {
    Buffer* buffer;
    VertexFormat format;
    uint32_t count;
    void** pointer;      // Out-param: where the caller should write vertex data
  } vertex;
  struct {
    Buffer* buffer;
    uint32_t count;
    void** pointer;      // Out-param: where the caller should write index data
  } index;
  uint32_t start;
  uint32_t count;
  uint32_t instances;
  uint32_t baseVertex;
} DrawInfo;
2022-06-12 02:07:46 +00:00
2022-07-04 00:26:31 +00:00
// TRS transform of a model node.
typedef struct {
  float position[3];
  float rotation[4];   // Quaternion
  float scale[3];
} NodeTransform;

// A range of blend shapes affecting one contiguous vertex range.
typedef struct {
  uint32_t index;        // First blend shape in the group
  uint32_t count;
  uint32_t vertexIndex;  // First affected vertex
  uint32_t vertexCount;
} BlendGroup;
2022-07-04 00:26:31 +00:00
struct Model {
  uint32_t ref;                  // Refcount
  Model* parent;                 // NOTE(review): presumably set for clones sharing buffers -- confirm
  ModelInfo info;
  DrawInfo* draws;
  Buffer* rawVertexBuffer;       // Original vertices, input to skinning/blending
  Buffer* vertexBuffer;          // Post-animation vertices actually drawn
  Buffer* indexBuffer;
  Buffer* blendBuffer;           // Blend shape vertex deltas
  Buffer* skinBuffer;            // Skinning data (presumably joint matrices -- verify)
  Mesh** meshes;
  Texture** textures;
  Material** materials;
  NodeTransform* localTransforms;
  float* globalTransforms;       // World-space matrices, rebuilt when transformsDirty
  float* boundingBoxes;
  bool transformsDirty;
  bool blendShapesDirty;
  float* blendShapeWeights;
  BlendGroup* blendGroups;
  uint32_t blendGroupCount;
  uint32_t lastVertexAnimation;  // Last tick vertices were animated -- TODO confirm units
};
2023-04-30 01:25:58 +00:00
typedef enum {
  READBACK_BUFFER,
  READBACK_TEXTURE,
  READBACK_TIMESTAMP
} ReadbackType;

// CPU timing info paired with GPU timestamps for one Pass.
typedef struct {
  Pass* pass;
  double cpuTime;
} TimingInfo;

// An asynchronous GPU->CPU copy.  Readbacks form a linked list ordered by
// tick (state.oldestReadback/newestReadback) and complete in processReadbacks.
struct Readback {
  uint32_t ref;      // Refcount
  uint32_t tick;     // GPU tick that must complete before the data is ready
  Readback* next;
  BufferView view;   // Download staging memory
  ReadbackType type;
  union {            // Payload, selected by `type`
    struct {         // READBACK_BUFFER
      Buffer* buffer;
      Blob* blob;
    };
    struct {         // READBACK_TEXTURE
      Texture* texture;
      Image* image;
    };
    struct {         // READBACK_TIMESTAMP
      TimingInfo* times;
      uint32_t count;
    };
  };
};
2022-08-06 20:37:27 +00:00
// Contents of the Globals builtin uniform buffer.
typedef struct {
  float resolution[2];   // Render target size in pixels
  float time;
} Globals;

// Per-view matrices uploaded to the Cameras builtin uniform buffer.
typedef struct {
  float viewMatrix[16];
  float projection[16];
  float viewProjection[16];
  float inverseProjection[16];
} Camera;
2022-08-23 03:30:09 +00:00
// CPU vertex layouts matching the built-in vertex formats.
typedef struct {
  struct { float x, y, z; } position;
  struct { float x, y, z; } normal;
  struct { float u, v; } uv;
} ShapeVertex;

typedef struct {
  struct { float x, y, z; } position;
  uint32_t normal;    // Packed sn10x3 (matches GPU_TYPE_SN10x3 in the model vertex format)
  struct { float u, v; } uv;
  struct { uint8_t r, g, b, a; } color;
  uint32_t tangent;   // Packed sn10x3
} ModelVertex;

// Vertex data consumed by blend-shape animation.
typedef struct {
  struct { float x, y, z; } position;
  struct { float x, y, z; } normal;
  struct { float x, y, z; } tangent;
} BlendVertex;
2022-07-12 03:52:35 +00:00
// Built-in shape identifiers
enum {
  SHAPE_PLANE,
  SHAPE_BOX,
  SHAPE_CIRCLE,
  SHAPE_SPHERE,
  SHAPE_CYLINDER,
  SHAPE_CONE,
  SHAPE_CAPSULE,
  SHAPE_TORUS,
  SHAPE_MONKEY
};

// Dirty bits -- NOTE(review): presumably Pass.flags; confirm against usage
enum {
  DIRTY_BINDINGS = (1 << 0),
  DIRTY_UNIFORMS = (1 << 1),
  DIRTY_CAMERA = (1 << 2),
  NEEDS_VIEW_CULL = (1 << 3)
};

// Bump allocator over a reserved virtual memory range; pages are committed
// lazily as `cursor` grows past `length`, up to `limit`.
typedef struct {
  char* memory;
  size_t cursor;
  size_t length;   // Committed size, in bytes
  size_t limit;    // Reserved (maximum) size, in bytes
} Allocator;
2022-07-12 03:52:35 +00:00
// A cached shape geometry entry (see Pass.geocache), keyed by `hash`.
typedef struct {
  uint64_t hash;
  uint32_t start;
  uint32_t baseVertex;
  uint32_t vertexBufferOffset;
  gpu_buffer* vertexBuffer;
  gpu_buffer* indexBuffer;
} CachedShape;

// Indices into Pass.access, separating compute-phase and render-phase accesses
enum {
  ACCESS_COMPUTE,
  ACCESS_RENDER
};
2022-06-04 21:28:23 +00:00
// One recorded resource access, used to compute barriers when passes submit.
typedef struct {
  Sync* sync;
  void* object;     // The Buffer or Texture the access refers to
  gpu_phase phase;
  gpu_cache cache;
} Access;

// A chunk of the doubly linked list of recorded accesses.  Bit i of
// textureMask set means list[i] refers to a Texture (else a Buffer) --
// TODO confirm against getNextAccess.
typedef struct {
  void* prev;
  void* next;
  uint64_t count;
  uint64_t textureMask;
  uint64_t padding;    // NOTE(review): looks like sizing/alignment filler -- confirm
  Access list[41];
} AccessBlock;

typedef struct {
  Texture* texture;
  LoadAction load;
  float clear[4];
} ColorAttachment;
typedef struct {
  Texture* texture;
  TextureFormat format;   // NOTE(review): presumably used when `texture` is NULL -- confirm
  LoadAction load;
  float clear;
} DepthAttachment;

// Render target configuration for a Pass.
typedef struct {
  ColorAttachment color[4];
  DepthAttachment depth;
  uint32_t count;    // Number of color attachments in use
  uint32_t width;
  uint32_t height;
  uint32_t views;    // Array layers rendered (multiview)
  uint32_t samples;
  bool resolve;      // Whether multisample attachments resolve to single-sample textures
} Canvas;
// One entry of a Pass's push/pop render state stack.
typedef struct {
  bool dirty;                    // Pipeline info changed since the last draw
  bool viewCull;
  DrawMode mode;
  float color[4];
  Buffer* lastVertexBuffer;      // Cached to skip redundant vertex format changes
  VertexFormat lastVertexFormat;
  gpu_pipeline_info info;
  Material* material;
  Shader* shader;
  Font* font;
} Pipeline;

// Compute.flags bits
enum {
  COMPUTE_INDIRECT = (1 << 0),
  COMPUTE_BARRIER = (1 << 1)
};
// One recorded compute dispatch.
typedef struct {
  uint32_t flags;              // COMPUTE_* bits
  Shader* shader;
  gpu_bundle_info* bundleInfo;
  gpu_bundle* bundle;
  gpu_buffer* uniformBuffer;   // Storage for the shader's default-uniform block
  uint32_t uniformOffset;
  union {
    struct {                   // Direct dispatch: workgroup counts
      uint32_t x;
      uint32_t y;
      uint32_t z;
    };
    struct {                   // Indirect dispatch (COMPUTE_INDIRECT)
      gpu_buffer* buffer;
      uint32_t offset;
    } indirect;
  };
} Compute;

// Draw.flags bits
enum {
  DRAW_INDIRECT = (1 << 0),
  DRAW_INDEX32 = (1 << 1),
  DRAW_HAS_BOUNDS = (1 << 2)
};
// One recorded draw call.
typedef struct {
  uint16_t flags;               // DRAW_* bits
  uint16_t camera;              // Camera index this draw uses
  uint32_t tally;               // Tally slot for this draw -- TODO confirm sentinel for "none"
  Shader* shader;
  Material* material;
  gpu_pipeline_info* pipelineInfo;
  gpu_bundle_info* bundleInfo;
  gpu_pipeline* pipeline;
  gpu_bundle* bundle;
  gpu_buffer* vertexBuffer;
  gpu_buffer* indexBuffer;
  gpu_buffer* uniformBuffer;    // Storage for the shader's default-uniform block
  uint32_t vertexBufferOffset;
  uint32_t uniformOffset;
  union {
    struct {                    // Direct draw
      uint32_t start;
      uint32_t count;
      uint32_t instances;
      uint32_t baseVertex;
    };
    struct {                    // Indirect draw (DRAW_INDIRECT)
      gpu_buffer* buffer;
      uint32_t offset;
      uint32_t count;
      uint32_t stride; // Deprecated
    } indirect;
  };
  float transform[16];
  float color[4];
  float bounds[6];              // Bounds used when DRAW_HAS_BOUNDS is set (view culling)
} Draw;
// GPU query/tally state for a Pass.
typedef struct {
  gpu_tally* gpu;
  Buffer* tempBuffer;    // Scratch buffer tally results get resolved into
  bool active;           // True while a tally measurement is in progress
  uint32_t count;        // Number of tally slots used
  uint32_t bufferOffset;
  Buffer* buffer;        // Destination buffer for resolved results
} Tally;
// A recorded stream of compute dispatches and draws, replayed on submit.
struct Pass {
  uint32_t ref;              // Refcount
  uint32_t flags;            // Dirty/view-cull bits (DIRTY_*, NEEDS_VIEW_CULL)
  gpu_pass* gpu;
  Allocator allocator;       // Pass-local scratch memory
  BufferAllocator buffers;   // Pass-local GPU scratch buffer allocator
  CachedShape geocache[16];  // Small cache of recently generated shape geometry
  AccessBlock* access[2];    // Tracked accesses: [ACCESS_COMPUTE] and [ACCESS_RENDER]
  Tally tally;
  Canvas canvas;
  Camera* cameras;           // Camera array -- assumed cameraCount per view; verify layout
  uint32_t cameraCount;
  float viewport[6];
  uint32_t scissor[4];
  Sampler* sampler;          // Default sampler bound in the builtin set
  float* transform;          // Top of the transform stack
  Pipeline* pipeline;        // Top of the pipeline state stack
  uint32_t transformIndex;   // Stack depths (limits TRANSFORM_STACK_SIZE / PIPELINE_STACK_SIZE)
  uint32_t pipelineIndex;
  gpu_binding* bindings;     // Current user resource bindings
  void* uniforms;            // Staging memory for shader default-uniform values
  uint32_t computeCount;
  Compute* computes;
  uint32_t drawCount;
  uint32_t drawCapacity;
  Draw* draws;
  PassStats stats;
};
2022-06-17 06:49:09 +00:00
// A block of material slots sharing one buffer view and bundle pool; slots
// are recycled through a queue (`head`/`tail`).
typedef struct {
  Material* list;
  BufferView view;
  gpu_bundle_pool* bundlePool;
  gpu_bundle* bundles;
  uint32_t head;
  uint32_t tail;
} MaterialBlock;

// A pool of descriptor bundles, recycled once its `tick` has completed.
typedef struct {
  void* next;
  gpu_bundle_pool* gpu;
  gpu_bundle* bundles;
  uint32_t cursor;   // Next unused bundle in the pool
  uint32_t tick;
} BundlePool;

// A cached descriptor set layout plus its bundle pools.
typedef struct {
  uint64_t hash;     // Hash of the slot array, for lookup in state.layouts
  gpu_layout* gpu;
  BundlePool* head;
  BundlePool* tail;
} Layout;

// A cached transient render texture, recycled by hash + last-used tick.
typedef struct {
  gpu_texture* texture;
  uint32_t hash;
  uint32_t tick;
} ScratchTexture;
2019-06-27 08:47:08 +00:00
// Module-wide graphics state singleton.
static struct {
  uint32_t ref;                 // Module refcount.  NOTE(review): accessed with
                                // atomic_fetch_add/_sub but not declared _Atomic -- confirm
  bool active;                  // True while a frame is being recorded
  bool shouldPresent;
  bool timingEnabled;           // Collect per-pass CPU/GPU timing (enabled by config.debug)
  GraphicsConfig config;
  gpu_device_info device;
  gpu_features features;
  gpu_limits limits;
  gpu_stream* stream;           // Current frame's command/transfer stream
  gpu_barrier barrier;
  gpu_barrier transferBarrier;
  gpu_tally* timestamps;        // Timestamp query pool for pass timing
  uint32_t timestampCount;
  uint32_t tick;
  float background[4];
  TextureFormat depthFormat;
  Texture* window;              // Window swapchain texture
  Pass* windowPass;
  Font* defaultFont;
  Buffer* defaultBuffer;        // 8 floats: four zeros then four ones (default attribute values)
  Texture* defaultTexture;      // 4x4 opaque white texture
  Sampler* defaultSamplers[2];  // [0] nearest, [1] linear (see lovrGraphicsInit)
  Shader* defaultShaders[DEFAULT_SHADER_COUNT];
  gpu_vertex_format vertexFormats[VERTEX_FORMAT_COUNT];
  Readback* oldestReadback;     // Intrusive list of pending readbacks, oldest first
  Readback* newestReadback;
  Material* defaultMaterial;
  size_t materialBlock;         // Index of the MaterialBlock currently allocating
  arr_t(MaterialBlock) materialBlocks;
  BufferAllocator bufferAllocators[4];   // Indexed by gpu_buffer_type
  arr_t(ScratchTexture) scratchTextures;
  map_t passLookup;             // Canvas hash -> gpu_pass
  map_t pipelineLookup;         // Pipeline info hash -> pipeline index
  gpu_pipeline* pipelines;      // Virtual-memory array, up to MAX_PIPELINES entries
  uint32_t pipelineCount;
  arr_t(Layout) layouts;        // Cached descriptor set layouts (LAYOUT_* are fixed indices)
  Allocator allocator;          // Frame-temporary memory
} state;
2016-07-07 07:04:24 +00:00
2022-04-21 07:27:13 +00:00
// Helpers

// Frame-temporary allocation with stack discipline (push/pop restore points)
static void* tempAlloc(Allocator* allocator, size_t size);
static size_t tempPush(Allocator* allocator);
static void tempPop(Allocator* allocator, size_t stack);

// Pipeline array access and GPU buffer suballocation
static gpu_pipeline* getPipeline(uint32_t index);
static BufferBlock* getBlock(gpu_buffer_type type, uint32_t size);
static void freeBlock(BufferAllocator* allocator, BufferBlock* block);
static BufferView allocateBuffer(BufferAllocator* allocator, gpu_buffer_type type, uint32_t size, size_t align);
static BufferView getBuffer(gpu_buffer_type type, uint32_t size, size_t align);

// Misc utilities
static int u64cmp(const void* a, const void* b);
static uint32_t lcm(uint32_t a, uint32_t b);

// Frame lifecycle
static void beginFrame(void);
static void flushTransfers(void);
static void processReadbacks(void);

// Caches: render passes, descriptor layouts/bundles, scratch textures
static gpu_pass* getPass(Canvas* canvas);
static size_t getLayout(gpu_slot* slots, uint32_t count);
static gpu_bundle* getBundle(size_t layout, gpu_binding* bindings, uint32_t count);
static gpu_texture* getScratchTexture(gpu_stream* stream, Canvas* canvas, TextureFormat format, bool srgb);

// Texture format/size helpers
static bool isDepthFormat(TextureFormat format);
static bool supportsSRGB(TextureFormat format);
static uint32_t measureTexture(TextureFormat format, uint32_t w, uint32_t h, uint32_t d);
static void checkTextureBounds(const TextureInfo* info, uint32_t offset[4], uint32_t extent[3]);
static void mipmapTexture(gpu_stream* stream, Texture* texture, uint32_t base, uint32_t count);

// Shader resources and synchronization tracking
static ShaderResource* findShaderResource(Shader* shader, const char* name, size_t length);
static Access* getNextAccess(Pass* pass, int type, bool texture);
static void trackBuffer(Pass* pass, Buffer* buffer, gpu_phase phase, gpu_cache cache);
static void trackTexture(Pass* pass, Texture* texture, gpu_phase phase, gpu_cache cache);
static void trackMaterial(Pass* pass, Material* material);
static bool syncResource(Access* access, gpu_barrier* barrier);
static gpu_barrier syncTransfer(Sync* sync, gpu_phase phase, gpu_cache cache);

// Models, shader feature validation, OS callbacks
static void updateModelTransforms(Model* model, uint32_t nodeIndex, float* parent);
static void checkShaderFeatures(uint32_t* features, uint32_t count);
static void onResize(uint32_t width, uint32_t height);
static void onMessage(void* context, const char* message, bool severe);
// Entry
2022-08-03 05:00:11 +00:00
// Initializes the graphics module: GPU device, frame allocator, builtin
// descriptor set layouts, default resources, and vertex formats.  Returns
// false if the module was already initialized (refcounted); throws if the
// GPU fails to initialize.
bool lovrGraphicsInit(GraphicsConfig* config) {
  if (atomic_fetch_add(&state.ref, 1)) return false;

  gpu_config gpu = {
    .debug = config->debug,
    .fnLog = onMessage,
    .fnAlloc = lovrMalloc,
    .fnFree = lovrFree,
    .engineName = "LOVR",
    .engineVersion = { LOVR_VERSION_MAJOR, LOVR_VERSION_MINOR, LOVR_VERSION_PATCH },
    .device = &state.device,
    .features = &state.features,
    .limits = &state.limits,
#ifdef LOVR_VK
    .vk.cacheData = config->cacheData,
    .vk.cacheSize = config->cacheSize,
#endif
#if defined(LOVR_VK) && !defined(LOVR_DISABLE_HEADSET)
    // Let the headset runtime participate in Vulkan instance/device creation
    // so both modules share one GPU
    .vk.getPhysicalDevice = lovrHeadsetInterface ? lovrHeadsetInterface->getVulkanPhysicalDevice : NULL,
    .vk.createInstance = lovrHeadsetInterface ? lovrHeadsetInterface->createVulkanInstance : NULL,
    .vk.createDevice = lovrHeadsetInterface ? lovrHeadsetInterface->createVulkanDevice : NULL,
#endif
  };

  if (!gpu_init(&gpu)) {
    lovrThrow("Failed to initialize GPU");
  }

  state.config = *config;
  state.timingEnabled = config->debug;

  // Temporary frame memory uses a large 1GB virtual memory allocation, committing pages as needed
  state.allocator.length = 1 << 14;
  state.allocator.limit = 1 << 30;
  state.allocator.memory = os_vm_init(state.allocator.limit);
  os_vm_commit(state.allocator.memory, state.allocator.length);

  // Pipelines also live in a reserved virtual memory range
  state.pipelines = os_vm_init(MAX_PIPELINES * gpu_sizeof_pipeline());
  lovrAssert(state.pipelines, "Out of memory");

  map_init(&state.passLookup, 4);
  map_init(&state.pipelineLookup, 64);
  arr_init(&state.layouts);
  arr_init(&state.materialBlocks);
  arr_init(&state.scratchTextures);

  // The next three layouts must be created in this exact order so their
  // indices match LAYOUT_BUILTINS / LAYOUT_MATERIAL / LAYOUT_UNIFORMS
  gpu_slot builtinSlots[] = {
    { 0, GPU_SLOT_UNIFORM_BUFFER, GPU_STAGE_GRAPHICS }, // Globals
    { 1, GPU_SLOT_UNIFORM_BUFFER_DYNAMIC, GPU_STAGE_GRAPHICS }, // Cameras
    { 2, GPU_SLOT_UNIFORM_BUFFER_DYNAMIC, GPU_STAGE_GRAPHICS }, // DrawData
    { 3, GPU_SLOT_SAMPLER, GPU_STAGE_GRAPHICS } // Sampler
  };

  size_t builtinLayout = getLayout(builtinSlots, COUNTOF(builtinSlots));
  if (builtinLayout != LAYOUT_BUILTINS) lovrUnreachable();

  gpu_slot materialSlots[] = {
    { 0, GPU_SLOT_UNIFORM_BUFFER, GPU_STAGE_GRAPHICS }, // Data
    { 1, GPU_SLOT_SAMPLED_TEXTURE, GPU_STAGE_GRAPHICS }, // Color
    { 2, GPU_SLOT_SAMPLED_TEXTURE, GPU_STAGE_GRAPHICS }, // Glow
    { 3, GPU_SLOT_SAMPLED_TEXTURE, GPU_STAGE_GRAPHICS }, // Occlusion
    { 4, GPU_SLOT_SAMPLED_TEXTURE, GPU_STAGE_GRAPHICS }, // Metalness
    { 5, GPU_SLOT_SAMPLED_TEXTURE, GPU_STAGE_GRAPHICS }, // Roughness
    { 6, GPU_SLOT_SAMPLED_TEXTURE, GPU_STAGE_GRAPHICS }, // Clearcoat
    { 7, GPU_SLOT_SAMPLED_TEXTURE, GPU_STAGE_GRAPHICS } // Normal
  };

  size_t materialLayout = getLayout(materialSlots, COUNTOF(materialSlots));
  if (materialLayout != LAYOUT_MATERIAL) lovrUnreachable();

  gpu_slot uniformSlots[] = {
    { 0, GPU_SLOT_UNIFORM_BUFFER_DYNAMIC, GPU_STAGE_GRAPHICS | GPU_STAGE_COMPUTE }
  };

  size_t uniformLayout = getLayout(uniformSlots, COUNTOF(uniformSlots));
  if (uniformLayout != LAYOUT_UNIFORMS) lovrUnreachable();

  // Default buffer contents: four zeros then four ones, referenced by the
  // vertex formats below as default attribute values
  float data[] = { 0.f, 0.f, 0.f, 0.f, 1.f, 1.f, 1.f, 1.f };

  state.defaultBuffer = lovrBufferCreate(&(BufferInfo) {
    .size = sizeof(data),
    .label = "Default Buffer"
  }, NULL);

  // A frame must be active before scratch upload buffers can be allocated
  beginFrame();
  BufferView view = getBuffer(GPU_BUFFER_UPLOAD, sizeof(data), 4);
  memcpy(view.pointer, data, sizeof(data));
  gpu_copy_buffers(state.stream, view.buffer, state.defaultBuffer->gpu, view.offset, state.defaultBuffer->base, sizeof(data));

  // Default texture: 4x4 opaque white
  Image* image = lovrImageCreateRaw(4, 4, FORMAT_RGBA8, false);
  float white[4] = { 1.f, 1.f, 1.f, 1.f };
  for (uint32_t y = 0; y < 4; y++) {
    for (uint32_t x = 0; x < 4; x++) {
      lovrImageSetPixel(image, x, y, white);
    }
  }

  state.defaultTexture = lovrTextureCreate(&(TextureInfo) {
    .type = TEXTURE_2D,
    .usage = TEXTURE_SAMPLE,
    .format = FORMAT_RGBA8,
    .width = 4,
    .height = 4,
    .layers = 1,
    .mipmaps = 1,
    .srgb = false,
    .imageCount = 1,
    .images = &image,
    .label = "Default Texture"
  });

  lovrRelease(image, lovrImageDestroy);

  // defaultSamplers[0] is fully nearest-filtered, [1] is fully linear
  for (uint32_t i = 0; i < 2; i++) {
    state.defaultSamplers[i] = lovrSamplerCreate(&(SamplerInfo) {
      .min = i == 0 ? FILTER_NEAREST : FILTER_LINEAR,
      .mag = i == 0 ? FILTER_NEAREST : FILTER_LINEAR,
      .mip = i == 0 ? FILTER_NEAREST : FILTER_LINEAR,
      .wrap = { WRAP_REPEAT, WRAP_REPEAT, WRAP_REPEAT },
      .range = { 0.f, -1.f }
    });
  }

  // Built-in vertex formats.  Buffer 0 is the real vertex data; buffer 1
  // supplies default attribute values (offsets 0/16 select the zeros/ones in
  // state.defaultBuffer -- TODO confirm binding)
  state.vertexFormats[VERTEX_SHAPE] = (gpu_vertex_format) {
    .bufferCount = 2,
    .attributeCount = 5,
    .bufferStrides[0] = sizeof(ShapeVertex),
    .attributes[0] = { 0, 10, offsetof(ShapeVertex, position), GPU_TYPE_F32x3 },
    .attributes[1] = { 0, 11, offsetof(ShapeVertex, normal), GPU_TYPE_F32x3 },
    .attributes[2] = { 0, 12, offsetof(ShapeVertex, uv), GPU_TYPE_F32x2 },
    .attributes[3] = { 1, 13, 16, GPU_TYPE_F32x4 },
    .attributes[4] = { 1, 14, 0, GPU_TYPE_F32x4 }
  };

  state.vertexFormats[VERTEX_POINT] = (gpu_vertex_format) {
    .bufferCount = 2,
    .attributeCount = 5,
    .bufferStrides[0] = 12,
    .attributes[0] = { 0, 10, 0, GPU_TYPE_F32x3 },
    .attributes[1] = { 1, 11, 0, GPU_TYPE_F32x4 },
    .attributes[2] = { 1, 12, 0, GPU_TYPE_F32x4 },
    .attributes[3] = { 1, 13, 16, GPU_TYPE_F32x4 },
    .attributes[4] = { 1, 14, 0, GPU_TYPE_F32x4 }
  };

  state.vertexFormats[VERTEX_GLYPH] = (gpu_vertex_format) {
    .bufferCount = 2,
    .attributeCount = 5,
    .bufferStrides[0] = sizeof(GlyphVertex),
    .attributes[0] = { 0, 10, offsetof(GlyphVertex, position), GPU_TYPE_F32x2 },
    .attributes[1] = { 1, 11, 0, GPU_TYPE_F32x4 },
    .attributes[2] = { 0, 12, offsetof(GlyphVertex, uv), GPU_TYPE_UN16x2 },
    .attributes[3] = { 0, 13, offsetof(GlyphVertex, color), GPU_TYPE_UN8x4 },
    .attributes[4] = { 1, 14, 0, GPU_TYPE_F32x4 }
  };

  state.vertexFormats[VERTEX_MODEL] = (gpu_vertex_format) {
    .bufferCount = 2,
    .attributeCount = 5,
    .bufferStrides[0] = sizeof(ModelVertex),
    .attributes[0] = { 0, 10, offsetof(ModelVertex, position), GPU_TYPE_F32x3 },
    .attributes[1] = { 0, 11, offsetof(ModelVertex, normal), GPU_TYPE_SN10x3 },
    .attributes[2] = { 0, 12, offsetof(ModelVertex, uv), GPU_TYPE_F32x2 },
    .attributes[3] = { 0, 13, offsetof(ModelVertex, color), GPU_TYPE_UN8x4 },
    .attributes[4] = { 0, 14, offsetof(ModelVertex, tangent), GPU_TYPE_SN10x3 }
  };

  // "Empty" format: every attribute reads default values from buffer 1
  state.vertexFormats[VERTEX_EMPTY] = (gpu_vertex_format) {
    .bufferCount = 2,
    .attributeCount = 5,
    .attributes[0] = { 1, 10, 0, GPU_TYPE_F32x3 },
    .attributes[1] = { 1, 11, 0, GPU_TYPE_F32x3 },
    .attributes[2] = { 1, 12, 0, GPU_TYPE_F32x2 },
    .attributes[3] = { 1, 13, 16, GPU_TYPE_F32x4 },
    .attributes[4] = { 1, 14, 0, GPU_TYPE_F32x4 }
  };

  state.defaultMaterial = lovrMaterialCreate(&(MaterialInfo) {
    .data.color = { 1.f, 1.f, 1.f, 1.f },
    .data.uvScale = { 1.f, 1.f },
    .data.metalness = 0.f,
    .data.roughness = 1.f,
    .data.normalScale = 1.f,
    .texture = state.defaultTexture
  });

  float16Init();
#ifdef LOVR_USE_GLSLANG
  glslang_initialize_process();
#endif
  return true;
}
2023-04-26 04:45:30 +00:00
// Tears down the graphics module. Order matters throughout: all gpu_* objects
// must be destroyed before gpu_destroy(), and the headset session must be
// stopped first so it can release its Pass/swapchain textures.
void lovrGraphicsDestroy(void) {
  // Last-one-out teardown: only proceed when this release drops the refcount to zero.
  if (atomic_fetch_sub(&state.ref, 1) != 1) return;

#ifndef LOVR_DISABLE_HEADSET
  // If there's an active headset session it needs to be stopped so it can clean up its Pass and
  // swapchain textures before gpu_destroy is called. This is really hacky and should be solved
  // with module-level refcounting in the future.
  if (lovrHeadsetInterface && lovrHeadsetInterface->stop) {
    lovrHeadsetInterface->stop();
  }
#endif

  // Drain the pending readback list (singly linked, oldest first).
  Readback* readback = state.oldestReadback;
  while (readback) {
    Readback* next = readback->next;
    lovrReadbackDestroy(readback);
    readback = next;
  }

  if (state.timestamps) gpu_tally_destroy(state.timestamps);
  lovrFree(state.timestamps); // presumably NULL-tolerant, like free() -- unguarded here

  lovrRelease(state.window, lovrTextureDestroy);
  lovrRelease(state.windowPass, lovrPassDestroy);
  lovrRelease(state.defaultFont, lovrFontDestroy);
  lovrRelease(state.defaultBuffer, lovrBufferDestroy);
  lovrRelease(state.defaultTexture, lovrTextureDestroy);
  lovrRelease(state.defaultSamplers[0], lovrSamplerDestroy);
  lovrRelease(state.defaultSamplers[1], lovrSamplerDestroy);

  for (size_t i = 0; i < COUNTOF(state.defaultShaders); i++) {
    lovrRelease(state.defaultShaders[i], lovrShaderDestroy);
  }

  lovrRelease(state.defaultMaterial, lovrMaterialDestroy);

  // Material blocks: drop this block's ref on its backing BufferBlock and free it
  // when that was the last ref, unless it's the static allocator's current block
  // (the current block is destroyed in the bufferAllocators loop below).
  for (size_t i = 0; i < state.materialBlocks.length; i++) {
    MaterialBlock* block = &state.materialBlocks.data[i];
    BufferBlock* current = state.bufferAllocators[GPU_BUFFER_STATIC].current;
    if (block->view.block != current && atomic_fetch_sub(&block->view.block->ref, 1) == 1) {
      freeBlock(&state.bufferAllocators[GPU_BUFFER_STATIC], block->view.block);
    }
    gpu_bundle_pool_destroy(block->bundlePool);
    lovrFree(block->list);
    lovrFree(block->bundlePool);
    lovrFree(block->bundles);
  }
  arr_free(&state.materialBlocks);

  for (size_t i = 0; i < state.scratchTextures.length; i++) {
    gpu_texture_destroy(state.scratchTextures.data[i].texture);
    lovrFree(state.scratchTextures.data[i].texture);
  }
  arr_free(&state.scratchTextures);

  // Pipelines live in one big reserved VM range; destroy each, then release the
  // whole range at its full reserved size (not just the committed portion).
  for (size_t i = 0; i < state.pipelineCount; i++) {
    gpu_pipeline_destroy(getPipeline(i));
  }
  os_vm_free(state.pipelines, MAX_PIPELINES * gpu_sizeof_pipeline());
  map_free(&state.pipelineLookup);

  // Render pass cache: map values are gpu_pass pointers stored as integers.
  for (size_t i = 0; i < state.passLookup.size; i++) {
    if (state.passLookup.values[i] != MAP_NIL) {
      gpu_pass* pass = (gpu_pass*) (uintptr_t) state.passLookup.values[i];
      gpu_pass_destroy(pass);
      lovrFree(pass);
    }
  }
  map_free(&state.passLookup);

  // Buffer allocators: free every block on each freelist, then the active block.
  for (size_t i = 0; i < COUNTOF(state.bufferAllocators); i++) {
    BufferBlock* block = state.bufferAllocators[i].freelist;
    while (block) {
      gpu_buffer_destroy(block->handle);
      BufferBlock* next = block->next;
      lovrFree(block);
      block = next;
    }
    BufferBlock* current = state.bufferAllocators[i].current;
    if (current) {
      gpu_buffer_destroy(current->handle);
      lovrFree(current);
    }
  }

  // Descriptor layouts and their per-layout chains of bundle pools.
  for (size_t i = 0; i < state.layouts.length; i++) {
    BundlePool* pool = state.layouts.data[i].head;
    while (pool) {
      BundlePool* next = pool->next;
      gpu_bundle_pool_destroy(pool->gpu);
      lovrFree(pool->gpu);
      lovrFree(pool->bundles);
      lovrFree(pool);
      pool = next;
    }
    gpu_layout_destroy(state.layouts.data[i].gpu);
    lovrFree(state.layouts.data[i].gpu);
  }
  arr_free(&state.layouts);

  gpu_destroy();

#ifdef LOVR_USE_GLSLANG
  glslang_finalize_process();
#endif

  // Release the module's scratch arena, then zero all module state so a
  // subsequent init starts from a clean slate.
  os_vm_free(state.allocator.memory, state.allocator.limit);
  memset(&state, 0, sizeof(state));
}
2022-04-21 07:27:13 +00:00
2023-04-26 04:45:30 +00:00
bool lovrGraphicsIsInitialized ( void ) {
2023-11-24 01:07:44 +00:00
return state . ref ;
2022-11-08 03:12:11 +00:00
}
2022-04-21 09:16:17 +00:00
// Fills in the public GraphicsDevice struct from the gpu device info cached in
// state during init. `name` aliases the cached deviceName string (no copy).
void lovrGraphicsGetDevice(GraphicsDevice* device) {
  device->deviceId = state.device.deviceId;
  device->vendorId = state.device.vendorId;
  device->name = state.device.deviceName;
  device->renderer = state.device.renderer;
  device->subgroupSize = state.device.subgroupSize;
  device->discrete = state.device.discrete;
}
// Fills in the public GraphicsFeatures struct from the gpu feature flags
// cached in state during init; a straight one-to-one copy.
void lovrGraphicsGetFeatures(GraphicsFeatures* features) {
  features->textureBC = state.features.textureBC;
  features->textureASTC = state.features.textureASTC;
  features->wireframe = state.features.wireframe;
  features->depthClamp = state.features.depthClamp;
  features->depthResolve = state.features.depthResolve;
  features->indirectDrawFirstInstance = state.features.indirectDrawFirstInstance;
  features->float64 = state.features.float64;
  features->int64 = state.features.int64;
  features->int16 = state.features.int16;
}
// Fills in the public GraphicsLimits struct from the gpu limits cached in state.
// Per-stage resource counts are reduced by the slots the renderer reserves for
// its own internal bindings, and clamped to MAX_SHADER_RESOURCES, so users see
// only what is actually available to their shaders.
void lovrGraphicsGetLimits(GraphicsLimits* limits) {
  limits->textureSize2D = state.limits.textureSize2D;
  limits->textureSize3D = state.limits.textureSize3D;
  limits->textureSizeCube = state.limits.textureSizeCube;
  limits->textureLayers = state.limits.textureLayers;
  limits->renderSize[0] = state.limits.renderSize[0];
  limits->renderSize[1] = state.limits.renderSize[1];
  limits->renderSize[2] = state.limits.renderSize[2];
  // 3 uniform buffers, 7 sampled textures, and 1 sampler are subtracted --
  // presumably reserved for builtin/material bindings; confirm against the
  // builtin and material layout definitions if these counts change.
  limits->uniformBuffersPerStage = MIN(state.limits.uniformBuffersPerStage - 3, MAX_SHADER_RESOURCES);
  limits->storageBuffersPerStage = MIN(state.limits.storageBuffersPerStage, MAX_SHADER_RESOURCES);
  limits->sampledTexturesPerStage = MIN(state.limits.sampledTexturesPerStage - 7, MAX_SHADER_RESOURCES);
  limits->storageTexturesPerStage = MIN(state.limits.storageTexturesPerStage, MAX_SHADER_RESOURCES);
  limits->samplersPerStage = MIN(state.limits.samplersPerStage - 1, MAX_SHADER_RESOURCES);
  limits->resourcesPerShader = MAX_SHADER_RESOURCES;
  limits->uniformBufferRange = state.limits.uniformBufferRange;
  limits->storageBufferRange = state.limits.storageBufferRange;
  limits->uniformBufferAlign = state.limits.uniformBufferAlign;
  limits->storageBufferAlign = state.limits.storageBufferAlign;
  // Fixed by the engine's vertex format / shader interface, not by the GPU.
  limits->vertexAttributes = 10;
  limits->vertexBufferStride = state.limits.vertexBufferStride;
  limits->vertexShaderOutputs = 10;
  limits->clipDistances = state.limits.clipDistances;
  limits->cullDistances = state.limits.cullDistances;
  limits->clipAndCullDistances = state.limits.clipAndCullDistances;
  memcpy(limits->workgroupCount, state.limits.workgroupCount, 3 * sizeof(uint32_t));
  memcpy(limits->workgroupSize, state.limits.workgroupSize, 3 * sizeof(uint32_t));
  limits->totalWorkgroupSize = state.limits.totalWorkgroupSize;
  limits->computeSharedMemory = state.limits.computeSharedMemory;
  // Public name differs from the gpu backend's: "shader constants" are
  // implemented with push constants.
  limits->shaderConstantSize = state.limits.pushConstantSize;
  limits->indirectDrawCount = state.limits.indirectDrawCount;
  limits->instances = state.limits.instances;
  limits->anisotropy = state.limits.anisotropy;
  limits->pointSize = state.limits.pointSize;
}
2023-07-11 02:21:11 +00:00
uint32_t lovrGraphicsGetFormatSupport ( uint32_t format , uint32_t features ) {
uint32_t support = 0 ;
for ( uint32_t i = 0 ; i < 2 ; i + + ) {
uint8_t supports = state . features . formats [ format ] [ i ] ;
if ( features ) {
support | =
( ( ( ~ features & TEXTURE_FEATURE_SAMPLE ) | | ( supports & GPU_FEATURE_SAMPLE ) ) & &
( ( ~ features & TEXTURE_FEATURE_RENDER ) | | ( supports & GPU_FEATURE_RENDER ) ) & &
( ( ~ features & TEXTURE_FEATURE_STORAGE ) | | ( supports & GPU_FEATURE_STORAGE ) ) & &
2023-11-02 22:33:29 +00:00
( ( ~ features & TEXTURE_FEATURE_BLIT ) | | ( supports & GPU_FEATURE_BLIT ) ) ) < < i ;
2023-07-11 02:21:11 +00:00
} else {
support | = ! ! supports < < i ;
}
}
return support ;
2022-04-30 00:12:10 +00:00
}
2022-08-03 05:05:12 +00:00
// Retrieves the gpu pipeline cache blob so callers can persist it across runs.
// Thin wrapper over gpu_pipeline_get_cache; presumably follows the usual
// two-call pattern (data = NULL to query *size first) -- confirm against the
// gpu backend before relying on that.
void lovrGraphicsGetShaderCache(void* data, size_t* size) {
  gpu_pipeline_get_cache(data, size);
}
2022-05-11 22:38:01 +00:00
2022-08-14 04:10:58 +00:00
// Returns the current background color. The color is stored internally in
// linear space, so RGB is converted back to gamma space for the caller;
// alpha is not gamma-corrected.
void lovrGraphicsGetBackgroundColor(float background[4]) {
  for (uint32_t channel = 0; channel < 3; channel++) {
    background[channel] = lovrMathLinearToGamma(state.background[channel]);
  }
  background[3] = state.background[3];
}
2022-08-14 04:10:58 +00:00
// Sets the background color. The caller provides a gamma-space color; RGB is
// converted to linear space for internal storage, alpha is stored as-is.
void lovrGraphicsSetBackgroundColor(float background[4]) {
  for (uint32_t channel = 0; channel < 3; channel++) {
    state.background[channel] = lovrMathGammaToLinear(background[channel]);
  }
  state.background[3] = background[3];
}
2023-05-07 06:36:33 +00:00
// Returns whether GPU timing collection is enabled.
bool lovrGraphicsIsTimingEnabled(void) {
  return state.timingEnabled;
}
// Enables or disables GPU timing collection.
void lovrGraphicsSetTimingEnabled(bool enable) {
  state.timingEnabled = enable;
}
2023-06-23 21:41:39 +00:00
// Replays a Pass's recorded compute dispatches into a gpu command stream.
// Dispatches are processed in recording order; the most recently bound
// pipeline/bundle/uniforms are cached in locals so redundant gpu binds are
// skipped when consecutive dispatches share state.
static void recordComputePass(Pass* pass, gpu_stream* stream) {
  if (pass->computeCount == 0) {
    return;
  }

  // Bind-state cache used to elide redundant binds across dispatches.
  gpu_pipeline* pipeline = NULL;
  gpu_bundle_info* bundleInfo = NULL;
  gpu_bundle* uniformBundle = NULL;
  gpu_buffer* uniformBuffer = NULL;
  uint32_t uniformOffset = 0;

  gpu_compute_begin(stream);

  for (uint32_t i = 0; i < pass->computeCount; i++) {
    Compute* compute = &pass->computes[i];

    if (compute->shader->computePipeline != pipeline) {
      gpu_bind_pipeline(stream, compute->shader->computePipeline, GPU_PIPELINE_COMPUTE);
      pipeline = compute->shader->computePipeline;
    }

    // Shader resource bindings (bundle group 0).
    if (compute->bundleInfo != bundleInfo) {
      bundleInfo = compute->bundleInfo;
      gpu_bundle* bundle = getBundle(compute->shader->layout, bundleInfo->bindings, bundleInfo->count);
      gpu_bind_bundles(stream, compute->shader->gpu, &bundle, 0, 1, NULL, 0);
    }

    // Shader uniforms (bundle group 1). A new bundle is only built when the
    // buffer itself changes; an offset-only change reuses the same bundle with
    // a different dynamic offset.
    if (compute->uniformBuffer != uniformBuffer || compute->uniformOffset != uniformOffset) {
      if (compute->uniformBuffer != uniformBuffer) {
        uniformBundle = getBundle(LAYOUT_UNIFORMS, &(gpu_binding) {
          .number = 0,
          .type = GPU_SLOT_UNIFORM_BUFFER_DYNAMIC,
          .buffer.object = compute->uniformBuffer,
          .buffer.extent = compute->shader->uniformSize
        }, 1);
      }
      gpu_bind_bundles(stream, compute->shader->gpu, &uniformBundle, 1, 1, &compute->uniformOffset, 1);
      uniformBuffer = compute->uniformBuffer;
      uniformOffset = compute->uniformOffset;
    }

    if (compute->flags & COMPUTE_INDIRECT) {
      gpu_compute_indirect(stream, compute->indirect.buffer, compute->indirect.offset);
    } else {
      gpu_compute(stream, compute->x, compute->y, compute->z);
    }

    // Recorded barrier: make this dispatch's storage writes visible to later
    // dispatches (and indirect argument reads). Skipped after the last
    // dispatch, where there is nothing in this pass left to synchronize.
    if ((compute->flags & COMPUTE_BARRIER) && i < pass->computeCount - 1) {
      gpu_sync(stream, &(gpu_barrier) {
        .prev = GPU_PHASE_SHADER_COMPUTE,
        .next = GPU_PHASE_INDIRECT | GPU_PHASE_SHADER_COMPUTE,
        .flush = GPU_CACHE_STORAGE_WRITE,
        .clear = GPU_CACHE_INDIRECT | GPU_CACHE_UNIFORM | GPU_CACHE_TEXTURE | GPU_CACHE_STORAGE_READ
      }, 1);
    }
  }

  gpu_compute_end(stream);
}
2023-06-23 21:41:39 +00:00
static void recordRenderPass ( Pass * pass , gpu_stream * stream ) {
2023-04-30 06:02:37 +00:00
Canvas * canvas = & pass - > canvas ;
if ( canvas - > count = = 0 & & ! canvas - > depth . texture ) {
return ;
2022-04-29 05:30:31 +00:00
}
2023-04-30 06:02:37 +00:00
// Canvas
gpu_canvas target = { 0 } ;
Texture * texture = canvas - > color [ 0 ] . texture ;
for ( uint32_t i = 0 ; i < canvas - > count ; i + + , texture = canvas - > color [ i ] . texture ) {
target . color [ i ] = ( gpu_color_attachment ) {
2024-03-02 01:13:06 +00:00
. texture = canvas - > resolve ? getScratchTexture ( stream , canvas , texture - > info . format , texture - > info . srgb ) : texture - > renderView ,
2023-04-30 06:02:37 +00:00
. resolve = canvas - > resolve ? texture - > renderView : NULL ,
. clear [ 0 ] = canvas - > color [ i ] . clear [ 0 ] ,
. clear [ 1 ] = canvas - > color [ i ] . clear [ 1 ] ,
. clear [ 2 ] = canvas - > color [ i ] . clear [ 2 ] ,
. clear [ 3 ] = canvas - > color [ i ] . clear [ 3 ]
} ;
2022-06-17 06:49:09 +00:00
}
2023-04-30 06:02:37 +00:00
if ( ( texture = canvas - > depth . texture ) ! = NULL | | canvas - > depth . format ) {
target . depth = ( gpu_depth_attachment ) {
2024-03-02 01:13:06 +00:00
. texture = canvas - > resolve | | ! texture ? getScratchTexture ( stream , canvas , canvas - > depth . format , false ) : texture - > renderView ,
2023-04-30 06:02:37 +00:00
. resolve = canvas - > resolve & & texture ? texture - > renderView : NULL ,
2024-01-12 01:22:58 +00:00
. clear = canvas - > depth . clear
2023-04-30 06:02:37 +00:00
} ;
2022-06-21 01:26:15 +00:00
}
2024-01-12 01:22:58 +00:00
target . pass = pass - > gpu ;
target . width = canvas - > width ;
target . height = canvas - > height ;
2023-06-23 21:41:39 +00:00
// Cameras
Camera * camera = pass - > cameras ;
for ( uint32_t c = 0 ; c < pass - > cameraCount ; c + + ) {
for ( uint32_t v = 0 ; v < canvas - > views ; v + + , camera + + ) {
mat4_init ( camera - > viewProjection , camera - > projection ) ;
mat4_init ( camera - > inverseProjection , camera - > projection ) ;
mat4_mul ( camera - > viewProjection , camera - > viewMatrix ) ;
mat4_invert ( camera - > inverseProjection ) ;
}
}
// Frustum Culling
uint32_t activeDrawCount = 0 ;
uint16_t * activeDraws = tempAlloc ( & state . allocator , pass - > drawCount * sizeof ( uint16_t ) ) ;
if ( pass - > flags & NEEDS_VIEW_CULL ) {
typedef struct { float planes [ 6 ] [ 4 ] ; } Frustum ;
Frustum * frusta = tempAlloc ( & state . allocator , canvas - > views * sizeof ( Frustum ) ) ;
uint32_t drawIndex = 0 ;
for ( uint32_t c = 0 ; c < pass - > cameraCount ; c + + ) {
for ( uint32_t v = 0 ; v < canvas - > views ; v + + ) {
float * m = pass - > cameras [ c * canvas - > views + v ] . viewProjection ;
memcpy ( frusta [ v ] . planes , ( float [ 6 ] [ 4 ] ) {
{ ( m [ 3 ] + m [ 0 ] ) , ( m [ 7 ] + m [ 4 ] ) , ( m [ 11 ] + m [ 8 ] ) , ( m [ 15 ] + m [ 12 ] ) } , // Left
{ ( m [ 3 ] - m [ 0 ] ) , ( m [ 7 ] - m [ 4 ] ) , ( m [ 11 ] - m [ 8 ] ) , ( m [ 15 ] - m [ 12 ] ) } , // Right
{ ( m [ 3 ] + m [ 1 ] ) , ( m [ 7 ] + m [ 5 ] ) , ( m [ 11 ] + m [ 9 ] ) , ( m [ 15 ] + m [ 13 ] ) } , // Bottom
{ ( m [ 3 ] - m [ 1 ] ) , ( m [ 7 ] - m [ 5 ] ) , ( m [ 11 ] - m [ 9 ] ) , ( m [ 15 ] - m [ 13 ] ) } , // Top
{ m [ 2 ] , m [ 6 ] , m [ 10 ] , m [ 14 ] } , // Near
{ ( m [ 3 ] - m [ 2 ] ) , ( m [ 7 ] - m [ 6 ] ) , ( m [ 11 ] - m [ 10 ] ) , ( m [ 15 ] - m [ 14 ] ) } // Far
} , sizeof ( Frustum ) ) ;
}
while ( drawIndex < pass - > drawCount ) {
2023-11-18 10:02:05 +00:00
Draw * draw = & pass - > draws [ drawIndex ] ;
2023-06-23 21:41:39 +00:00
if ( draw - > camera ! = c ) {
break ;
}
if ( ~ draw - > flags & DRAW_HAS_BOUNDS ) {
activeDraws [ activeDrawCount + + ] = drawIndex + + ;
continue ;
}
2023-10-12 01:21:11 +00:00
float * center = draw - > bounds + 0 ;
float * extent = draw - > bounds + 3 ;
float corners [ 8 ] [ 3 ] = {
{ center [ 0 ] - extent [ 0 ] , center [ 1 ] - extent [ 1 ] , center [ 2 ] - extent [ 2 ] } ,
{ center [ 0 ] - extent [ 0 ] , center [ 1 ] - extent [ 1 ] , center [ 2 ] + extent [ 2 ] } ,
{ center [ 0 ] - extent [ 0 ] , center [ 1 ] + extent [ 1 ] , center [ 2 ] - extent [ 2 ] } ,
{ center [ 0 ] - extent [ 0 ] , center [ 1 ] + extent [ 1 ] , center [ 2 ] + extent [ 2 ] } ,
{ center [ 0 ] + extent [ 0 ] , center [ 1 ] - extent [ 1 ] , center [ 2 ] - extent [ 2 ] } ,
{ center [ 0 ] + extent [ 0 ] , center [ 1 ] - extent [ 1 ] , center [ 2 ] + extent [ 2 ] } ,
{ center [ 0 ] + extent [ 0 ] , center [ 1 ] + extent [ 1 ] , center [ 2 ] - extent [ 2 ] } ,
{ center [ 0 ] + extent [ 0 ] , center [ 1 ] + extent [ 1 ] , center [ 2 ] + extent [ 2 ] }
} ;
for ( uint32_t i = 0 ; i < COUNTOF ( corners ) ; i + + ) {
mat4_mulPoint ( draw - > transform , corners [ i ] ) ;
}
2023-06-23 21:41:39 +00:00
uint32_t visible = canvas - > views ;
for ( uint32_t v = 0 ; v < canvas - > views ; v + + ) {
for ( uint32_t p = 0 ; p < 6 ; p + + ) {
2023-10-12 01:21:11 +00:00
bool inside = false ;
2023-06-23 21:41:39 +00:00
2023-10-12 01:21:11 +00:00
for ( uint32_t c = 0 ; c < COUNTOF ( corners ) ; c + + ) {
if ( vec3_dot ( corners [ c ] , frusta [ v ] . planes [ p ] ) + frusta [ v ] . planes [ p ] [ 3 ] > 0.f ) {
inside = true ;
break ;
}
}
2023-06-23 21:41:39 +00:00
if ( ! inside ) {
visible - - ;
break ;
}
}
}
if ( visible ) {
activeDraws [ activeDrawCount + + ] = drawIndex ;
}
drawIndex + + ;
}
}
} else {
for ( uint32_t i = 0 ; i < pass - > drawCount ; i + + ) {
activeDraws [ activeDrawCount + + ] = i ;
}
}
pass - > stats . drawsCulled = pass - > drawCount - activeDrawCount ;
if ( activeDrawCount = = 0 ) {
2023-04-30 06:02:37 +00:00
gpu_render_begin ( stream , & target ) ;
2024-01-12 01:22:58 +00:00
gpu_render_end ( stream , & target ) ;
2023-04-30 06:02:37 +00:00
return ;
2022-07-04 00:26:31 +00:00
}
2023-04-30 06:02:37 +00:00
// Builtins
2022-07-31 20:02:41 +00:00
2023-04-30 06:02:37 +00:00
gpu_binding builtins [ ] = {
{ 0 , GPU_SLOT_UNIFORM_BUFFER , . buffer = { 0 } } ,
2023-06-06 02:44:53 +00:00
{ 1 , GPU_SLOT_UNIFORM_BUFFER_DYNAMIC , . buffer = { 0 } } ,
2023-04-30 06:02:37 +00:00
{ 2 , GPU_SLOT_UNIFORM_BUFFER_DYNAMIC , . buffer = { 0 } } ,
{ 3 , GPU_SLOT_SAMPLER , . sampler = pass - > sampler ? pass - > sampler - > gpu : state . defaultSamplers [ FILTER_LINEAR ] - > gpu }
} ;
2022-07-31 20:02:41 +00:00
2023-12-30 20:39:50 +00:00
BufferView view ;
2023-04-30 06:02:37 +00:00
size_t align = state . limits . uniformBufferAlign ;
2022-06-12 02:07:46 +00:00
2023-04-30 06:02:37 +00:00
// Globals
2023-12-30 20:39:50 +00:00
view = getBuffer ( GPU_BUFFER_STREAM , sizeof ( Globals ) , align ) ;
builtins [ 0 ] . buffer = ( gpu_buffer_binding ) { view . buffer , view . offset , view . extent } ;
Globals * global = view . pointer ;
2023-04-30 06:02:37 +00:00
global - > resolution [ 0 ] = canvas - > width ;
global - > resolution [ 1 ] = canvas - > height ;
global - > time = lovrHeadsetInterface ? lovrHeadsetInterface - > getDisplayTime ( ) : os_get_time ( ) ;
2022-08-09 04:26:07 +00:00
2023-10-02 16:07:50 +00:00
// Cameras
2023-12-30 20:39:50 +00:00
view = getBuffer ( GPU_BUFFER_STREAM , pass - > cameraCount * canvas - > views * sizeof ( Camera ) , align ) ;
builtins [ 1 ] . buffer = ( gpu_buffer_binding ) { view . buffer , view . offset , view . extent } ;
memcpy ( view . pointer , pass - > cameras , pass - > cameraCount * canvas - > views * sizeof ( Camera ) ) ;
2023-04-30 06:02:37 +00:00
// DrawData
2023-10-11 20:55:25 +00:00
uint32_t alignedDrawCount = activeDrawCount < = 256 ? activeDrawCount : ALIGN ( activeDrawCount , 256 ) ;
2023-12-30 20:39:50 +00:00
view = getBuffer ( GPU_BUFFER_STREAM , alignedDrawCount * sizeof ( DrawData ) , align ) ;
builtins [ 2 ] . buffer = ( gpu_buffer_binding ) { view . buffer , view . offset , MIN ( activeDrawCount , 256 ) * sizeof ( DrawData ) } ;
DrawData * data = view . pointer ;
2023-04-30 06:02:37 +00:00
2023-06-23 21:41:39 +00:00
for ( uint32_t i = 0 ; i < activeDrawCount ; i + + , data + + ) {
2023-11-18 10:02:05 +00:00
Draw * draw = & pass - > draws [ activeDraws [ i ] ] ;
2023-08-02 01:45:37 +00:00
// transform is provided as 4x3 row-major matrix for packing reasons, need to transpose
data - > transform [ 0 ] = draw - > transform [ 0 ] ;
data - > transform [ 1 ] = draw - > transform [ 4 ] ;
data - > transform [ 2 ] = draw - > transform [ 8 ] ;
data - > transform [ 3 ] = draw - > transform [ 12 ] ;
data - > transform [ 4 ] = draw - > transform [ 1 ] ;
data - > transform [ 5 ] = draw - > transform [ 5 ] ;
data - > transform [ 6 ] = draw - > transform [ 9 ] ;
data - > transform [ 7 ] = draw - > transform [ 13 ] ;
data - > transform [ 8 ] = draw - > transform [ 2 ] ;
data - > transform [ 9 ] = draw - > transform [ 6 ] ;
data - > transform [ 10 ] = draw - > transform [ 10 ] ;
data - > transform [ 11 ] = draw - > transform [ 14 ] ;
data - > color [ 0 ] = draw - > color [ 0 ] ;
data - > color [ 1 ] = draw - > color [ 1 ] ;
data - > color [ 2 ] = draw - > color [ 2 ] ;
data - > color [ 3 ] = draw - > color [ 3 ] ;
2022-04-29 05:30:31 +00:00
}
2024-02-24 19:49:11 +00:00
gpu_bundle * builtinBundle = getBundle ( LAYOUT_BUILTINS , builtins , COUNTOF ( builtins ) ) ;
2022-06-12 05:55:43 +00:00
2023-04-30 06:02:37 +00:00
// Pipelines
2022-06-12 05:55:43 +00:00
2023-11-18 10:02:05 +00:00
if ( ! pass - > draws [ pass - > drawCount - 1 ] . pipeline ) {
uint32_t first = 0 ;
2022-06-12 05:55:43 +00:00
2023-11-18 10:02:05 +00:00
while ( pass - > draws [ first ] . pipeline ) {
first + + ; // TODO could binary search or cache
2023-04-30 06:02:37 +00:00
}
2022-06-12 05:55:43 +00:00
2023-11-18 10:02:05 +00:00
for ( uint32_t i = first ; i < pass - > drawCount ; i + + ) {
Draw * prev = & pass - > draws [ i - 1 ] ;
Draw * draw = & pass - > draws [ i ] ;
2023-04-30 06:02:37 +00:00
2023-11-18 10:02:05 +00:00
if ( i > 0 & & draw - > pipelineInfo = = prev - > pipelineInfo ) {
2023-04-30 06:02:37 +00:00
draw - > pipeline = prev - > pipeline ;
2022-06-12 05:55:43 +00:00
continue ;
}
2023-04-30 06:02:37 +00:00
uint64_t hash = hash64 ( draw - > pipelineInfo , sizeof ( gpu_pipeline_info ) ) ;
uint64_t index = map_get ( & state . pipelineLookup , hash ) ;
if ( index = = MAP_NIL ) {
2023-11-30 08:14:06 +00:00
lovrAssert ( state . pipelineCount < MAX_PIPELINES , " Too many pipelines! " ) ;
index = state . pipelineCount + + ;
os_vm_commit ( state . pipelines , state . pipelineCount * gpu_sizeof_pipeline ( ) ) ;
gpu_pipeline_init_graphics ( getPipeline ( index ) , draw - > pipelineInfo ) ;
2023-04-30 06:02:37 +00:00
map_set ( & state . pipelineLookup , hash , index ) ;
2022-06-12 05:55:43 +00:00
}
2023-04-30 06:02:37 +00:00
2023-11-30 08:14:06 +00:00
draw - > pipeline = getPipeline ( index ) ;
2022-08-24 01:49:11 +00:00
}
2023-04-30 06:02:37 +00:00
}
2022-08-24 01:49:11 +00:00
2023-04-30 06:02:37 +00:00
// Bundles
2022-08-26 04:57:15 +00:00
2023-04-30 06:02:37 +00:00
Draw * prev = NULL ;
2023-06-23 21:41:39 +00:00
for ( uint32_t i = 0 ; i < activeDrawCount ; i + + ) {
2023-11-18 10:02:05 +00:00
Draw * draw = & pass - > draws [ activeDraws [ i ] ] ;
2023-04-30 06:02:37 +00:00
if ( i > 0 & & draw - > bundleInfo = = prev - > bundleInfo ) {
draw - > bundle = prev - > bundle ;
continue ;
}
if ( draw - > bundleInfo ) {
2023-10-02 16:07:50 +00:00
draw - > bundle = getBundle ( draw - > shader - > layout , draw - > bundleInfo - > bindings , draw - > bundleInfo - > count ) ;
2023-04-30 06:02:37 +00:00
} else {
draw - > bundle = NULL ;
}
prev = draw ;
}
// Tally
2023-06-01 01:56:09 +00:00
if ( pass - > tally . active ) {
lovrPassFinishTally ( pass ) ;
}
2023-10-02 17:20:52 +00:00
if ( pass - > tally . buffer & & pass - > tally . count > 0 ) {
2023-04-30 06:02:37 +00:00
if ( ! pass - > tally . gpu ) {
2024-03-11 21:38:00 +00:00
pass - > tally . gpu = lovrMalloc ( gpu_sizeof_tally ( ) ) ;
2023-04-30 06:02:37 +00:00
gpu_tally_init ( pass - > tally . gpu , & ( gpu_tally_info ) {
. type = GPU_TALLY_PIXEL ,
. count = MAX_TALLIES * state . limits . renderSize [ 2 ]
} ) ;
2023-12-30 22:17:20 +00:00
BufferInfo info = { . size = MAX_TALLIES * state . limits . renderSize [ 2 ] * sizeof ( uint32_t ) } ;
2024-01-18 00:05:37 +00:00
pass - > tally . tempBuffer = lovrBufferCreate ( & info , NULL ) ;
2023-04-30 06:02:37 +00:00
}
2023-05-04 05:24:04 +00:00
gpu_clear_tally ( stream , pass - > tally . gpu , 0 , pass - > tally . count * canvas - > views ) ;
2023-04-30 06:02:37 +00:00
}
// Do the thing!
gpu_render_begin ( stream , & target ) ;
float defaultViewport [ 6 ] = { 0.f , 0.f , ( float ) canvas - > width , ( float ) canvas - > height , 0.f , 1.f } ;
uint32_t defaultScissor [ 4 ] = { 0 , 0 , canvas - > width , canvas - > height } ;
2023-09-27 07:24:01 +00:00
float * viewport = pass - > viewport [ 2 ] = = 0.f & & pass - > viewport [ 3 ] = = 0.f ? defaultViewport : pass - > viewport ;
uint32_t * scissor = pass - > scissor [ 2 ] = = 0 & & pass - > scissor [ 3 ] = = 0 ? defaultScissor : pass - > scissor ;
2023-04-30 06:02:37 +00:00
gpu_set_viewport ( stream , viewport , viewport + 4 ) ;
gpu_set_scissor ( stream , scissor ) ;
2023-06-06 02:44:53 +00:00
uint16_t cameraIndex = 0xffff ;
2023-04-30 06:02:37 +00:00
uint32_t tally = ~ 0u ;
gpu_pipeline * pipeline = NULL ;
gpu_bundle * bundle = NULL ;
Material * material = NULL ;
gpu_buffer * vertexBuffer = NULL ;
2024-03-10 17:36:13 +00:00
uint32_t vertexBufferOffset = 0 ;
2023-04-30 06:02:37 +00:00
gpu_buffer * indexBuffer = NULL ;
2024-02-20 23:07:30 +00:00
gpu_buffer * uniformBuffer = NULL ;
uint32_t uniformOffset = 0 ;
gpu_bundle * uniformBundle = NULL ;
2023-04-30 06:02:37 +00:00
2023-12-30 22:17:20 +00:00
gpu_bind_vertex_buffers ( stream , & state . defaultBuffer - > gpu , & state . defaultBuffer - > base , 1 , 1 ) ;
2022-08-24 01:49:11 +00:00
2023-06-23 21:41:39 +00:00
for ( uint32_t i = 0 ; i < activeDrawCount ; i + + ) {
2023-11-18 10:02:05 +00:00
Draw * draw = & pass - > draws [ activeDraws [ i ] ] ;
2023-04-30 06:02:37 +00:00
2023-10-02 17:20:52 +00:00
if ( pass - > tally . buffer & & draw - > tally ! = tally ) {
2023-04-30 06:02:37 +00:00
if ( tally ! = ~ 0u ) gpu_tally_finish ( stream , pass - > tally . gpu , tally * canvas - > views ) ;
if ( draw - > tally ! = ~ 0u ) gpu_tally_begin ( stream , pass - > tally . gpu , draw - > tally * canvas - > views ) ;
tally = draw - > tally ;
}
if ( draw - > pipeline ! = pipeline ) {
gpu_bind_pipeline ( stream , draw - > pipeline , GPU_PIPELINE_GRAPHICS ) ;
pipeline = draw - > pipeline ;
}
2024-02-20 23:07:30 +00:00
if ( ( i & 0xff ) = = 0 | | draw - > camera ! = cameraIndex ) {
2023-08-02 01:45:37 +00:00
uint32_t dynamicOffsets [ ] = { draw - > camera * canvas - > views * sizeof ( Camera ) , ( i > > 8 ) * 256 * sizeof ( DrawData ) } ;
2023-06-06 02:44:53 +00:00
gpu_bind_bundles ( stream , draw - > shader - > gpu , & builtinBundle , 0 , 1 , dynamicOffsets , COUNTOF ( dynamicOffsets ) ) ;
cameraIndex = draw - > camera ;
2023-04-30 06:02:37 +00:00
}
2024-02-20 23:07:30 +00:00
if ( draw - > material ! = material ) {
2023-04-30 06:02:37 +00:00
gpu_bind_bundles ( stream , draw - > shader - > gpu , & draw - > material - > bundle , 1 , 1 , NULL , 0 ) ;
material = draw - > material ;
}
2024-02-20 23:07:30 +00:00
if ( draw - > bundle & & ( draw - > bundle ! = bundle ) ) {
2023-04-30 06:02:37 +00:00
gpu_bind_bundles ( stream , draw - > shader - > gpu , & draw - > bundle , 2 , 1 , NULL , 0 ) ;
bundle = draw - > bundle ;
}
2024-02-20 23:07:30 +00:00
if ( draw - > uniformBuffer ! = uniformBuffer | | draw - > uniformOffset ! = uniformOffset ) {
if ( draw - > uniformBuffer ! = uniformBuffer ) {
uniformBundle = getBundle ( LAYOUT_UNIFORMS , & ( gpu_binding ) {
. number = 0 ,
. type = GPU_SLOT_UNIFORM_BUFFER_DYNAMIC ,
. buffer . object = draw - > uniformBuffer ,
. buffer . extent = draw - > shader - > uniformSize
} , 1 ) ;
}
gpu_bind_bundles ( stream , draw - > shader - > gpu , & uniformBundle , 3 , 1 , & draw - > uniformOffset , 1 ) ;
uniformBuffer = draw - > uniformBuffer ;
uniformOffset = draw - > uniformOffset ;
}
2024-03-10 17:36:13 +00:00
if ( draw - > vertexBuffer & & ( draw - > vertexBuffer ! = vertexBuffer | | draw - > vertexBufferOffset ! = vertexBufferOffset ) ) {
gpu_bind_vertex_buffers ( stream , & draw - > vertexBuffer , & draw - > vertexBufferOffset , 0 , 1 ) ;
2023-04-30 06:02:37 +00:00
vertexBuffer = draw - > vertexBuffer ;
2024-03-10 17:36:13 +00:00
vertexBufferOffset = draw - > vertexBufferOffset ;
2023-04-30 06:02:37 +00:00
}
if ( draw - > indexBuffer & & draw - > indexBuffer ! = indexBuffer ) {
gpu_index_type indexType = ( draw - > flags & DRAW_INDEX32 ) ? GPU_INDEX_U32 : GPU_INDEX_U16 ;
gpu_bind_index_buffer ( stream , draw - > indexBuffer , 0 , indexType ) ;
indexBuffer = draw - > indexBuffer ;
}
2024-03-10 17:36:13 +00:00
uint32_t DrawID = i & 0xff ;
gpu_push_constants ( stream , draw - > shader - > gpu , & DrawID , sizeof ( DrawID ) ) ;
2023-04-30 06:02:37 +00:00
if ( draw - > flags & DRAW_INDIRECT ) {
if ( draw - > indexBuffer ) {
gpu_draw_indirect_indexed ( stream , draw - > indirect . buffer , draw - > indirect . offset , draw - > indirect . count , draw - > indirect . stride ) ;
} else {
gpu_draw_indirect ( stream , draw - > indirect . buffer , draw - > indirect . offset , draw - > indirect . count , draw - > indirect . stride ) ;
}
} else {
if ( draw - > indexBuffer ) {
2024-02-20 23:14:39 +00:00
gpu_draw_indexed ( stream , draw - > count , draw - > instances , draw - > start , draw - > baseVertex , 0 ) ;
2023-04-30 06:02:37 +00:00
} else {
2024-02-20 23:14:39 +00:00
gpu_draw ( stream , draw - > count , draw - > instances , draw - > start , 0 ) ;
2022-08-24 01:49:11 +00:00
}
2022-06-12 05:55:43 +00:00
}
2022-06-12 02:07:46 +00:00
}
2023-04-30 06:02:37 +00:00
if ( tally ! = ~ 0u ) {
2023-05-04 05:24:04 +00:00
gpu_tally_finish ( stream , pass - > tally . gpu , tally * canvas - > views ) ;
2023-04-30 06:02:37 +00:00
}
2024-01-12 01:22:58 +00:00
gpu_render_end ( stream , & target ) ;
2023-04-30 06:02:37 +00:00
// Automipmap
2024-01-14 22:51:23 +00:00
bool synchronized = false ;
for ( uint32_t t = 0 ; t < canvas - > count ; t + + ) {
if ( canvas - > color [ t ] . texture - > info . mipmaps > 1 ) {
if ( ! synchronized ) {
synchronized = true ;
gpu_sync ( stream , & ( gpu_barrier ) {
. prev = GPU_PHASE_COLOR ,
. next = GPU_PHASE_BLIT ,
. flush = GPU_CACHE_COLOR_WRITE ,
. clear = GPU_CACHE_TRANSFER_READ
} , 1 ) ;
}
2023-04-30 06:02:37 +00:00
2024-01-14 22:51:23 +00:00
mipmapTexture ( stream , canvas - > color [ t ] . texture , 0 , ~ 0u ) ;
2023-04-30 06:02:37 +00:00
}
}
texture = canvas - > depth . texture ;
2024-01-14 22:51:23 +00:00
if ( canvas - > depth . texture & & canvas - > depth . texture - > info . mipmaps > 1 ) {
gpu_sync ( stream , & ( gpu_barrier ) {
. prev = GPU_PHASE_DEPTH_EARLY | GPU_PHASE_DEPTH_LATE ,
. next = GPU_PHASE_BLIT ,
. flush = GPU_CACHE_DEPTH_WRITE ,
. clear = GPU_CACHE_TRANSFER_READ
} , 1 ) ;
mipmapTexture ( stream , canvas - > depth . texture , 0 , ~ 0u ) ;
2023-04-30 06:02:37 +00:00
}
// Tally copy
2023-10-02 17:20:52 +00:00
if ( pass - > tally . buffer & & pass - > tally . count > 0 ) {
Tally * tally = & pass - > tally ;
uint32_t count = MIN ( tally - > count , ( tally - > buffer - > info . size - tally - > bufferOffset ) / 4 ) ;
2024-01-18 00:05:37 +00:00
Buffer * tempBuffer = pass - > tally . tempBuffer ;
2023-10-02 17:20:52 +00:00
2024-01-18 00:05:37 +00:00
gpu_copy_tally_buffer ( stream , tally - > gpu , tempBuffer - > gpu , 0 , tempBuffer - > base , count * canvas - > views ) ;
2023-04-30 06:02:37 +00:00
gpu_barrier barrier = {
2024-01-14 22:51:23 +00:00
. prev = GPU_PHASE_COPY ,
2023-04-30 06:02:37 +00:00
. next = GPU_PHASE_SHADER_COMPUTE ,
. flush = GPU_CACHE_TRANSFER_WRITE ,
. clear = GPU_CACHE_STORAGE_READ
} ;
Access access = {
2023-10-02 17:20:52 +00:00
. sync = & tally - > buffer - > sync ,
2023-12-01 03:28:06 +00:00
. object = tally - > buffer ,
2023-04-30 06:02:37 +00:00
. phase = GPU_PHASE_SHADER_COMPUTE ,
. cache = GPU_CACHE_STORAGE_WRITE
} ;
syncResource ( & access , & barrier ) ;
gpu_sync ( stream , & barrier , 1 ) ;
gpu_binding bindings [ ] = {
2024-01-18 00:05:37 +00:00
{ 0 , GPU_SLOT_STORAGE_BUFFER , . buffer = { tempBuffer - > gpu , tempBuffer - > base , count * canvas - > views * sizeof ( uint32_t ) } } ,
2023-12-30 22:17:20 +00:00
{ 1 , GPU_SLOT_STORAGE_BUFFER , . buffer = { tally - > buffer - > gpu , tally - > buffer - > base + tally - > bufferOffset , count * sizeof ( uint32_t ) } }
2023-04-30 06:02:37 +00:00
} ;
2023-10-02 16:07:50 +00:00
Shader * shader = lovrGraphicsGetDefaultShader ( SHADER_TALLY_MERGE ) ;
gpu_bundle * bundle = getBundle ( shader - > layout , bindings , COUNTOF ( bindings ) ) ;
2023-10-02 17:20:52 +00:00
uint32_t constants [ 2 ] = { count , canvas - > views } ;
2023-04-30 06:02:37 +00:00
gpu_compute_begin ( stream ) ;
gpu_bind_pipeline ( stream , shader - > computePipeline , GPU_PIPELINE_COMPUTE ) ;
gpu_bind_bundles ( stream , shader - > gpu , & bundle , 0 , 1 , NULL , 0 ) ;
2023-10-02 17:20:52 +00:00
gpu_push_constants ( stream , shader - > gpu , constants , sizeof ( constants ) ) ;
gpu_compute ( stream , ( count + 31 ) / 32 , 1 , 1 ) ;
2023-04-30 06:02:37 +00:00
gpu_compute_end ( stream ) ;
2022-08-07 06:25:49 +00:00
}
2023-04-30 06:02:37 +00:00
}
2023-12-30 20:39:50 +00:00
static Readback * lovrReadbackCreateTimestamp ( TimingInfo * passes , uint32_t count , BufferView view ) ;
2023-05-07 06:36:33 +00:00
2023-04-30 06:02:37 +00:00
// Submits a batch of recorded Passes to the GPU as one frame of work.
//
// Stream layout (at most count + 3 streams):
//   [optional transfer-barrier stream][state.stream][one stream per pass][optional xr/timing stream]
//
// Responsibilities, in order:
//   1. Compute the compute/render barriers each pass needs by walking its access lists.
//   2. Acquire OpenXR swapchain textures used as attachments.
//   3. (Optionally) set up timestamp tallies for GPU timing.
//   4. Record each pass into its own stream, bracketed by its barriers (and timestamps).
//   5. Release OpenXR swapchain textures and read back timestamps.
//   6. Reset per-resource barrier pointers to the frame-global default and submit.
//
// Calling with count == 0 just flushes state.stream (see lovrGraphicsWait).
void lovrGraphicsSubmit(Pass** passes, uint32_t count) {
  beginFrame();

  bool xrCanvas = false;
  uint32_t streamCount = 0;
  // +3: transfer-barrier stream, state.stream, and the trailing xr/timing stream
  uint32_t maxStreams = count + 3;
  gpu_stream** streams = tempAlloc(&state.allocator, maxStreams * sizeof(gpu_stream*));

  gpu_barrier* computeBarriers = tempAlloc(&state.allocator, count * sizeof(gpu_barrier));
  gpu_barrier* renderBarriers = tempAlloc(&state.allocator, count * sizeof(gpu_barrier));
  if (count > 0) {
    memset(computeBarriers, 0, count * sizeof(gpu_barrier));
    memset(renderBarriers, 0, count * sizeof(gpu_barrier));
  }

  // Pending transfer barrier goes in its own stream so it executes before everything else
  if (state.transferBarrier.prev != 0 && state.transferBarrier.next != 0) {
    gpu_stream* stream = streams[streamCount++] = gpu_stream_begin(NULL);
    gpu_sync(stream, &state.transferBarrier, 1);
    gpu_stream_end(stream);
  }

  streams[streamCount++] = state.stream;

  // Synchronization
  for (uint32_t i = 0; i < count; i++) {
    Pass* pass = passes[i];
    Canvas* canvas = &pass->canvas;

    state.shouldPresent |= pass == state.windowPass;

    // Compute: merge each resource's required barrier into this pass's compute barrier
    for (AccessBlock* block = pass->access[ACCESS_COMPUTE]; block != NULL; block = block->next) {
      for (uint64_t j = 0; j < block->count; j++) {
        Access* access = &block->list[j];
        // Only retarget the resource's barrier if syncResource reported a hazard
        if (access->sync->barrier != &computeBarriers[i] && syncResource(access, access->sync->barrier)) {
          access->sync->barrier = &computeBarriers[i];
        }
      }
    }

    // Color attachments (window surface sync is handled by the swapchain)
    for (uint32_t t = 0; t < canvas->count; t++) {
      if (canvas->color[t].texture == state.window) continue;

      Texture* texture = canvas->color[t].texture;

      Access access = {
        .sync = &texture->root->sync,
        .object = texture,
        .phase = GPU_PHASE_COLOR,
        // LOAD_KEEP without resolve also reads the existing attachment contents
        .cache = GPU_CACHE_COLOR_WRITE | ((!canvas->resolve && canvas->color[t].load == LOAD_KEEP) ? GPU_CACHE_COLOR_READ : 0)
      };

      syncResource(&access, access.sync->barrier);
      access.sync->barrier = &renderBarriers[i];

      // Automipmapping blits the texture right after the pass (see recordRenderPass)
      if (texture->info.mipmaps > 1) {
        access.sync->writePhase = GPU_PHASE_BLIT;
        access.sync->pendingWrite = GPU_CACHE_TRANSFER_WRITE;
      }

      if (texture->info.xr && !texture->xrAcquired) {
        gpu_xr_acquire(state.stream, texture->gpu);
        texture->xrAcquired = true;
        xrCanvas = true;
      }
    }

    // Depth attachment
    if (canvas->depth.texture) {
      Texture* texture = canvas->depth.texture;
      Access access = {
        .sync = &texture->root->sync,
        .object = texture
      };

      if (canvas->resolve) {
        access.phase = GPU_PHASE_COLOR; // Depth resolve operations act like color resolves w.r.t. sync
        access.cache = GPU_CACHE_COLOR_WRITE;
      } else {
        access.phase = canvas->depth.load == LOAD_KEEP ? GPU_PHASE_DEPTH_EARLY : GPU_PHASE_DEPTH_LATE;
        access.cache = GPU_CACHE_DEPTH_WRITE | (canvas->depth.load == LOAD_KEEP ? GPU_CACHE_DEPTH_READ : 0);
      }

      syncResource(&access, access.sync->barrier);
      access.sync->barrier = &renderBarriers[i];

      // Automipmapping blits the texture right after the pass
      if (texture->info.mipmaps > 1) {
        access.sync->writePhase = GPU_PHASE_BLIT;
        access.sync->pendingWrite = GPU_CACHE_TRANSFER_WRITE;
      }

      if (texture->info.xr && !texture->xrAcquired) {
        gpu_xr_acquire(state.stream, texture->gpu);
        texture->xrAcquired = true;
        xrCanvas = true;
      }
    }

    // Render resources (all read-only)
    for (AccessBlock* block = pass->access[ACCESS_RENDER]; block != NULL; block = block->next) {
      for (uint64_t j = 0; j < block->count; j++) {
        syncResource(&block->list[j], block->list[j].sync->barrier);
      }
    }
  }

  TimingInfo* times = NULL;

  if (state.timingEnabled && count > 0) {
    times = lovrMalloc(count * sizeof(TimingInfo));
    for (uint32_t i = 0; i < count; i++) {
      times[i].pass = passes[i];
      lovrRetain(passes[i]); // Released by the timestamp readback when it completes
    }

    // Grow the timestamp tally pool if this batch needs more slots (2 per pass)
    uint32_t timestampCount = 2 * count;
    if (timestampCount > state.timestampCount) {
      if (state.timestamps) {
        gpu_tally_destroy(state.timestamps);
      } else {
        state.timestamps = lovrMalloc(gpu_sizeof_tally());
      }
      gpu_tally_info info = {
        .type = GPU_TALLY_TIME,
        .count = timestampCount
      };
      gpu_tally_init(state.timestamps, &info);
      state.timestampCount = timestampCount;
    }

    gpu_clear_tally(state.stream, state.timestamps, 0, timestampCount);
  }

  gpu_sync(state.stream, &state.barrier, 1);
  gpu_stream_end(state.stream);

  // Record each pass into its own stream, bracketed by its barriers/timestamps
  for (uint32_t i = 0; i < count; i++) {
    gpu_stream* stream = streams[streamCount++] = gpu_stream_begin(NULL);

    if (state.timingEnabled) {
      times[i].cpuTime = os_get_time();
      gpu_tally_mark(stream, state.timestamps, 2 * i + 0);
    }

    recordComputePass(passes[i], stream);
    gpu_sync(stream, &computeBarriers[i], 1);

    recordRenderPass(passes[i], stream);
    gpu_sync(stream, &renderBarriers[i], 1);

    if (state.timingEnabled) {
      times[i].cpuTime = os_get_time() - times[i].cpuTime;
      gpu_tally_mark(stream, state.timestamps, 2 * i + 1);
    }

    gpu_stream_end(stream);
  }

  // Trailing stream for work that must happen after every pass has executed
  if (xrCanvas || (state.timingEnabled && count > 0)) {
    gpu_stream* stream = streams[streamCount++] = gpu_stream_begin(NULL);

    // Timestamp Readback
    if (state.timingEnabled) {
      BufferView view = getBuffer(GPU_BUFFER_DOWNLOAD, 2 * count * sizeof(uint32_t), 4);
      gpu_copy_tally_buffer(stream, state.timestamps, view.buffer, 0, view.offset, 2 * count);
      Readback* readback = lovrReadbackCreateTimestamp(times, count, view);
      lovrRelease(readback, lovrReadbackDestroy); // It gets freed when it completes
    }

    // OpenXR Swapchain Layout Transitions
    for (uint32_t i = 0; i < count; i++) {
      Canvas* canvas = &passes[i]->canvas;
      for (uint32_t t = 0; t < canvas->count; t++) {
        Texture* texture = canvas->color[t].texture;
        if (texture->info.xr && texture->xrAcquired) {
          gpu_xr_release(stream, texture->gpu);
          texture->xrAcquired = false;
        }
      }
      if (canvas->depth.texture) {
        Texture* texture = canvas->depth.texture;
        if (texture->info.xr && texture->xrAcquired) {
          gpu_xr_release(stream, texture->gpu);
          texture->xrAcquired = false;
        }
      }
    }

    gpu_stream_end(stream);
  }

  // Cleanup
  for (uint32_t i = 0; i < count; i++) {
    Canvas* canvas = &passes[i]->canvas;

    // Reset barriers back to the default
    for (uint32_t t = 0; t < canvas->count; t++) {
      canvas->color[t].texture->sync.barrier = &state.barrier;
    }

    if (canvas->depth.texture) {
      canvas->depth.texture->sync.barrier = &state.barrier;
    }

    for (uint32_t j = 0; j < COUNTOF(passes[i]->access); j++) {
      for (AccessBlock* block = passes[i]->access[j]; block != NULL; block = block->next) {
        for (uint32_t k = 0; k < block->count; k++) {
          block->list[k].sync->barrier = &state.barrier;
        }
      }
    }

    // Mark the tick for any buffers that filled up, so we know when to recycle them
    for (BufferBlock* block = passes[i]->buffers.freelist; block; block = block->next) {
      block->tick = state.tick;
    }
  }

  gpu_submit(streams, streamCount);

  state.active = false;
  state.stream = NULL;
}
2023-04-26 04:45:30 +00:00
void lovrGraphicsPresent ( void ) {
2024-03-02 01:13:06 +00:00
if ( state . shouldPresent ) {
2022-06-12 02:07:46 +00:00
state . window - > gpu = NULL ;
state . window - > renderView = NULL ;
2024-03-02 01:13:06 +00:00
state . shouldPresent = false ;
2023-09-18 01:29:59 +00:00
gpu_surface_present ( ) ;
2022-06-12 02:07:46 +00:00
}
2024-01-08 21:45:35 +00:00
lovrProfileMarkFrame ( ) ;
2022-04-29 05:30:31 +00:00
}
2023-04-26 04:45:30 +00:00
void lovrGraphicsWait ( void ) {
2023-09-29 06:26:04 +00:00
if ( state . active ) {
lovrGraphicsSubmit ( NULL , 0 ) ;
}
2022-07-14 07:05:58 +00:00
gpu_wait_idle ( ) ;
2023-09-29 06:26:04 +00:00
processReadbacks ( ) ;
2022-04-29 05:37:03 +00:00
}
2022-04-26 22:32:54 +00:00
// Buffer
2023-08-25 20:42:09 +00:00
// Computes offsets and strides for a (possibly nested) DataField tree under
// the given memory layout, and returns the alignment of `parent`.
//
// Fields with offset 0 and structs with stride 0 are treated as "unassigned"
// and are filled in; explicitly provided non-zero values are left untouched.
// LAYOUT_PACKED uses scalar alignment; the other layouts use base alignment,
// with LAYOUT_STD140 additionally rounding array elements and struct
// alignment up to 16 bytes.
uint32_t lovrGraphicsAlignFields(DataField* parent, DataLayout layout) {
  // Per-type { byte size, scalar alignment (packed), base alignment (std140/std430) }
  static const struct { uint32_t size, scalarAlign, baseAlign; } table[] = {
    [TYPE_I8x4] = { 4, 1, 4 },
    [TYPE_U8x4] = { 4, 1, 4 },
    [TYPE_SN8x4] = { 4, 1, 4 },
    [TYPE_UN8x4] = { 4, 1, 4 },
    [TYPE_SN10x3] = { 4, 4, 4 },
    [TYPE_UN10x3] = { 4, 4, 4 },
    [TYPE_I16] = { 2, 2, 2 },
    [TYPE_I16x2] = { 4, 2, 4 },
    [TYPE_I16x4] = { 8, 2, 8 },
    [TYPE_U16] = { 2, 2, 2 },
    [TYPE_U16x2] = { 4, 2, 4 },
    [TYPE_U16x4] = { 8, 2, 8 },
    [TYPE_SN16x2] = { 4, 2, 4 },
    [TYPE_SN16x4] = { 8, 2, 8 },
    [TYPE_UN16x2] = { 4, 2, 4 },
    [TYPE_UN16x4] = { 8, 2, 8 },
    [TYPE_I32] = { 4, 4, 4 },
    [TYPE_I32x2] = { 8, 4, 8 },
    [TYPE_I32x3] = { 12, 4, 16 },
    [TYPE_I32x4] = { 16, 4, 16 },
    [TYPE_U32] = { 4, 4, 4 },
    [TYPE_U32x2] = { 8, 4, 8 },
    [TYPE_U32x3] = { 12, 4, 16 },
    [TYPE_U32x4] = { 16, 4, 16 },
    [TYPE_F16x2] = { 4, 2, 4 },
    [TYPE_F16x4] = { 8, 2, 8 },
    [TYPE_F32] = { 4, 4, 4 },
    [TYPE_F32x2] = { 8, 4, 8 },
    [TYPE_F32x3] = { 12, 4, 16 },
    [TYPE_F32x4] = { 16, 4, 16 },
    [TYPE_MAT2] = { 16, 4, 8 },
    [TYPE_MAT3] = { 48, 4, 16 },
    [TYPE_MAT4] = { 64, 4, 16 },
    [TYPE_INDEX16] = { 2, 2, 2 },
    [TYPE_INDEX32] = { 4, 4, 4 }
  };

  uint32_t cursor = 0; // Running byte offset for auto-placed fields
  uint32_t extent = 0; // Furthest byte any field reaches (struct size before padding)
  uint32_t align = 1;

  // A parent with no child fields is a single value of parent->type
  if (parent->fieldCount == 0) {
    align = layout == LAYOUT_PACKED ? table[parent->type].scalarAlign : table[parent->type].baseAlign;
    extent = table[parent->type].size;
  }

  for (uint32_t i = 0; i < parent->fieldCount; i++) {
    DataField* field = &parent->fields[i];
    uint32_t length = MAX(field->length, 1);
    uint32_t subalign;
    if (field->fieldCount > 0) {
      // Nested struct: recurse to lay out its members and get its alignment
      subalign = lovrGraphicsAlignFields(field, layout);
    } else {
      subalign = layout == LAYOUT_PACKED ? table[field->type].scalarAlign : table[field->type].baseAlign;
      if (field->length > 0) {
        // Array: std140 rounds element alignment up to 16, and the stride is
        // at least the element size
        subalign = layout == LAYOUT_STD140 ? MAX(subalign, 16) : subalign;
        field->stride = MAX(subalign, table[field->type].size);
      } else {
        field->stride = table[field->type].size;
      }
    }
    // Offset 0 means "unassigned": place the field at the aligned cursor
    if (field->offset == 0) {
      field->offset = ALIGN(cursor, subalign);
      cursor = field->offset + length * field->stride;
    }
    align = MAX(align, subalign);
    extent = MAX(extent, field->offset + length * field->stride);
  }

  // std140 structs are aligned (and therefore padded) to 16 bytes
  if (layout == LAYOUT_STD140) align = MAX(align, 16);
  if (parent->stride == 0) parent->stride = ALIGN(extent, align);
  return align;
}
2023-10-15 20:57:18 +00:00
// Creates a Buffer. The caller's DataField format tree (if any) is deep-copied
// into memory trailing the Buffer struct, along with its field name strings,
// so the Buffer owns everything in one allocation. When `data` is non-NULL it
// receives a CPU-writable pointer for the buffer's initial contents: either
// directly-mapped memory, or a staging buffer whose contents get copied into
// the Buffer on the next submit.
Buffer* lovrBufferCreate(const BufferInfo* info, void** data) {
  // +1 accounts for the root field itself preceding its children
  uint32_t fieldCount = info->format ? MAX(info->fieldCount, info->format->fieldCount + 1) : 0;

  // Measure the name strings so they can be copied after the Buffer struct
  size_t charCount = 0;
  for (uint32_t i = 0; i < fieldCount; i++) {
    if (!info->format[i].name) continue;
    charCount += strlen(info->format[i].name) + 1;
  }

  charCount = ALIGN(charCount, 8); // Keeps the trailing DataField array aligned

  // Single allocation: [Buffer][name characters][DataField array]
  Buffer* buffer = lovrCalloc(sizeof(Buffer) + charCount + fieldCount * sizeof(DataField));
  buffer->ref = 1;
  buffer->info = *info;
  buffer->info.fieldCount = fieldCount;

  if (info->format) {
    char* names = (char*) buffer + sizeof(Buffer);
    DataField* format = buffer->info.format = (DataField*) (names + charCount);
    memcpy(format, info->format, fieldCount * sizeof(DataField));

    // Copy names, hash names, fixup children pointers, set parent pointers
    for (uint32_t i = 0; i < fieldCount; i++) {
      if (format[i].name) {
        size_t length = strlen(format[i].name);
        memcpy(names, format[i].name, length);
        names[length] = '\0';
        format[i].name = names;
        format[i].hash = (uint32_t) hash64(format[i].name, length);
        names += length + 1;
      }

      // Rebase child pointers from the caller's array into the copied array
      if (format[i].fields) {
        format[i].fields = format + (format[i].fields - info->format);
      }
    }

    // Root child pointer is optional, and if absent it implicitly points to next field
    if (format->fieldCount > 0 && !format->fields) {
      format->fields = format + 1;
    }

    // Set parent pointers
    for (uint32_t i = 0; i < fieldCount; i++) {
      if (format[i].fields) {
        for (uint32_t j = 0; j < format[i].fieldCount; j++) {
          format[i].fields[j].parent = &format[i];
        }
      }
    }

    // Size is optional, and can be computed from format
    if (buffer->info.size == 0) {
      buffer->info.size = format->stride * MAX(format->length, 1);
    }

    // Formats with array/struct fields have extra restrictions, cache it
    for (uint32_t i = 0; i < format->fieldCount; i++) {
      if (format->fields[i].fieldCount > 0 || format->fields[i].length > 0) {
        buffer->complexFormat = true;
        break;
      }
    }
  }

  lovrCheck(buffer->info.size > 0, "Buffer size can not be zero");
  lovrCheck(buffer->info.size <= 1 << 30, "Max buffer size is 1GB");

  // Suballocate GPU memory.  The alignment satisfies both uniform/storage
  // binding alignment and whole-element addressing for the format's stride.
  size_t stride = buffer->info.format ? buffer->info.format->stride : 4;
  size_t align = lcm(stride, MAX(state.limits.storageBufferAlign, state.limits.uniformBufferAlign));
  BufferView view = getBuffer(GPU_BUFFER_STATIC, buffer->info.size, align);
  buffer->gpu = view.buffer;
  buffer->base = view.offset;
  buffer->block = view.block;
  atomic_fetch_add(&buffer->block->ref, 1);

  if (data) {
    if (view.pointer) {
      // Host-visible memory: the caller can write the contents directly
      *data = view.pointer;
    } else {
      // Not mappable: hand back a staging buffer and record a GPU copy from it
      beginFrame();
      BufferView staging = getBuffer(GPU_BUFFER_UPLOAD, buffer->info.size, 4);
      gpu_copy_buffers(state.stream, staging.buffer, buffer->gpu, staging.offset, buffer->base, buffer->info.size);
      buffer->sync.writePhase = GPU_PHASE_COPY;
      buffer->sync.pendingWrite = GPU_CACHE_TRANSFER_WRITE;
      buffer->sync.lastTransferWrite = state.tick;
      *data = staging.pointer;
    }
  }

  buffer->sync.barrier = &state.barrier;

  return buffer;
}
void lovrBufferDestroy ( void * ref ) {
Buffer * buffer = ref ;
2023-12-30 22:17:20 +00:00
BufferAllocator * allocator = & state . bufferAllocators [ GPU_BUFFER_STATIC ] ;
if ( buffer - > block ! = allocator - > current & & atomic_fetch_sub ( & buffer - > block - > ref , 1 ) = = 1 ) {
freeBlock ( allocator , buffer - > block ) ;
}
2024-03-11 21:38:00 +00:00
lovrFree ( buffer ) ;
2022-04-26 22:32:54 +00:00
}
// Returns a read-only pointer to the Buffer's metadata (size, format, fields).
const BufferInfo* lovrBufferGetInfo(Buffer* buffer) {
  return &buffer->info;
}
2023-04-30 01:25:58 +00:00
// Reads back `extent` bytes of the Buffer starting at `offset` (~0u extends
// to the end of the buffer). Submits and blocks until the GPU copy finishes,
// so this is expensive; the returned pointer is temporary download memory.
void* lovrBufferGetData(Buffer* buffer, uint32_t offset, uint32_t extent) {
  beginFrame();

  if (extent == ~0u) {
    extent = buffer->info.size - offset;
  }

  lovrCheck(offset + extent <= buffer->info.size, "Buffer read range goes past the end of the Buffer");

  // Order the copy after any pending writes to the buffer
  gpu_barrier transferBarrier = syncTransfer(&buffer->sync, GPU_PHASE_COPY, GPU_CACHE_TRANSFER_READ);
  gpu_sync(state.stream, &transferBarrier, 1);

  // Copy into host-visible download memory, then flush everything and wait
  BufferView download = getBuffer(GPU_BUFFER_DOWNLOAD, extent, 4);
  gpu_copy_buffers(state.stream, buffer->gpu, download.buffer, buffer->base + offset, download.offset, extent);
  lovrGraphicsSubmit(NULL, 0);
  lovrGraphicsWait();

  return download.pointer;
}
2023-04-30 01:25:58 +00:00
// Returns a host-visible scratch pointer of `extent` bytes (~0u extends to
// the end of the buffer); whatever the caller writes there is copied into the
// Buffer at `offset` when the recorded frame executes.
void* lovrBufferSetData(Buffer* buffer, uint32_t offset, uint32_t extent) {
  beginFrame();

  if (extent == ~0u) {
    extent = buffer->info.size - offset;
  }

  lovrCheck(offset + extent <= buffer->info.size, "Attempt to write past the end of the Buffer");

  BufferView scratch = getBuffer(GPU_BUFFER_UPLOAD, extent, 4);

  // Order the upload after any outstanding accesses to the target range
  gpu_barrier transferBarrier = syncTransfer(&buffer->sync, GPU_PHASE_COPY, GPU_CACHE_TRANSFER_WRITE);
  gpu_sync(state.stream, &transferBarrier, 1);

  gpu_copy_buffers(state.stream, scratch.buffer, buffer->gpu, scratch.offset, buffer->base + offset, extent);

  return scratch.pointer;
}
// Records a GPU-side copy of `extent` bytes from src to dst. A buffer may be
// copied to itself as long as the two regions don't overlap.
void lovrBufferCopy(Buffer* src, Buffer* dst, uint32_t srcOffset, uint32_t dstOffset, uint32_t extent) {
  beginFrame();

  lovrCheck(srcOffset + extent <= src->info.size, "Buffer copy range goes past the end of the source Buffer");
  lovrCheck(dstOffset + extent <= dst->info.size, "Buffer copy range goes past the end of the destination Buffer");
  lovrCheck(src != dst || srcOffset >= dstOffset + extent || dstOffset >= srcOffset + extent, "Copying part of a Buffer to itself requires non-overlapping copy regions");

  // Source must finish pending writes; destination must finish reads/writes.
  // The two syncTransfer calls stay sequenced since src and dst may alias.
  gpu_barrier barriers[2];
  barriers[0] = syncTransfer(&src->sync, GPU_PHASE_COPY, GPU_CACHE_TRANSFER_READ);
  barriers[1] = syncTransfer(&dst->sync, GPU_PHASE_COPY, GPU_CACHE_TRANSFER_WRITE);
  gpu_sync(state.stream, barriers, 2);

  gpu_copy_buffers(state.stream, src->gpu, dst->gpu, src->base + srcOffset, dst->base + dstOffset, extent);
}
2023-09-19 06:05:27 +00:00
// Fills a range of the Buffer with a repeating 4-byte value. Offset and
// extent must be multiples of 4; ~0u extends to the end of the buffer and an
// extent of 0 is a no-op.
void lovrBufferClear(Buffer* buffer, uint32_t offset, uint32_t extent, uint32_t value) {
  if (extent == 0) {
    return;
  }

  if (extent == ~0u) {
    extent = buffer->info.size - offset;
  }

  lovrCheck(offset % 4 == 0, "Buffer clear offset must be a multiple of 4");
  lovrCheck(extent % 4 == 0, "Buffer clear extent must be a multiple of 4");
  lovrCheck(offset + extent <= buffer->info.size, "Buffer clear range goes past the end of the Buffer");

  beginFrame();

  // Order the clear after any outstanding accesses to the buffer
  gpu_barrier clearBarrier = syncTransfer(&buffer->sync, GPU_PHASE_CLEAR, GPU_CACHE_TRANSFER_WRITE);
  gpu_sync(state.stream, &clearBarrier, 1);

  gpu_clear_buffer(state.stream, buffer->gpu, buffer->base + offset, extent, value);
}
2022-04-30 03:38:34 +00:00
// Texture
2023-04-26 04:45:30 +00:00
// Returns the Texture backing the OS window, lazily creating the surface on
// first use and acquiring the current swapchain image once per frame.
// Returns NULL when no image is available (e.g. mid-resize) or no window is open.
Texture* lovrGraphicsGetWindowTexture(void) {
  // First call with an open window: create the Texture object and the GPU surface
  if (!state.window && os_window_is_open()) {
    uint32_t width, height;
    os_window_get_size(&width, &height);

    // Convert logical window size to pixels
    float density = os_window_get_pixel_density();
    width *= density;
    height *= density;

    state.window = lovrCalloc(sizeof(Texture));
    state.window->ref = 1;
    state.window->gpu = NULL; // Filled in by gpu_surface_acquire below
    state.window->renderView = NULL;
    state.window->info = (TextureInfo) {
      .type = TEXTURE_2D,
      .format = GPU_FORMAT_SURFACE,
      .width = width,
      .height = height,
      .layers = 1,
      .mipmaps = 1,
      .usage = TEXTURE_RENDER,
      .srgb = true
    };

    bool vsync = state.config.vsync;
#ifndef LOVR_DISABLE_HEADSET
    // With a real headset driver active, window vsync is disabled (the
    // headset runtime paces frames instead)
    if (lovrHeadsetInterface && lovrHeadsetInterface->driverType != DRIVER_SIMULATOR) {
      vsync = false;
    }
#endif

    // Platform-specific native window handles for surface creation
    gpu_surface_info info = {
      .width = width,
      .height = height,
      .vsync = vsync,
#if defined(_WIN32)
      .win32.window = os_get_win32_window(),
      .win32.instance = os_get_win32_instance()
#elif defined(__APPLE__)
      .macos.layer = os_get_ca_metal_layer()
#elif defined(__linux__) && !defined(__ANDROID__)
      .xcb.connection = os_get_xcb_connection(),
      .xcb.window = os_get_xcb_window()
#endif
    };

    gpu_surface_init(&info);
    os_on_resize(onResize);

    // Choose a depth format for window rendering, falling back to the 24-bit
    // variants when the 32-bit float formats aren't renderable
    state.depthFormat = state.config.stencil ? FORMAT_D32FS8 : FORMAT_D32F;
    if (!lovrGraphicsGetFormatSupport(state.depthFormat, TEXTURE_FEATURE_RENDER)) {
      state.depthFormat = state.config.stencil ? FORMAT_D24S8 : FORMAT_D24;
    }
  }

  // Acquire the swapchain image for this frame, if it hasn't been already
  if (state.window && !state.window->gpu) {
    beginFrame();

    state.window->gpu = gpu_surface_acquire();
    state.window->renderView = state.window->gpu;

    // Window texture may be unavailable during a resize
    if (!state.window->gpu) {
      return NULL;
    }
  }

  return state.window;
}
2022-07-13 07:07:15 +00:00
Texture * lovrTextureCreate ( const TextureInfo * info ) {
2022-04-30 03:38:34 +00:00
uint32_t limits [ ] = {
[ TEXTURE_2D ] = state . limits . textureSize2D ,
2022-04-30 03:56:23 +00:00
[ TEXTURE_3D ] = state . limits . textureSize3D ,
2022-04-30 03:38:34 +00:00
[ TEXTURE_CUBE ] = state . limits . textureSizeCube ,
2022-04-30 03:56:23 +00:00
[ TEXTURE_ARRAY ] = state . limits . textureSize2D
2022-04-30 03:38:34 +00:00
} ;
uint32_t limit = limits [ info - > type ] ;
2022-07-30 22:08:30 +00:00
uint32_t mipmapCap = log2 ( MAX ( MAX ( info - > width , info - > height ) , ( info - > type = = TEXTURE_3D ? info - > layers : 1 ) ) ) + 1 ;
2022-05-01 01:49:46 +00:00
uint32_t mipmaps = CLAMP ( info - > mipmaps , 1 , mipmapCap ) ;
2023-11-02 20:26:49 +00:00
bool srgb = supportsSRGB ( info - > format ) & & info - > srgb ;
uint8_t supports = state . features . formats [ info - > format ] [ srgb ] ;
2023-11-29 06:44:37 +00:00
uint8_t linearSupports = state . features . formats [ info - > format ] [ false ] ;
2022-04-30 03:38:34 +00:00
lovrCheck ( info - > width > 0 , " Texture width must be greater than zero " ) ;
lovrCheck ( info - > height > 0 , " Texture height must be greater than zero " ) ;
2022-07-30 22:08:30 +00:00
lovrCheck ( info - > layers > 0 , " Texture layer count must be greater than zero " ) ;
2022-04-30 03:38:34 +00:00
lovrCheck ( info - > width < = limit , " Texture %s exceeds the limit for this texture type (%d) " , " width " , limit ) ;
lovrCheck ( info - > height < = limit , " Texture %s exceeds the limit for this texture type (%d) " , " height " , limit ) ;
2022-07-30 22:08:30 +00:00
lovrCheck ( info - > layers < = limit | | info - > type ! = TEXTURE_3D , " Texture %s exceeds the limit for this texture type (%d) " , " layer count " , limit ) ;
2023-11-10 19:15:16 +00:00
lovrCheck ( info - > layers < = state . limits . textureLayers | | info - > type = = TEXTURE_3D , " Texture %s exceeds the limit for this texture type (%d) " , " layer count " , limit ) ;
2022-07-30 22:08:30 +00:00
lovrCheck ( info - > layers = = 1 | | info - > type ! = TEXTURE_2D , " 2D textures must have a layer count of 1 " ) ;
2023-11-10 19:15:16 +00:00
lovrCheck ( info - > layers % 6 = = 0 | | info - > type ! = TEXTURE_CUBE , " Cubemap layer count must be a multiple of 6 " ) ;
2022-04-30 03:38:34 +00:00
lovrCheck ( info - > width = = info - > height | | info - > type ! = TEXTURE_CUBE , " Cubemaps must be square " ) ;
2022-07-30 22:08:30 +00:00
lovrCheck ( measureTexture ( info - > format , info - > width , info - > height , info - > layers ) < 1 < < 30 , " Memory for a Texture can not exceed 1GB " ) ; // TODO mip?
2023-07-11 02:21:11 +00:00
lovrCheck ( ~ info - > usage & TEXTURE_SAMPLE | | ( supports & GPU_FEATURE_SAMPLE ) , " GPU does not support the 'sample' flag for this texture format/encoding " ) ;
lovrCheck ( ~ info - > usage & TEXTURE_RENDER | | ( supports & GPU_FEATURE_RENDER ) , " GPU does not support the 'render' flag for this texture format/encoding " ) ;
2023-11-29 06:44:37 +00:00
lovrCheck ( ~ info - > usage & TEXTURE_STORAGE | | ( linearSupports & GPU_FEATURE_STORAGE ) , " GPU does not support the 'storage' flag for this texture format " ) ;
2022-04-30 03:38:34 +00:00
lovrCheck ( ~ info - > usage & TEXTURE_RENDER | | info - > width < = state . limits . renderSize [ 0 ] , " Texture has 'render' flag but its size exceeds the renderSize limit " ) ;
lovrCheck ( ~ info - > usage & TEXTURE_RENDER | | info - > height < = state . limits . renderSize [ 1 ] , " Texture has 'render' flag but its size exceeds the renderSize limit " ) ;
2023-02-06 01:18:45 +00:00
lovrCheck ( ~ info - > usage & TEXTURE_RENDER | | info - > type ! = TEXTURE_3D | | ! isDepthFormat ( info - > format ) , " 3D depth textures can not have the 'render' flag " ) ;
2022-04-30 03:38:34 +00:00
lovrCheck ( ( info - > format < FORMAT_BC1 | | info - > format > FORMAT_BC7 ) | | state . features . textureBC , " %s textures are not supported on this GPU " , " BC " ) ;
lovrCheck ( info - > format < FORMAT_ASTC_4x4 | | state . features . textureASTC , " %s textures are not supported on this GPU " , " ASTC " ) ;
2024-03-11 21:38:00 +00:00
Texture * texture = lovrCalloc ( sizeof ( Texture ) + gpu_sizeof_texture ( ) ) ;
2022-04-30 03:38:34 +00:00
texture - > ref = 1 ;
texture - > gpu = ( gpu_texture * ) ( texture + 1 ) ;
2024-02-26 23:08:34 +00:00
texture - > root = texture ;
2022-04-30 03:38:34 +00:00
texture - > info = * info ;
2022-05-01 01:49:46 +00:00
texture - > info . mipmaps = mipmaps ;
2023-11-02 20:26:49 +00:00
texture - > info . srgb = srgb ;
2022-05-01 01:49:46 +00:00
uint32_t levelCount = 0 ;
uint32_t levelOffsets [ 16 ] ;
uint32_t levelSizes [ 16 ] ;
2023-12-30 20:39:50 +00:00
BufferView view = { 0 } ;
2022-05-01 01:49:46 +00:00
2023-03-10 02:36:23 +00:00
beginFrame ( ) ;
2022-05-01 01:49:46 +00:00
if ( info - > imageCount > 0 ) {
levelCount = lovrImageGetLevelCount ( info - > images [ 0 ] ) ;
lovrCheck ( info - > type ! = TEXTURE_3D | | levelCount = = 1 , " Images used to initialize 3D textures can not have mipmaps " ) ;
uint32_t total = 0 ;
for ( uint32_t level = 0 ; level < levelCount ; level + + ) {
levelOffsets [ level ] = total ;
uint32_t width = MAX ( info - > width > > level , 1 ) ;
uint32_t height = MAX ( info - > height > > level , 1 ) ;
2022-07-30 22:08:30 +00:00
levelSizes [ level ] = measureTexture ( info - > format , width , height , info - > layers ) ;
2022-05-01 01:49:46 +00:00
total + = levelSizes [ level ] ;
}
2023-12-30 20:39:50 +00:00
view = getBuffer ( GPU_BUFFER_UPLOAD , total , 64 ) ;
char * data = view . pointer ;
2022-05-01 01:49:46 +00:00
for ( uint32_t level = 0 ; level < levelCount ; level + + ) {
2022-07-30 22:08:30 +00:00
for ( uint32_t layer = 0 ; layer < info - > layers ; layer + + ) {
2022-05-01 01:49:46 +00:00
Image * image = info - > imageCount = = 1 ? info - > images [ 0 ] : info - > images [ layer ] ;
uint32_t slice = info - > imageCount = = 1 ? layer : 0 ;
2022-08-07 01:05:30 +00:00
size_t size = lovrImageGetLayerSize ( image , level ) ;
2022-07-30 22:08:30 +00:00
lovrCheck ( size = = levelSizes [ level ] / info - > layers , " Texture/Image size mismatch! " ) ;
2022-05-01 22:18:56 +00:00
void * pixels = lovrImageGetLayerData ( image , level , slice ) ;
2022-05-01 01:49:46 +00:00
memcpy ( data , pixels , size ) ;
data + = size ;
}
2023-12-30 20:39:50 +00:00
levelOffsets [ level ] + = view . offset ;
2022-05-01 01:49:46 +00:00
}
}
2022-04-30 03:38:34 +00:00
2023-09-29 12:22:03 +00:00
// Render targets with mipmaps get transfer usage for automipmapping
bool transfer = ( info - > usage & TEXTURE_TRANSFER ) | | ( ( info - > usage & TEXTURE_RENDER ) & & texture - > info . mipmaps > 1 ) ;
2022-04-30 03:38:34 +00:00
gpu_texture_init ( texture - > gpu , & ( gpu_texture_info ) {
. type = ( gpu_texture_type ) info - > type ,
. format = ( gpu_texture_format ) info - > format ,
2022-07-30 22:08:30 +00:00
. size = { info - > width , info - > height , info - > layers } ,
2022-04-30 03:38:34 +00:00
. mipmaps = texture - > info . mipmaps ,
2022-08-26 04:57:15 +00:00
. usage =
( ( info - > usage & TEXTURE_SAMPLE ) ? GPU_TEXTURE_SAMPLE : 0 ) |
( ( info - > usage & TEXTURE_RENDER ) ? GPU_TEXTURE_RENDER : 0 ) |
( ( info - > usage & TEXTURE_STORAGE ) ? GPU_TEXTURE_STORAGE : 0 ) |
2023-11-08 22:54:10 +00:00
( transfer ? GPU_TEXTURE_COPY_SRC | GPU_TEXTURE_COPY_DST : 0 ) ,
2023-11-02 20:26:49 +00:00
. srgb = srgb ,
2022-04-30 03:38:34 +00:00
. handle = info - > handle ,
2022-05-01 01:49:46 +00:00
. label = info - > label ,
. upload = {
2022-06-12 05:55:43 +00:00
. stream = state . stream ,
2023-12-30 20:39:50 +00:00
. buffer = view . buffer ,
2022-05-01 01:49:46 +00:00
. levelCount = levelCount ,
. levelOffsets = levelOffsets ,
2022-10-21 02:37:00 +00:00
. generateMipmaps = levelCount > 0 & & levelCount < mipmaps
2022-05-01 01:49:46 +00:00
}
2022-04-30 03:38:34 +00:00
} ) ;
// Automatically create a renderable view for renderable non-volume textures
2022-07-30 22:08:30 +00:00
if ( ( info - > usage & TEXTURE_RENDER ) & & info - > type ! = TEXTURE_3D & & info - > layers < = state . limits . renderSize [ 2 ] ) {
2022-04-30 03:38:34 +00:00
if ( info - > mipmaps = = 1 ) {
texture - > renderView = texture - > gpu ;
} else {
gpu_texture_view_info view = {
. source = texture - > gpu ,
. type = GPU_TEXTURE_ARRAY ,
2023-11-29 06:44:37 +00:00
. usage = GPU_TEXTURE_RENDER ,
. srgb = srgb ,
2022-07-30 22:08:30 +00:00
. layerCount = info - > layers ,
2022-04-30 03:38:34 +00:00
. levelCount = 1
} ;
2024-03-11 21:38:00 +00:00
texture - > renderView = lovrMalloc ( gpu_sizeof_texture ( ) ) ;
2024-03-10 07:22:32 +00:00
gpu_texture_init_view ( texture - > renderView , & view ) ;
2022-04-30 03:38:34 +00:00
}
}
2023-11-29 06:44:37 +00:00
// Make a linear view of sRGB textures for storage bindings
2023-11-02 20:26:49 +00:00
if ( srgb & & ( info - > usage & TEXTURE_STORAGE ) ) {
2023-11-01 00:14:09 +00:00
gpu_texture_view_info view = {
. source = texture - > gpu ,
. type = ( gpu_texture_type ) info - > type ,
2023-11-29 06:44:37 +00:00
. usage = GPU_TEXTURE_STORAGE ,
. srgb = false
2023-11-01 00:14:09 +00:00
} ;
2024-03-11 21:38:00 +00:00
texture - > storageView = lovrMalloc ( gpu_sizeof_texture ( ) ) ;
2024-03-10 07:22:32 +00:00
gpu_texture_init_view ( texture - > storageView , & view ) ;
2023-11-01 00:14:09 +00:00
} else {
2023-11-29 06:44:37 +00:00
texture - > storageView = texture - > gpu ;
2023-11-01 00:14:09 +00:00
}
2022-06-12 02:07:46 +00:00
// Sample-only textures are exempt from sync tracking to reduce overhead. Instead, they are
// manually synchronized with a single barrier after the upload stream.
if ( info - > usage = = TEXTURE_SAMPLE ) {
2024-03-02 01:13:06 +00:00
state . barrier . prev | = GPU_PHASE_COPY | GPU_PHASE_BLIT ;
state . barrier . next | = GPU_PHASE_SHADER_VERTEX | GPU_PHASE_SHADER_FRAGMENT | GPU_PHASE_SHADER_COMPUTE ;
state . barrier . flush | = GPU_CACHE_TRANSFER_WRITE ;
state . barrier . clear | = GPU_CACHE_TEXTURE ;
2022-06-12 02:07:46 +00:00
} else if ( levelCount > 0 ) {
2024-01-14 22:51:23 +00:00
texture - > sync . writePhase = GPU_PHASE_COPY | GPU_PHASE_BLIT ;
2022-06-12 05:55:43 +00:00
texture - > sync . pendingWrite = GPU_CACHE_TRANSFER_WRITE ;
2023-04-30 06:02:37 +00:00
texture - > sync . lastTransferWrite = state . tick ;
2022-06-12 02:07:46 +00:00
}
2024-03-02 01:13:06 +00:00
texture - > sync . barrier = & state . barrier ;
2022-04-30 03:38:34 +00:00
return texture ;
}
2024-02-26 23:08:34 +00:00
// Creates a Texture that aliases a sub-range of layers/mipmap levels of an existing texture.
// The view shares the parent's GPU memory; the root texture is retained so it outlives the view.
// A layerCount/levelCount of ~0u means "all remaining layers/levels".
Texture* lovrTextureCreateView(Texture* parent, const TextureViewInfo* info) {
  const TextureInfo* base = &parent->info;

  // For 3D parents the layer (depth) count shrinks with each mip level, so the layer bound
  // depends on the view's base level
  uint32_t maxLayers = base->type == TEXTURE_3D ? MAX(base->layers >> info->levelIndex, 1) : base->layers;

  lovrCheck(info->type != TEXTURE_3D, "Texture views can't be 3D textures");
  lovrCheck(info->layerCount > 0, "Texture view must have at least one layer");
  lovrCheck(info->levelCount > 0, "Texture view must have at least one mipmap");
  // ~0u sentinels skip the explicit range checks (they always select a valid remainder)
  lovrCheck(info->layerCount == ~0u || info->layerIndex + info->layerCount <= maxLayers, "Texture view layer range exceeds layer count of parent texture");
  lovrCheck(info->levelCount == ~0u || info->levelIndex + info->levelCount <= base->mipmaps, "Texture view mipmap range exceeds mipmap count of parent texture");
  lovrCheck(info->layerCount == 1 || info->type != TEXTURE_2D, "2D textures can only have a single layer");
  lovrCheck(info->levelCount == 1 || base->type != TEXTURE_3D, "Views of volume textures may only have a single mipmap level");
  lovrCheck(info->layerCount % 6 == 0 || info->type != TEXTURE_CUBE, "Cubemap layer count must be a multiple of 6");

  // The gpu_texture handle lives in the same allocation, directly after the Texture struct
  Texture* texture = lovrCalloc(sizeof(Texture) + gpu_sizeof_texture());
  texture->ref = 1;
  texture->gpu = (gpu_texture*) (texture + 1);
  texture->info = *base;
  texture->root = parent->root;
  // Accumulate base offsets so views-of-views still index directly into the root texture
  texture->baseLayer = parent->baseLayer + info->layerIndex;
  texture->baseLevel = parent->baseLevel + info->levelIndex;
  texture->info.type = info->type;
  texture->info.width = MAX(base->width >> info->levelIndex, 1);
  texture->info.height = MAX(base->height >> info->levelIndex, 1);
  texture->info.layers = info->layerCount == ~0u ? base->layers : info->layerCount;
  texture->info.mipmaps = info->levelCount == ~0u ? base->mipmaps : info->levelCount;

  // Only sample/render usages get a GPU view object; otherwise the view is purely bookkeeping
  if (base->usage & (TEXTURE_SAMPLE | TEXTURE_RENDER)) {
    gpu_texture_init_view(texture->gpu, &(gpu_texture_view_info) {
      .source = texture->root->gpu,
      .type = (gpu_texture_type) info->type,
      .usage = base->usage,
      .srgb = base->srgb,
      .layerIndex = texture->baseLayer,
      .layerCount = info->layerCount,
      .levelIndex = texture->baseLevel,
      .levelCount = info->levelCount,
      .label = info->label
    });
  } else {
    texture->gpu = NULL;
  }

  if ((base->usage & TEXTURE_RENDER) && info->layerCount <= state.limits.renderSize[2]) {
    if (info->levelCount == 1) {
      // Single-level views can be used as render targets directly
      texture->renderView = texture->gpu;
    } else {
      // Render passes need a single-level array view, covering only the view's base level
      gpu_texture_view_info subview = {
        .source = texture->root->gpu,
        .type = GPU_TEXTURE_ARRAY,
        .usage = GPU_TEXTURE_RENDER,
        .layerIndex = texture->baseLayer,
        .layerCount = info->layerCount,
        .levelIndex = texture->baseLevel,
        .levelCount = 1
      };

      texture->renderView = lovrMalloc(gpu_sizeof_texture());
      gpu_texture_init_view(texture->renderView, &subview);
    }
  }

  // Storage bindings of sRGB textures need a linear (non-sRGB) view
  if ((base->usage & TEXTURE_STORAGE) && base->srgb) {
    gpu_texture_view_info subview = {
      .source = texture->root->gpu,
      .type = (gpu_texture_type) base->type,
      .usage = GPU_TEXTURE_STORAGE,
      .srgb = false,
      .layerIndex = texture->baseLayer,
      .layerCount = info->layerCount,
      .levelIndex = texture->baseLevel,
      .levelCount = info->levelCount
    };

    texture->storageView = lovrMalloc(gpu_sizeof_texture());
    gpu_texture_init_view(texture->storageView, &subview);
  } else {
    texture->storageView = texture->gpu;
  }

  // Keep the root alive for the lifetime of the view (released in lovrTextureDestroy)
  lovrRetain(texture->root);
  return texture;
}
2022-04-30 03:38:34 +00:00
// Destroys a Texture. The window texture's GPU objects are owned by the system and are not
// destroyed here; only the struct itself is freed for it.
void lovrTextureDestroy(void* ref) {
  Texture* texture = ref;
  if (texture != state.window) {
    // Make sure any recorded transfers referencing this texture are submitted first
    flushTransfers();
    lovrRelease(texture->material, lovrMaterialDestroy);
    // Views retain their root; a root texture has root == itself and retains nothing
    if (texture->root != texture) lovrRelease(texture->root, lovrTextureDestroy);
    // renderView/storageView may alias texture->gpu; only destroy distinct view objects
    if (texture->renderView && texture->renderView != texture->gpu) gpu_texture_destroy(texture->renderView);
    if (texture->storageView && texture->storageView != texture->gpu) gpu_texture_destroy(texture->storageView);
    if (texture->gpu) gpu_texture_destroy(texture->gpu);
  }
  lovrFree(texture);
}
2022-04-30 03:56:23 +00:00
// Returns a read-only pointer to the Texture's descriptor.
const TextureInfo* lovrTextureGetInfo(Texture* texture) {
  const TextureInfo* info = &texture->info;
  return info;
}
2024-02-26 23:08:34 +00:00
// Returns the root texture a view was created from, or NULL when the texture is not a view.
Texture* lovrTextureGetParent(Texture* texture) {
  if (texture->root == texture) {
    return NULL;
  }
  return texture->root;
}
2023-04-30 01:25:58 +00:00
// Reads back a region of a Texture into a newly created Image. This is a slow, synchronous
// path: it submits pending GPU work and waits for completion before copying the bytes out.
// offset is {x, y, layer, level}; extent is {width, height, layers}; ~0u extends to the edge.
// Currently only extent[2] == 1 (a single layer) is supported.
Image* lovrTextureGetPixels(Texture* texture, uint32_t offset[4], uint32_t extent[3]) {
  beginFrame();
  if (extent[0] == ~0u) extent[0] = texture->info.width - offset[0];
  if (extent[1] == ~0u) extent[1] = texture->info.height - offset[1];
  lovrCheck(extent[2] == 1, "Currently only a single layer can be read from a Texture");
  lovrCheck(texture->info.usage & TEXTURE_TRANSFER, "Texture must be created with the 'transfer' usage to read from it");
  checkTextureBounds(&texture->info, offset, extent);

  // Wait for prior writes to the texture before reading it on the transfer queue.
  // NOTE(review): sibling functions sync &texture->root->sync; this uses the view's own sync
  // state — confirm that is intentional for views.
  gpu_barrier barrier = syncTransfer(&texture->sync, GPU_PHASE_COPY, GPU_CACHE_TRANSFER_READ);
  gpu_sync(state.stream, &barrier, 1);

  // Rebase view-relative layer/level onto the root texture
  uint32_t rootOffset[4] = { offset[0], offset[1], offset[2] + texture->baseLayer, offset[3] + texture->baseLevel };
  BufferView view = getBuffer(GPU_BUFFER_DOWNLOAD, measureTexture(texture->info.format, extent[0], extent[1], 1), 64);
  gpu_copy_texture_buffer(state.stream, texture->root->gpu, view.buffer, rootOffset, view.offset, extent);

  // Flush and block so the download buffer is safe to read from the CPU
  lovrGraphicsSubmit(NULL, 0);
  lovrGraphicsWait();

  Image* image = lovrImageCreateRaw(extent[0], extent[1], texture->info.format, texture->info.srgb);
  void* data = lovrImageGetLayerData(image, offset[3], offset[2]);
  memcpy(data, view.pointer, view.extent);
  return image;
}
// Uploads pixels from an Image into a region of a Texture. Rows of the source region are
// repacked tightly into a scratch upload buffer, then copied to the texture on the GPU.
// srcOffset is {x, y, layer, level} into the Image, dstOffset likewise into the Texture;
// extent is {width, height, layers}, where ~0u selects as much as fits in both.
void lovrTextureSetPixels(Texture* texture, Image* image, uint32_t srcOffset[4], uint32_t dstOffset[4], uint32_t extent[3]) {
  beginFrame();
  TextureFormat format = texture->info.format;
  if (extent[0] == ~0u) extent[0] = MIN(texture->info.width - dstOffset[0], lovrImageGetWidth(image, srcOffset[3]) - srcOffset[0]);
  if (extent[1] == ~0u) extent[1] = MIN(texture->info.height - dstOffset[1], lovrImageGetHeight(image, srcOffset[3]) - srcOffset[1]);
  if (extent[2] == ~0u) extent[2] = MIN(texture->info.layers - dstOffset[2], lovrImageGetLayerCount(image) - srcOffset[2]);
  lovrCheck(texture->info.usage & TEXTURE_TRANSFER, "Texture must be created with the 'transfer' usage to copy to it");
  lovrCheck(lovrImageGetFormat(image) == format, "Image and Texture formats must match");
  lovrCheck(srcOffset[0] + extent[0] <= lovrImageGetWidth(image, srcOffset[3]), "Image copy region exceeds its %s", "width");
  lovrCheck(srcOffset[1] + extent[1] <= lovrImageGetHeight(image, srcOffset[3]), "Image copy region exceeds its %s", "height");
  lovrCheck(srcOffset[2] + extent[2] <= lovrImageGetLayerCount(image), "Image copy region exceeds its %s", "layer count");
  lovrCheck(srcOffset[3] < lovrImageGetLevelCount(image), "Image copy region exceeds its %s", "mipmap count");
  checkTextureBounds(&texture->info, dstOffset, extent);

  // Byte sizes: one row of the region, the full packed region, the offset of the region's first
  // texel within a source layer, and the source image's row pitch.
  // NOTE(review): layerOffset's vertical skip is measured at width extent[0], while rows are
  // advanced by pitch (full image width) — confirm this is correct when extent[0] differs from
  // the image width and srcOffset[1] > 0.
  uint32_t rowSize = measureTexture(format, extent[0], 1, 1);
  uint32_t totalSize = measureTexture(format, extent[0], extent[1], 1) * extent[2];
  uint32_t layerOffset = measureTexture(format, extent[0], srcOffset[1], 1);
  layerOffset += measureTexture(format, srcOffset[0], 1, 1);
  uint32_t pitch = measureTexture(format, lovrImageGetWidth(image, srcOffset[3]), 1, 1);
  BufferView view = getBuffer(GPU_BUFFER_UPLOAD, totalSize, 64);
  char* dst = view.pointer;

  // Repack the region row by row into the tightly-packed upload buffer.
  // NOTE(review): the source layer index is z, not srcOffset[2] + z — confirm srcOffset[2] is
  // meant to be ignored here even though the bounds check above uses it.
  for (uint32_t z = 0; z < extent[2]; z++) {
    const char* src = (char*) lovrImageGetLayerData(image, srcOffset[3], z) + layerOffset;
    for (uint32_t y = 0; y < extent[1]; y++) {
      memcpy(dst, src, rowSize);
      dst += rowSize;
      src += pitch;
    }
  }

  // Wait for prior work on the texture before overwriting it
  gpu_barrier barrier = syncTransfer(&texture->root->sync, GPU_PHASE_COPY, GPU_CACHE_TRANSFER_WRITE);
  gpu_sync(state.stream, &barrier, 1);
  // Rebase view-relative layer/level onto the root texture
  uint32_t rootOffset[4] = { dstOffset[0], dstOffset[1], dstOffset[2] + texture->baseLayer, dstOffset[3] + texture->baseLevel };
  gpu_copy_buffer_texture(state.stream, view.buffer, texture->root->gpu, view.offset, rootOffset, extent);
}
2022-06-25 02:38:45 +00:00
2023-04-30 01:25:58 +00:00
// Copies a region from one Texture to another. Both textures need 'transfer' usage and matching
// formats. srcOffset/dstOffset are {x, y, layer, level}; extent is {width, height, layers},
// where ~0u selects the largest extent that fits in both textures on that axis.
void lovrTextureCopy(Texture* src, Texture* dst, uint32_t srcOffset[4], uint32_t dstOffset[4], uint32_t extent[3]) {
  beginFrame();
  // Fix: the default extent for axes 1 and 2 previously clamped against dstOffset[0] on all
  // three axes (copy-paste off-index), which overshoots the destination whenever its y/layer
  // offsets differ from its x offset. Each axis must clamp against its own offset.
  if (extent[0] == ~0u) extent[0] = MIN(src->info.width - srcOffset[0], dst->info.width - dstOffset[0]);
  if (extent[1] == ~0u) extent[1] = MIN(src->info.height - srcOffset[1], dst->info.height - dstOffset[1]);
  if (extent[2] == ~0u) extent[2] = MIN(src->info.layers - srcOffset[2], dst->info.layers - dstOffset[2]);
  lovrCheck(src->info.usage & TEXTURE_TRANSFER, "Texture must be created with the 'transfer' usage to copy %s it", "from");
  lovrCheck(dst->info.usage & TEXTURE_TRANSFER, "Texture must be created with the 'transfer' usage to copy %s it", "to");
  lovrCheck(src->info.format == dst->info.format, "Copying between Textures requires them to have the same format");
  checkTextureBounds(&src->info, srcOffset, extent);
  checkTextureBounds(&dst->info, dstOffset, extent);

  // Prior writes to src must finish before reading it; prior accesses to dst before writing it
  gpu_barrier barriers[2];
  barriers[0] = syncTransfer(&src->root->sync, GPU_PHASE_COPY, GPU_CACHE_TRANSFER_READ);
  barriers[1] = syncTransfer(&dst->root->sync, GPU_PHASE_COPY, GPU_CACHE_TRANSFER_WRITE);
  gpu_sync(state.stream, barriers, 2);

  // Rebase view-relative layer/level offsets onto the root textures
  uint32_t srcRootOffset[4] = { srcOffset[0], srcOffset[1], srcOffset[2] + src->baseLayer, srcOffset[3] + src->baseLevel };
  uint32_t dstRootOffset[4] = { dstOffset[0], dstOffset[1], dstOffset[2] + dst->baseLayer, dstOffset[3] + dst->baseLevel };
  gpu_copy_textures(state.stream, src->root->gpu, dst->root->gpu, srcRootOffset, dstRootOffset, extent);
}
// Blits (a scaled copy with filtering) a region of src onto a region of dst. Unlike
// lovrTextureCopy, the extents may differ, so the GPU must support blitting the format.
// Offsets are {x, y, layer, level}; extents are {width, height, layers}; ~0u extends to the edge.
void lovrTextureBlit(Texture* src, Texture* dst, uint32_t srcOffset[4], uint32_t dstOffset[4], uint32_t srcExtent[3], uint32_t dstExtent[3], FilterMode filter) {
  beginFrame();
  if (srcExtent[0] == ~0u) srcExtent[0] = src->info.width - srcOffset[0];
  if (srcExtent[1] == ~0u) srcExtent[1] = src->info.height - srcOffset[1];
  if (srcExtent[2] == ~0u) srcExtent[2] = src->info.layers - srcOffset[2];
  if (dstExtent[0] == ~0u) dstExtent[0] = dst->info.width - dstOffset[0];
  if (dstExtent[1] == ~0u) dstExtent[1] = dst->info.height - dstOffset[1];
  if (dstExtent[2] == ~0u) dstExtent[2] = dst->info.layers - dstOffset[2];

  // Per-format feature bits, indexed by format and sRGB-ness
  uint32_t supports = state.features.formats[src->info.format][src->info.srgb];
  lovrCheck(src->info.usage & TEXTURE_TRANSFER, "Texture must be created with the 'transfer' usage to blit %s it", "from");
  lovrCheck(dst->info.usage & TEXTURE_TRANSFER, "Texture must be created with the 'transfer' usage to blit %s it", "to");
  lovrCheck(supports & GPU_FEATURE_BLIT, "This GPU does not support blitting this texture format/encoding");
  lovrCheck(src->info.format == dst->info.format && src->info.srgb == dst->info.srgb, "Texture formats must match to blit between them");
  // Both textures must agree on 3D-ness; for non-3D blits the layer counts must also match
  lovrCheck(((src->info.type == TEXTURE_3D) ^ (dst->info.type == TEXTURE_3D)) == false, "3D textures can only be blitted with other 3D textures");
  lovrCheck(src->info.type == TEXTURE_3D || srcExtent[2] == dstExtent[2], "When blitting between non-3D textures, blit layer counts must match");
  checkTextureBounds(&src->info, srcOffset, srcExtent);
  checkTextureBounds(&dst->info, dstOffset, dstExtent);

  // Prior writes to src must finish before reading it; prior accesses to dst before writing it
  gpu_barrier barriers[2];
  barriers[0] = syncTransfer(&src->root->sync, GPU_PHASE_BLIT, GPU_CACHE_TRANSFER_READ);
  barriers[1] = syncTransfer(&dst->root->sync, GPU_PHASE_BLIT, GPU_CACHE_TRANSFER_WRITE);
  gpu_sync(state.stream, barriers, 2);

  // Rebase view-relative layer/level offsets onto the root textures
  uint32_t srcRootOffset[4] = { srcOffset[0], srcOffset[1], srcOffset[2] + src->baseLayer, srcOffset[3] + src->baseLevel };
  uint32_t dstRootOffset[4] = { dstOffset[0], dstOffset[1], dstOffset[2] + dst->baseLayer, dstOffset[3] + dst->baseLevel };
  gpu_blit(state.stream, src->root->gpu, dst->root->gpu, srcRootOffset, dstRootOffset, srcExtent, dstExtent, (gpu_filter) filter);
}
// Clears a range of layers and mipmap levels of a texture to a constant color.
// ~0u for layerCount/levelCount means "everything from the start index onward".
void lovrTextureClear(Texture* texture, float value[4], uint32_t layer, uint32_t layerCount, uint32_t level, uint32_t levelCount) {
  beginFrame();
  if (layerCount == ~0u) layerCount = texture->info.layers - layer;
  if (levelCount == ~0u) levelCount = texture->info.mipmaps - level;
  lovrCheck(texture->info.usage & TEXTURE_TRANSFER, "Texture must be created with 'transfer' usage to clear it");
  // The layer bound only applies to non-3D textures (3D depth is not addressed by layer here)
  lovrCheck(texture->info.type == TEXTURE_3D || layer + layerCount <= texture->info.layers, "Texture clear range exceeds texture layer count");
  lovrCheck(level + levelCount <= texture->info.mipmaps, "Texture clear range exceeds texture mipmap count");
  // Prior accesses to the texture must finish before the clear writes it
  gpu_barrier barrier = syncTransfer(&texture->root->sync, GPU_PHASE_CLEAR, GPU_CACHE_TRANSFER_WRITE);
  gpu_sync(state.stream, &barrier, 1);
  // Rebase view-relative layer/level onto the root texture
  gpu_clear_texture(state.stream, texture->root->gpu, value, texture->baseLayer + layer, layerCount, texture->baseLevel + level, levelCount);
}
// Regenerates `count` mipmap levels below `base` by downsampling (blitting) level-to-level.
// count == ~0u regenerates every level below base.
void lovrTextureGenerateMipmaps(Texture* texture, uint32_t base, uint32_t count) {
  beginFrame();
  if (count == ~0u) count = texture->info.mipmaps - (base + 1);
  uint32_t supports = state.features.formats[texture->info.format][texture->info.srgb];
  lovrCheck(texture->info.usage & TEXTURE_TRANSFER, "Texture must be created with the 'transfer' usage to mipmap it");
  // Mipmapping is implemented with blits, so the format must be blittable on this GPU
  lovrCheck(supports & GPU_FEATURE_BLIT, "This GPU does not support mipmapping this texture format/encoding");
  lovrCheck(base + count < texture->info.mipmaps, "Trying to generate too many mipmaps");
  // Each blit reads the previous level and writes the next, so flush both directions
  gpu_barrier barrier = syncTransfer(&texture->root->sync, GPU_PHASE_BLIT, GPU_CACHE_TRANSFER_READ | GPU_CACHE_TRANSFER_WRITE);
  gpu_sync(state.stream, &barrier, 1);
  // Rebase the view-relative base level onto the root texture
  mipmapTexture(state.stream, texture, texture->baseLevel + base, count);
}
2023-10-31 23:08:00 +00:00
// Lazily creates and caches a plain Material that renders this texture with white color and
// identity UV scale. The cached Material is returned on subsequent calls.
Material* lovrTextureToMaterial(Texture* texture) {
  if (!texture->material) {
    texture->material = lovrMaterialCreate(&(MaterialInfo) {
      .data.color = { 1.f, 1.f, 1.f, 1.f },
      .data.uvScale = { 1.f, 1.f },
      .texture = texture
    });

    // Since the Material refcounts the Texture, this creates a cycle.  Release the texture to make
    // sure this is a weak relationship (the automaterial does not keep the texture refcounted).
    lovrRelease(texture, lovrTextureDestroy);
    // Also clear the back-pointer so the material doesn't release the texture on destroy
    texture->material->info.texture = NULL;
  }
  return texture->material;
}
2022-05-01 22:47:17 +00:00
// Sampler
2022-06-08 03:42:10 +00:00
// Returns one of the module's builtin samplers, keyed by filter mode.
Sampler* lovrGraphicsGetDefaultSampler(FilterMode mode) {
  Sampler** defaults = state.defaultSamplers;
  return defaults[mode];
}
2022-07-13 07:07:15 +00:00
Sampler * lovrSamplerCreate ( const SamplerInfo * info ) {
2022-05-01 22:47:17 +00:00
lovrCheck ( info - > range [ 1 ] < 0.f | | info - > range [ 1 ] > = info - > range [ 0 ] , " Invalid Sampler mipmap range " ) ;
lovrCheck ( info - > anisotropy < = state . limits . anisotropy , " Sampler anisotropy (%f) exceeds anisotropy limit (%f) " , info - > anisotropy , state . limits . anisotropy ) ;
2024-03-11 21:38:00 +00:00
Sampler * sampler = lovrCalloc ( sizeof ( Sampler ) + gpu_sizeof_sampler ( ) ) ;
2022-05-09 18:47:06 +00:00
sampler - > ref = 1 ;
2022-05-01 22:47:17 +00:00
sampler - > gpu = ( gpu_sampler * ) ( sampler + 1 ) ;
sampler - > info = * info ;
gpu_sampler_info gpu = {
. min = ( gpu_filter ) info - > min ,
. mag = ( gpu_filter ) info - > mag ,
. mip = ( gpu_filter ) info - > mip ,
. wrap [ 0 ] = ( gpu_wrap ) info - > wrap [ 0 ] ,
. wrap [ 1 ] = ( gpu_wrap ) info - > wrap [ 1 ] ,
. wrap [ 2 ] = ( gpu_wrap ) info - > wrap [ 2 ] ,
. compare = ( gpu_compare_mode ) info - > compare ,
. anisotropy = MIN ( info - > anisotropy , state . limits . anisotropy ) ,
. lodClamp = { info - > range [ 0 ] , info - > range [ 1 ] }
} ;
2022-05-09 18:47:06 +00:00
gpu_sampler_init ( sampler - > gpu , & gpu ) ;
2022-05-01 22:47:17 +00:00
return sampler ;
}
void lovrSamplerDestroy ( void * ref ) {
Sampler * sampler = ref ;
gpu_sampler_destroy ( sampler - > gpu ) ;
2024-03-11 21:38:00 +00:00
lovrFree ( sampler ) ;
2022-05-01 22:47:17 +00:00
}
// Returns a read-only pointer to the Sampler's descriptor.
const SamplerInfo* lovrSamplerGetInfo(Sampler* sampler) {
  const SamplerInfo* info = &sampler->info;
  return info;
}
2022-05-09 18:47:06 +00:00
// Shader
2023-09-29 05:03:08 +00:00
# ifdef LOVR_USE_GLSLANG
static glsl_include_result_t * includer ( void * cb , const char * path , const char * includer , size_t depth ) {
if ( ! strcmp ( path , includer ) ) {
return NULL ;
}
glsl_include_result_t * result = tempAlloc ( & state . allocator , sizeof ( * result ) ) ;
lovrAssert ( result , " Out of memory " ) ;
result - > header_name = path ;
result - > header_data = ( ( ShaderIncluder * ) cb ) ( path , & result - > header_length ) ;
2023-11-30 17:03:33 +00:00
if ( ! result - > header_data ) return NULL ;
2023-09-29 05:03:08 +00:00
return result ;
}
# endif
2024-02-20 23:07:30 +00:00
// Compiles up to 2 GLSL shader stages to SPIR-V using glslang, writing results to `outputs`.
// Sources that are already SPIR-V (detected by magic number) are passed through unchanged
// (the output aliases the input); GLSL results are heap-allocated and owned by the caller.
// Without LOVR_USE_GLSLANG this always throws.
void lovrGraphicsCompileShader(ShaderSource* stages, ShaderSource* outputs, uint32_t stageCount, ShaderIncluder* io) {
#ifdef LOVR_USE_GLSLANG
  const glslang_stage_t stageMap[] = {
    [STAGE_VERTEX] = GLSLANG_STAGE_VERTEX,
    [STAGE_FRAGMENT] = GLSLANG_STAGE_FRAGMENT,
    [STAGE_COMPUTE] = GLSLANG_STAGE_COMPUTE
  };

  const char* stageNames[] = {
    [STAGE_VERTEX] = "vertex",
    [STAGE_FRAGMENT] = "fragment",
    [STAGE_COMPUTE] = "compute"
  };

  // Preamble injected before every GLSL source (version + required extensions)
  const char* prefix = ""
    "#version 460\n"
    "#extension GL_EXT_multiview : require\n"
    "#extension GL_EXT_samplerless_texture_functions : require\n"
    "#extension GL_GOOGLE_include_directive : require\n";

  glslang_program_t* program = NULL;
  glslang_shader_t* shaders[2] = { 0 };

  if (stageCount > COUNTOF(shaders)) {
    lovrUnreachable();
  }

  for (uint32_t i = 0; i < stageCount; i++) {
    ShaderSource* source = &stages[i];

    // It's okay to pass precompiled SPIR-V here, and it will be returned unchanged.  However, it's
    // dangerous to mix SPIR-V and GLSL because then glslang won't perform cross-stage linking,
    // which means that e.g. the default uniform block might be different for each stage.  This
    // isn't a problem when using the default shaders since they don't use uniforms.
    uint32_t magic = 0x07230203;
    if (source->size % 4 == 0 && source->size >= 4 && !memcmp(source->code, &magic, 4)) {
      outputs[i] = stages[i];
      continue;
    } else if (!program) {
      // Lazily create the program only once a GLSL stage is actually encountered
      program = glslang_program_create();
    }

    // Sources are concatenated: preamble, shared helper library, then the user's code with
    // line numbers reset so errors point into the user source
    const char* strings[] = {
      prefix,
      (const char*) etc_shaders_lovr_glsl,
      "#line 1\n",
      source->code
    };

    lovrCheck(source->size <= INT_MAX, "Shader is way too big");

    // -1 length means NUL-terminated
    int lengths[] = {
      -1,
      etc_shaders_lovr_glsl_len,
      -1,
      (int) source->size
    };

    const glslang_resource_t* resource = glslang_default_resource();

    glslang_input_t input = {
      .language = GLSLANG_SOURCE_GLSL,
      .stage = stageMap[source->stage],
      .client = GLSLANG_CLIENT_VULKAN,
      .client_version = GLSLANG_TARGET_VULKAN_1_1,
      .target_language = GLSLANG_TARGET_SPV,
      .target_language_version = GLSLANG_TARGET_SPV_1_3,
      .strings = strings,
      .lengths = lengths,
      .string_count = COUNTOF(strings),
      .default_version = 460,
      .default_profile = GLSLANG_NO_PROFILE,
      .forward_compatible = true,
      .resource = resource,
      .callbacks.include_local = includer,
      .callbacks_ctx = (void*) io
    };

    shaders[i] = glslang_shader_create(&input);

    int options = 0;
    options |= GLSLANG_SHADER_AUTO_MAP_BINDINGS;
    options |= GLSLANG_SHADER_AUTO_MAP_LOCATIONS;
    options |= GLSLANG_SHADER_VULKAN_RULES_RELAXED;
    glslang_shader_set_options(shaders[i], options);

    if (!glslang_shader_preprocess(shaders[i], &input)) {
      lovrThrow("Could not preprocess %s shader:\n%s", stageNames[source->stage], glslang_shader_get_info_log(shaders[i]));
    }

    if (!glslang_shader_parse(shaders[i], &input)) {
      lovrThrow("Could not parse %s shader:\n%s", stageNames[source->stage], glslang_shader_get_info_log(shaders[i]));
    }

    glslang_program_add_shader(program, shaders[i]);
  }

  // We might not need to do anything if all the inputs were already SPIR-V
  if (!program) {
    return;
  }

  if (!glslang_program_link(program, 0)) {
    lovrThrow("Could not link shader:\n%s", glslang_program_get_info_log(program));
  }

  glslang_program_map_io(program);

  // Emit extra debug info when the module was configured for debugging and the GPU supports it
  glslang_spv_options_t spvOptions = { 0 };

  if (state.config.debug && state.features.shaderDebug) {
    spvOptions.generate_debug_info = true;
    spvOptions.emit_nonsemantic_shader_debug_info = true;
    spvOptions.emit_nonsemantic_shader_debug_source = true;
  }

  for (uint32_t i = 0; i < stageCount; i++) {
    if (!shaders[i]) continue; // Pass-through SPIR-V stage, output was already written above

    ShaderSource* source = &stages[i];

    if (state.config.debug && state.features.shaderDebug) {
      glslang_program_add_source_text(program, stageMap[source->stage], source->code, source->size);
    }

    glslang_program_SPIRV_generate_with_options(program, stageMap[source->stage], &spvOptions);
    void* words = glslang_program_SPIRV_get_ptr(program);
    size_t size = glslang_program_SPIRV_get_size(program) * 4;

    // Copy out of glslang's internal buffer; ownership of `data` transfers to the caller
    void* data = lovrMalloc(size);
    memcpy(data, words, size);
    outputs[i].stage = source->stage;
    outputs[i].code = data;
    outputs[i].size = size;

    glslang_shader_delete(shaders[i]);
  }

  glslang_program_delete(program);
#else
  lovrThrow("Could not compile shader: No shader compiler available");
#endif
}
// Shared shader initialization:
// - Reorders the shader's flag arrays so overridden (active) flags occupy a contiguous prefix,
//   which can then be handed directly to pipeline creation.
// - For compute shaders, eagerly creates the compute pipeline.
static void lovrShaderInit(Shader* shader) {
  // Shaders store the full list of their flags so clones can override them, but they are reordered
  // to put overridden (active) ones first, so a contiguous list can be used to create pipelines
  for (uint32_t i = 0; i < shader->info.flagCount; i++) {
    ShaderFlag* flag = &shader->info.flags[i];

    // Overrides can match by name (hashed) or, when unnamed, by numeric id
    uint32_t hash = flag->name ? (uint32_t) hash64(flag->name, strlen(flag->name)) : 0;

    for (uint32_t j = 0; j < shader->flagCount; j++) {
      if (hash ? (hash != shader->flagLookup[j]) : (flag->id != shader->flags[j].id)) continue;

      // Swap the matched flag (and its lookup hash) into the next slot of the override prefix
      uint32_t index = shader->overrideCount++;

      if (index != j) {
        gpu_shader_flag temp = shader->flags[index];
        shader->flags[index] = shader->flags[j];
        shader->flags[j] = temp;

        uint32_t tempHash = shader->flagLookup[index];
        shader->flagLookup[index] = shader->flagLookup[j];
        shader->flagLookup[j] = tempHash;
      }

      shader->flags[index].value = flag->value;
    }
  }

  if (shader->info.type == SHADER_COMPUTE) {
    gpu_compute_pipeline_info pipelineInfo = {
      .shader = shader->gpu,
      .flags = shader->flags,
      .flagCount = shader->overrideCount
    };

    // Pipelines live in a reserved virtual memory range; commit enough pages for the new one
    lovrAssert(state.pipelineCount < MAX_PIPELINES, "Too many pipelines!");
    shader->computePipeline = getPipeline(state.pipelineCount++);
    os_vm_commit(state.pipelines, state.pipelineCount * gpu_sizeof_pipeline());
    gpu_pipeline_init_compute(shader->computePipeline, &pipelineInfo);
  }
}
2022-08-02 05:10:06 +00:00
// Returns the embedded SPIR-V blob for one stage of a built-in shader.
ShaderSource lovrGraphicsGetDefaultShaderSource(DefaultShader type, ShaderStage stage) {
  #define SHADER(s, blob) [s] = { s, blob, sizeof(blob) }
  const ShaderSource sources[][3] = {
    [SHADER_UNLIT] = {
      SHADER(STAGE_VERTEX, lovr_shader_unlit_vert),
      SHADER(STAGE_FRAGMENT, lovr_shader_unlit_frag)
    },
    [SHADER_NORMAL] = {
      SHADER(STAGE_VERTEX, lovr_shader_unlit_vert),
      SHADER(STAGE_FRAGMENT, lovr_shader_normal_frag)
    },
    [SHADER_FONT] = {
      SHADER(STAGE_VERTEX, lovr_shader_unlit_vert),
      SHADER(STAGE_FRAGMENT, lovr_shader_font_frag)
    },
    [SHADER_CUBEMAP] = {
      SHADER(STAGE_VERTEX, lovr_shader_cubemap_vert),
      SHADER(STAGE_FRAGMENT, lovr_shader_cubemap_frag)
    },
    [SHADER_EQUIRECT] = {
      SHADER(STAGE_VERTEX, lovr_shader_cubemap_vert),
      SHADER(STAGE_FRAGMENT, lovr_shader_equirect_frag)
    },
    [SHADER_FILL_2D] = {
      SHADER(STAGE_VERTEX, lovr_shader_fill_vert),
      SHADER(STAGE_FRAGMENT, lovr_shader_unlit_frag)
    },
    [SHADER_FILL_ARRAY] = {
      SHADER(STAGE_VERTEX, lovr_shader_fill_vert),
      SHADER(STAGE_FRAGMENT, lovr_shader_fill_array_frag)
    },
    [SHADER_ANIMATOR] = {
      SHADER(STAGE_COMPUTE, lovr_shader_animator_comp)
    },
    [SHADER_BLENDER] = {
      SHADER(STAGE_COMPUTE, lovr_shader_blender_comp)
    },
    [SHADER_TALLY_MERGE] = {
      SHADER(STAGE_COMPUTE, lovr_shader_tallymerge_comp)
    }
  };
  #undef SHADER

  return sources[type][stage];
}
2022-05-28 03:47:07 +00:00
// Lazily creates (and caches) one of the built-in shaders.
Shader* lovrGraphicsGetDefaultShader(DefaultShader type) {
  if (state.defaultShaders[type]) {
    return state.defaultShaders[type];
  }

  switch (type) {
    // The built-in compute shaders get a subgroup-size flag in addition to their single stage
    case SHADER_ANIMATOR:
    case SHADER_BLENDER:
    case SHADER_TALLY_MERGE: {
      ShaderSource computeStage = lovrGraphicsGetDefaultShaderSource(type, STAGE_COMPUTE);
      ShaderFlag subgroupSizeFlag = { NULL, 0, state.device.subgroupSize };
      state.defaultShaders[type] = lovrShaderCreate(&(ShaderInfo) {
        .type = SHADER_COMPUTE,
        .stages = &computeStage,
        .stageCount = 1,
        .flags = &subgroupSizeFlag,
        .flagCount = 1,
        .isDefault = true
      });
      break;
    }
    default: {
      ShaderSource graphicsStages[2] = {
        lovrGraphicsGetDefaultShaderSource(type, STAGE_VERTEX),
        lovrGraphicsGetDefaultShaderSource(type, STAGE_FRAGMENT)
      };
      state.defaultShaders[type] = lovrShaderCreate(&(ShaderInfo) {
        .type = SHADER_GRAPHICS,
        .stages = graphicsStages,
        .stageCount = 2,
        .isDefault = true
      });
      break;
    }
  }

  return state.defaultShaders[type];
}
2022-07-13 07:07:15 +00:00
Shader * lovrShaderCreate ( const ShaderInfo * info ) {
2024-03-11 21:38:00 +00:00
Shader * shader = lovrCalloc ( sizeof ( Shader ) + gpu_sizeof_shader ( ) ) ;
2024-02-24 19:49:11 +00:00
shader - > ref = 1 ;
shader - > gpu = ( gpu_shader * ) ( shader + 1 ) ;
shader - > info = * info ;
2022-05-22 22:09:09 +00:00
2024-02-24 19:49:11 +00:00
// Validate stage combinations
2024-02-20 23:07:30 +00:00
for ( uint32_t i = 0 ; i < info - > stageCount ; i + + ) {
shader - > stageMask | = ( 1 < < info - > stages [ i ] . stage ) ;
2024-01-03 19:18:58 +00:00
}
2024-02-20 23:07:30 +00:00
if ( info - > type = = SHADER_GRAPHICS ) {
2024-02-21 18:37:59 +00:00
lovrCheck ( shader - > stageMask = = ( FLAG_VERTEX | FLAG_FRAGMENT ) , " Graphics shaders must have a vertex and a pixel stage " ) ;
2024-02-20 23:07:30 +00:00
} else if ( info - > type = = SHADER_COMPUTE ) {
lovrCheck ( shader - > stageMask = = FLAG_COMPUTE , " Compute shaders can only have a compute stage " ) ;
}
2024-01-03 19:18:58 +00:00
2024-02-24 19:49:11 +00:00
size_t stack = tempPush ( & state . allocator ) ;
// Copy the source to temp memory (we perform edits on the SPIR-V and the input might be readonly)
2024-02-20 23:07:30 +00:00
void * source [ 2 ] ;
for ( uint32_t i = 0 ; i < info - > stageCount ; i + + ) {
source [ i ] = tempAlloc ( & state . allocator , info - > stages [ i ] . size ) ;
memcpy ( source [ i ] , info - > stages [ i ] . code , info - > stages [ i ] . size ) ;
2024-01-03 19:18:58 +00:00
}
2022-05-22 22:09:09 +00:00
2023-01-16 13:15:13 +00:00
// Parse SPIR-V
2022-05-22 22:09:09 +00:00
spv_result result ;
spv_info spv [ 2 ] = { 0 } ;
2024-01-03 19:18:58 +00:00
uint32_t maxResources = 0 ;
uint32_t maxSpecConstants = 0 ;
uint32_t maxFields = 0 ;
uint32_t maxChars = 0 ;
2024-02-20 23:07:30 +00:00
for ( uint32_t i = 0 ; i < info - > stageCount ; i + + ) {
result = spv_parse ( source [ i ] , info - > stages [ i ] . size , & spv [ i ] ) ;
2022-05-22 22:09:09 +00:00
lovrCheck ( result = = SPV_OK , " Failed to load Shader: %s \n " , spv_result_to_string ( result ) ) ;
2022-06-01 03:47:47 +00:00
lovrCheck ( spv [ i ] . version < = 0x00010300 , " Invalid SPIR-V version (up to 1.3 is supported) " ) ;
2022-05-22 22:09:09 +00:00
2023-04-30 01:33:58 +00:00
spv [ i ] . features = tempAlloc ( & state . allocator , spv [ i ] . featureCount * sizeof ( uint32_t ) ) ;
spv [ i ] . specConstants = tempAlloc ( & state . allocator , spv [ i ] . specConstantCount * sizeof ( spv_spec_constant ) ) ;
spv [ i ] . attributes = tempAlloc ( & state . allocator , spv [ i ] . attributeCount * sizeof ( spv_attribute ) ) ;
spv [ i ] . resources = tempAlloc ( & state . allocator , spv [ i ] . resourceCount * sizeof ( spv_resource ) ) ;
spv [ i ] . fields = tempAlloc ( & state . allocator , spv [ i ] . fieldCount * sizeof ( spv_field ) ) ;
2023-03-22 05:26:14 +00:00
memset ( spv [ i ] . fields , 0 , spv [ i ] . fieldCount * sizeof ( spv_field ) ) ;
2022-05-22 22:09:09 +00:00
2024-02-20 23:07:30 +00:00
result = spv_parse ( source [ i ] , info - > stages [ i ] . size , & spv [ i ] ) ;
2022-05-22 22:09:09 +00:00
lovrCheck ( result = = SPV_OK , " Failed to load Shader: %s \n " , spv_result_to_string ( result ) ) ;
checkShaderFeatures ( spv [ i ] . features , spv [ i ] . featureCount ) ;
2024-01-03 19:18:58 +00:00
maxResources + = spv [ i ] . resourceCount ;
maxSpecConstants + = spv [ i ] . specConstantCount ;
maxFields + = spv [ i ] . fieldCount ;
for ( uint32_t j = 0 ; j < spv [ i ] . fieldCount ; j + + ) {
spv_field * field = & spv [ i ] . fields [ j ] ;
maxChars + = field - > name ? strlen ( field - > name ) + 1 : 0 ;
}
2022-05-22 22:09:09 +00:00
}
2024-02-24 19:49:11 +00:00
// Allocate memory
2024-03-11 21:38:00 +00:00
shader - > resources = lovrMalloc ( maxResources * sizeof ( ShaderResource ) ) ;
shader - > fields = lovrMalloc ( maxFields * sizeof ( DataField ) ) ;
shader - > names = lovrMalloc ( maxChars ) ;
shader - > flags = lovrMalloc ( maxSpecConstants * sizeof ( gpu_shader_flag ) ) ;
shader - > flagLookup = lovrMalloc ( maxSpecConstants * sizeof ( uint32_t ) ) ;
2024-01-03 19:18:58 +00:00
2024-02-24 19:49:11 +00:00
// Workgroup size
2024-02-20 23:07:30 +00:00
if ( info - > type = = SHADER_COMPUTE ) {
2024-02-24 19:49:11 +00:00
uint32_t * workgroupSize = spv [ 0 ] . workgroupSize ;
uint32_t totalWorkgroupSize = workgroupSize [ 0 ] * workgroupSize [ 1 ] * workgroupSize [ 2 ] ;
lovrCheck ( workgroupSize [ 0 ] < = state . limits . workgroupSize [ 0 ] , " Shader workgroup size exceeds the 'workgroupSize' limit " ) ;
lovrCheck ( workgroupSize [ 1 ] < = state . limits . workgroupSize [ 1 ] , " Shader workgroup size exceeds the 'workgroupSize' limit " ) ;
lovrCheck ( workgroupSize [ 2 ] < = state . limits . workgroupSize [ 2 ] , " Shader workgroup size exceeds the 'workgroupSize' limit " ) ;
2022-08-06 20:06:42 +00:00
lovrCheck ( totalWorkgroupSize < = state . limits . totalWorkgroupSize , " Shader workgroup size exceeds the 'totalWorkgroupSize' limit " ) ;
2024-02-24 19:49:11 +00:00
memcpy ( shader - > workgroupSize , workgroupSize , 3 * sizeof ( uint32_t ) ) ;
}
// Vertex attributes
if ( info - > type = = SHADER_GRAPHICS & & spv [ 0 ] . attributeCount > 0 ) {
2023-01-16 13:15:13 +00:00
shader - > attributeCount = spv [ 0 ] . attributeCount ;
2024-03-11 21:38:00 +00:00
shader - > attributes = lovrMalloc ( shader - > attributeCount * sizeof ( ShaderAttribute ) ) ;
2023-01-16 13:15:13 +00:00
for ( uint32_t i = 0 ; i < shader - > attributeCount ; i + + ) {
shader - > attributes [ i ] . location = spv [ 0 ] . attributes [ i ] . location ;
shader - > attributes [ i ] . hash = ( uint32_t ) hash64 ( spv [ 0 ] . attributes [ i ] . name , strlen ( spv [ 0 ] . attributes [ i ] . name ) ) ;
shader - > hasCustomAttributes | = shader - > attributes [ i ] . location < 10 ;
}
2022-07-10 06:39:03 +00:00
}
2024-02-20 23:07:30 +00:00
uint32_t resourceSet = info - > type = = SHADER_COMPUTE ? 0 : 2 ;
uint32_t uniformSet = info - > type = = SHADER_COMPUTE ? 1 : 3 ;
2023-01-16 13:15:13 +00:00
2022-05-22 22:09:09 +00:00
// Resources
2024-02-24 19:49:11 +00:00
for ( uint32_t s = 0 , lastResourceCount = 0 ; s < info - > stageCount ; s + + , lastResourceCount = shader - > resourceCount ) {
2024-02-20 23:07:30 +00:00
ShaderStage stage = info - > stages [ s ] . stage ;
2022-05-22 22:09:09 +00:00
for ( uint32_t i = 0 ; i < spv [ s ] . resourceCount ; i + + ) {
spv_resource * resource = & spv [ s ] . resources [ i ] ;
2024-02-24 19:49:11 +00:00
// It's safe to cast away const because we are operating on a copy of the input
2024-02-05 05:33:42 +00:00
uint32_t * set = ( uint32_t * ) resource - > set ;
uint32_t * binding = ( uint32_t * ) resource - > binding ;
2022-05-22 22:09:09 +00:00
2024-02-24 19:49:11 +00:00
// glslang outputs gl_DefaultUniformBlock, there's also the Constants macro which defines a DefaultUniformBlock UBO
if ( ! strcmp ( resource - > name , " gl_DefaultUniformBlock " ) | | ! strcmp ( resource - > name , " DefaultUniformBlock " ) ) {
spv_field * block = resource - > bufferFields ;
shader - > uniformSize = block - > elementSize ;
shader - > uniformCount = block - > fieldCount ;
shader - > uniforms = shader - > fields + ( ( s = = 1 ? spv [ 0 ] . fieldCount : 0 ) + ( block - > fields - spv [ s ] . fields ) ) ;
* set = uniformSet ;
* binding = 0 ;
2022-05-22 22:09:09 +00:00
continue ;
}
2024-02-24 19:49:11 +00:00
// Skip builtin resources
if ( info - > type = = SHADER_GRAPHICS & & ( ( * set = = 0 & & * binding < = LAST_BUILTIN_BINDING ) | | * set = = 1 ) ) {
2024-02-05 05:33:42 +00:00
continue ;
}
2023-03-20 20:15:47 +00:00
2024-02-24 19:49:11 +00:00
static const gpu_slot_type types [ ] = {
2022-05-22 22:09:09 +00:00
[ SPV_UNIFORM_BUFFER ] = GPU_SLOT_UNIFORM_BUFFER ,
[ SPV_STORAGE_BUFFER ] = GPU_SLOT_STORAGE_BUFFER ,
[ SPV_SAMPLED_TEXTURE ] = GPU_SLOT_SAMPLED_TEXTURE ,
[ SPV_STORAGE_TEXTURE ] = GPU_SLOT_STORAGE_TEXTURE ,
[ SPV_SAMPLER ] = GPU_SLOT_SAMPLER
} ;
2024-02-24 19:49:11 +00:00
gpu_phase phases [ ] = {
2024-01-03 19:18:58 +00:00
[ STAGE_VERTEX ] = GPU_PHASE_SHADER_VERTEX ,
[ STAGE_FRAGMENT ] = GPU_PHASE_SHADER_FRAGMENT ,
[ STAGE_COMPUTE ] = GPU_PHASE_SHADER_COMPUTE
} ;
2024-02-24 19:49:11 +00:00
gpu_slot_type type = types [ resource - > type ] ;
gpu_phase phase = phases [ stage ] ;
2022-05-22 22:09:09 +00:00
2024-02-24 19:49:11 +00:00
// Merge resources between shader stages, by name
bool merged = false ;
uint32_t hash = ( uint32_t ) hash64 ( resource - > name , strlen ( resource - > name ) ) ;
2024-02-05 05:33:42 +00:00
for ( uint32_t j = 0 ; j < lastResourceCount ; j + + ) {
2023-11-28 19:12:29 +00:00
ShaderResource * other = & shader - > resources [ j ] ;
2024-02-05 05:33:42 +00:00
if ( other - > hash = = hash ) {
2024-02-24 19:49:11 +00:00
lovrCheck ( other - > type = = type , " Shader variable '%s' is declared in multiple shader stages with different types " , resource - > name ) ;
2024-02-20 23:07:30 +00:00
* set = resourceSet ;
2024-02-24 22:34:29 +00:00
* binding = shader - > resources [ j ] . binding ;
shader - > resources [ j ] . phase | = phase ;
2024-02-24 19:49:11 +00:00
merged = true ;
2023-11-28 19:12:29 +00:00
break ;
2022-05-22 22:09:09 +00:00
}
}
2024-02-24 19:49:11 +00:00
if ( merged ) {
2022-05-22 22:09:09 +00:00
continue ;
}
uint32_t index = shader - > resourceCount + + ;
2024-02-24 22:34:29 +00:00
lovrCheck ( index < MAX_SHADER_RESOURCES , " Shader resource count exceeds resourcesPerShader limit (%d) " , MAX_SHADER_RESOURCES ) ;
2024-02-24 19:49:11 +00:00
lovrCheck ( resource - > type ! = SPV_COMBINED_TEXTURE_SAMPLER , " Shader variable '%s' is a%s, which is not supported%s " , resource - > name , " combined texture sampler " , " (use e.g. texture2D instead of sampler2D) " ) ;
lovrCheck ( resource - > type ! = SPV_UNIFORM_TEXEL_BUFFER , " Shader variable '%s' is a%s, which is not supported%s " , resource - > name , " uniform texel buffer " , " " ) ;
lovrCheck ( resource - > type ! = SPV_STORAGE_TEXEL_BUFFER , " Shader variable '%s' is a%s, which is not supported%s " , resource - > name , " storage texel buffer " , " " ) ;
lovrCheck ( resource - > type ! = SPV_INPUT_ATTACHMENT , " Shader variable '%s' is a%s, which is not supported%s " , resource - > name , " n input attachment " , " " ) ;
2024-02-24 22:34:29 +00:00
lovrCheck ( resource - > arraySize = = 0 , " Arrays of resources in shaders are not currently supported " ) ;
2022-05-22 22:09:09 +00:00
2024-02-24 22:34:29 +00:00
// Move resources into set #2 and give them auto-incremented binding numbers starting at zero
// Compute shaders don't need remapping since everything's in set #0 and there are no builtins
if ( ! info - > isDefault & & info - > type = = SHADER_GRAPHICS & & * set = = 0 & & * binding > LAST_BUILTIN_BINDING ) {
* set = resourceSet ;
* binding = index ;
2022-06-01 03:16:16 +00:00
}
2024-02-24 19:49:11 +00:00
bool buffer = resource - > type = = SPV_UNIFORM_BUFFER | | resource - > type = = SPV_STORAGE_BUFFER ;
bool texture = resource - > type = = SPV_SAMPLED_TEXTURE | | resource - > type = = SPV_STORAGE_TEXTURE ;
bool sampler = resource - > type = = SPV_SAMPLER ;
bool storage = resource - > type = = SPV_STORAGE_BUFFER | | resource - > type = = SPV_STORAGE_TEXTURE ;
2022-06-01 03:24:43 +00:00
2024-02-24 22:34:29 +00:00
shader - > bufferMask | = ( buffer < < index ) ;
shader - > textureMask | = ( texture < < index ) ;
shader - > samplerMask | = ( sampler < < index ) ;
shader - > storageMask | = ( storage < < index ) ;
gpu_cache cache ;
2024-02-24 19:49:11 +00:00
if ( storage ) {
2024-02-24 22:34:29 +00:00
cache = info - > type = = SHADER_COMPUTE ? GPU_CACHE_STORAGE_WRITE : GPU_CACHE_STORAGE_READ ;
2024-02-24 19:49:11 +00:00
} else {
2024-02-24 22:34:29 +00:00
cache = texture ? GPU_CACHE_TEXTURE : GPU_CACHE_UNIFORM ;
2024-02-24 19:49:11 +00:00
}
2022-05-22 22:09:09 +00:00
shader - > resources [ index ] = ( ShaderResource ) {
. hash = hash ,
2024-02-24 23:33:09 +00:00
. binding = * binding ,
2024-02-24 22:34:29 +00:00
. type = type ,
. phase = phase ,
. cache = cache
2022-05-22 22:09:09 +00:00
} ;
2022-05-24 05:32:36 +00:00
2024-02-24 19:49:11 +00:00
if ( buffer & & resource - > bufferFields ) {
2024-02-19 04:35:56 +00:00
spv_field * field = & resource - > bufferFields [ 0 ] ;
2023-08-25 20:42:09 +00:00
2024-04-16 22:04:20 +00:00
// Struct containing single item gets unwrapped
if ( field - > fieldCount = = 1 ) {
2023-08-25 20:42:09 +00:00
field = & field - > fields [ 0 ] ;
}
shader - > resources [ index ] . fieldCount = field - > totalFieldCount + 1 ;
shader - > resources [ index ] . format = shader - > fields + ( ( s = = 1 ? spv [ 0 ] . fieldCount : 0 ) + ( field - spv [ s ] . fields ) ) ;
2023-01-16 13:15:13 +00:00
}
2022-05-22 22:09:09 +00:00
}
}
2023-01-16 13:15:13 +00:00
// Fields
char * name = shader - > names ;
2024-02-20 23:07:30 +00:00
for ( uint32_t s = 0 ; s < info - > stageCount ; s + + ) {
2023-01-16 13:15:13 +00:00
for ( uint32_t i = 0 ; i < spv [ s ] . fieldCount ; i + + ) {
2023-06-24 02:11:30 +00:00
static const DataType dataTypes [ ] = {
[ SPV_B32 ] = TYPE_U32 ,
[ SPV_I32 ] = TYPE_I32 ,
[ SPV_I32x2 ] = TYPE_I32x2 ,
[ SPV_I32x3 ] = TYPE_I32x3 ,
[ SPV_I32x4 ] = TYPE_I32x4 ,
[ SPV_U32 ] = TYPE_U32 ,
[ SPV_U32x2 ] = TYPE_U32x2 ,
[ SPV_U32x3 ] = TYPE_U32x3 ,
[ SPV_U32x4 ] = TYPE_U32x4 ,
[ SPV_F32 ] = TYPE_F32 ,
[ SPV_F32x2 ] = TYPE_F32x2 ,
[ SPV_F32x3 ] = TYPE_F32x3 ,
[ SPV_F32x4 ] = TYPE_F32x4 ,
2023-08-02 01:45:37 +00:00
[ SPV_MAT2x2 ] = TYPE_MAT2 ,
[ SPV_MAT2x3 ] = ~ 0u ,
[ SPV_MAT2x4 ] = ~ 0u ,
[ SPV_MAT3x2 ] = ~ 0u ,
[ SPV_MAT3x3 ] = TYPE_MAT3 ,
[ SPV_MAT3x4 ] = ~ 0u ,
[ SPV_MAT4x2 ] = ~ 0u ,
[ SPV_MAT4x3 ] = ~ 0u ,
[ SPV_MAT4x4 ] = TYPE_MAT4 ,
[ SPV_STRUCT ] = ~ 0u
2023-01-16 13:15:13 +00:00
} ;
spv_field * field = & spv [ s ] . fields [ i ] ;
uint32_t base = s = = 1 ? spv [ 0 ] . fieldCount : 0 ;
2023-06-24 04:14:19 +00:00
shader - > fields [ base + i ] = ( DataField ) {
2023-08-02 01:45:37 +00:00
. type = dataTypes [ field - > type ] ,
2023-01-16 13:15:13 +00:00
. offset = field - > offset ,
. length = field - > arrayLength ,
2023-08-25 20:42:09 +00:00
. stride = field - > arrayLength > 0 ? field - > arrayStride : field - > elementSize , // Use stride as element size for non-arrays
. fieldCount = field - > fieldCount ,
2024-02-24 19:49:11 +00:00
. fields = field - > fields ? shader - > fields + base + ( field - > fields - spv [ s ] . fields ) : NULL
2023-01-16 13:15:13 +00:00
} ;
if ( field - > name ) {
size_t length = strlen ( field - > name ) ;
memcpy ( name , field - > name , length ) ;
shader - > fields [ base + i ] . hash = ( uint32_t ) hash64 ( name , length ) ;
shader - > fields [ base + i ] . name = name ;
name [ length ] = ' \0 ' ;
name + = length + 1 ;
}
}
}
2022-05-22 22:09:09 +00:00
// Specialization constants
2024-02-20 23:07:30 +00:00
for ( uint32_t s = 0 ; s < info - > stageCount ; s + + ) {
2022-05-22 22:09:09 +00:00
for ( uint32_t i = 0 ; i < spv [ s ] . specConstantCount ; i + + ) {
spv_spec_constant * constant = & spv [ s ] . specConstants [ i ] ;
bool append = true ;
if ( s > 0 ) {
for ( uint32_t j = 0 ; j < spv [ 0 ] . specConstantCount ; j + + ) {
spv_spec_constant * other = & spv [ 0 ] . specConstants [ j ] ;
if ( other - > id = = constant - > id ) {
lovrCheck ( other - > type = = constant - > type , " Shader flag (%d) does not use a consistent type " , constant - > id ) ;
lovrCheck ( ! strcmp ( constant - > name , other - > name ) , " Shader flag (%d) does not use a consistent name " , constant - > id ) ;
append = false ;
break ;
}
}
}
if ( ! append ) {
break ;
}
static const gpu_flag_type flagTypes [ ] = {
[ SPV_B32 ] = GPU_FLAG_B32 ,
[ SPV_I32 ] = GPU_FLAG_I32 ,
[ SPV_U32 ] = GPU_FLAG_U32 ,
[ SPV_F32 ] = GPU_FLAG_F32
} ;
uint32_t index = shader - > flagCount + + ;
2022-07-04 00:26:31 +00:00
2022-08-26 17:22:53 +00:00
// Flag names can start with flag_ which will be ignored for matching purposes
2022-07-04 00:26:31 +00:00
if ( constant - > name ) {
2022-08-26 17:22:53 +00:00
size_t length = strlen ( constant - > name ) ;
size_t offset = length > 5 & & ! memcmp ( constant - > name , " flag_ " , 5 ) ? 5 : 0 ;
shader - > flagLookup [ index ] = ( uint32_t ) hash64 ( constant - > name + offset , length - offset ) ;
2022-07-04 00:26:31 +00:00
} else {
shader - > flagLookup [ index ] = 0 ;
}
2022-05-22 22:09:09 +00:00
shader - > flags [ index ] = ( gpu_shader_flag ) {
. id = constant - > id ,
. type = flagTypes [ constant - > type ]
} ;
}
}
2024-02-24 19:49:11 +00:00
// Layout
gpu_slot * slots = tempAlloc ( & state . allocator , shader - > resourceCount * sizeof ( gpu_slot ) ) ;
for ( uint32_t i = 0 ; i < shader - > resourceCount ; i + + ) {
ShaderResource * resource = & shader - > resources [ i ] ;
slots [ i ] = ( gpu_slot ) {
. number = resource - > binding ,
. type = resource - > type ,
. stages =
( ( resource - > phase & GPU_PHASE_SHADER_VERTEX ) ? GPU_STAGE_VERTEX : 0 ) |
( ( resource - > phase & GPU_PHASE_SHADER_FRAGMENT ) ? GPU_STAGE_FRAGMENT : 0 ) |
( ( resource - > phase & GPU_PHASE_SHADER_COMPUTE ) ? GPU_STAGE_COMPUTE : 0 )
} ;
2024-02-20 23:07:30 +00:00
}
2022-05-24 04:40:57 +00:00
shader - > layout = getLayout ( slots , shader - > resourceCount ) ;
2022-05-09 18:47:06 +00:00
gpu_shader_info gpu = {
2024-02-24 19:49:11 +00:00
. stageCount = info - > stageCount ,
. stages = tempAlloc ( & state . allocator , info - > stageCount * sizeof ( gpu_shader_source ) ) ,
2022-05-22 22:09:09 +00:00
. label = info - > label
2022-05-09 18:47:06 +00:00
} ;
2024-02-20 23:07:30 +00:00
for ( uint32_t i = 0 ; i < info - > stageCount ; i + + ) {
2024-02-24 19:49:11 +00:00
const uint32_t stageMap [ ] = {
[ STAGE_VERTEX ] = GPU_STAGE_VERTEX ,
[ STAGE_FRAGMENT ] = GPU_STAGE_FRAGMENT ,
[ STAGE_COMPUTE ] = GPU_STAGE_COMPUTE
} ;
gpu . stages [ i ] = ( gpu_shader_source ) {
. stage = stageMap [ info - > stages [ i ] . stage ] ,
. code = source [ i ] ,
. length = info - > stages [ i ] . size
} ;
}
for ( uint32_t i = 0 ; i < info - > stageCount ; i + + ) {
if ( spv [ i ] . pushConstants ) {
gpu . pushConstantSize = MAX ( gpu . pushConstantSize , spv [ i ] . pushConstants - > elementSize ) ;
2024-02-20 23:07:30 +00:00
}
2022-05-24 04:40:57 +00:00
}
2024-02-24 19:49:11 +00:00
gpu_layout * resourceLayout = state . layouts . data [ shader - > layout ] . gpu ;
gpu_layout * uniformsLayout = shader - > uniformSize > 0 ? state . layouts . data [ LAYOUT_UNIFORMS ] . gpu : NULL ;
2024-02-20 23:07:30 +00:00
if ( info - > type = = SHADER_GRAPHICS ) {
2024-02-24 19:49:11 +00:00
gpu . layouts [ 0 ] = state . layouts . data [ LAYOUT_BUILTINS ] . gpu ;
2023-04-15 04:27:47 +00:00
gpu . layouts [ 1 ] = state . layouts . data [ LAYOUT_MATERIAL ] . gpu ;
2024-02-24 19:49:11 +00:00
gpu . layouts [ 2 ] = resourceLayout ;
gpu . layouts [ 3 ] = uniformsLayout ;
} else {
gpu . layouts [ 0 ] = resourceLayout ;
gpu . layouts [ 1 ] = uniformsLayout ;
2022-05-24 04:40:57 +00:00
}
2022-05-09 18:47:06 +00:00
gpu_shader_init ( shader - > gpu , & gpu ) ;
2022-05-22 22:09:09 +00:00
lovrShaderInit ( shader ) ;
2024-02-20 23:07:30 +00:00
tempPop ( & state . allocator , stack ) ;
2022-05-22 22:10:07 +00:00
return shader ;
}
2022-05-09 18:47:06 +00:00
2022-05-22 22:10:07 +00:00
// Creates a shallow clone of a shader with a different set of flag values.  The clone shares the
// parent's GPU shader and reflection data (keeping the parent alive via a reference), but owns
// its own copies of the flag arrays so overrides can be applied independently.
Shader* lovrShaderClone(Shader* parent, ShaderFlag* flags, uint32_t count) {
  Shader* clone = lovrCalloc(sizeof(Shader) + gpu_sizeof_shader());
  clone->ref = 1;

  lovrRetain(parent);
  clone->parent = parent;
  clone->gpu = parent->gpu;

  // Same info, except the clone's flag list replaces the parent's
  clone->info = parent->info;
  clone->info.flags = flags;
  clone->info.flagCount = count;

  // Borrow the parent's reflection data wholesale
  clone->layout = parent->layout;
  clone->stageMask = parent->stageMask;
  clone->bufferMask = parent->bufferMask;
  clone->textureMask = parent->textureMask;
  clone->samplerMask = parent->samplerMask;
  clone->storageMask = parent->storageMask;
  clone->uniformSize = parent->uniformSize;
  clone->uniformCount = parent->uniformCount;
  clone->resourceCount = parent->resourceCount;
  clone->flagCount = parent->flagCount;
  clone->attributes = parent->attributes;
  clone->resources = parent->resources;
  clone->uniforms = parent->uniforms;
  clone->fields = parent->fields;
  clone->names = parent->names;

  // Flags are copied, not shared, since lovrShaderInit reorders and rewrites them
  size_t flagBytes = clone->flagCount * sizeof(gpu_shader_flag);
  size_t lookupBytes = clone->flagCount * sizeof(uint32_t);
  clone->flags = lovrMalloc(flagBytes);
  clone->flagLookup = lovrMalloc(lookupBytes);
  memcpy(clone->flags, parent->flags, flagBytes);
  memcpy(clone->flagLookup, parent->flagLookup, lookupBytes);

  lovrShaderInit(clone);
  return clone;
}
void lovrShaderDestroy ( void * ref ) {
Shader * shader = ref ;
2024-01-08 01:06:26 +00:00
if ( shader - > parent ) {
lovrRelease ( shader - > parent , lovrShaderDestroy ) ;
} else {
gpu_shader_destroy ( shader - > gpu ) ;
2024-03-11 21:38:00 +00:00
lovrFree ( shader - > attributes ) ;
lovrFree ( shader - > resources ) ;
lovrFree ( shader - > fields ) ;
lovrFree ( shader - > names ) ;
2024-01-08 01:06:26 +00:00
}
2024-03-11 21:38:00 +00:00
lovrFree ( shader - > flags ) ;
lovrFree ( shader - > flagLookup ) ;
lovrFree ( shader ) ;
2022-05-09 18:47:06 +00:00
}
// Returns a read-only view of the info struct the shader was created with.
const ShaderInfo* lovrShaderGetInfo(Shader* shader) {
  return &shader->info;
}
2022-07-04 05:59:49 +00:00
// Returns whether the shader contains the given stage (vertex/fragment/compute).
bool lovrShaderHasStage(Shader* shader, ShaderStage stage) {
  uint32_t bit = 1u << stage;
  return (shader->stageMask & bit) != 0;
}
2022-07-04 06:04:56 +00:00
// Returns whether the shader declares a vertex attribute, looked up by name when one is given,
// otherwise by location.
bool lovrShaderHasAttribute(Shader* shader, const char* name, uint32_t location) {
  uint32_t hash = name ? (uint32_t) hash64(name, strlen(name)) : 0;

  for (uint32_t i = 0; i < shader->attributeCount; i++) {
    ShaderAttribute* attribute = &shader->attributes[i];
    bool match = name ? (attribute->hash == hash) : (attribute->location == location);
    if (match) {
      return true;
    }
  }

  return false;
}
2022-08-06 20:06:42 +00:00
// Copies the shader's local workgroup dimensions (x, y, z) into size.
void lovrShaderGetWorkgroupSize(Shader* shader, uint32_t size[3]) {
  for (uint32_t axis = 0; axis < 3; axis++) {
    size[axis] = shader->workgroupSize[axis];
  }
}
2023-08-25 20:42:09 +00:00
// Looks up the field layout of a buffer resource by name.  Returns NULL if no buffer resource
// with that name exists; otherwise returns its format and writes the field count to fieldCount.
const DataField* lovrShaderGetBufferFormat(Shader* shader, const char* name, uint32_t* fieldCount) {
  uint32_t hash = (uint32_t) hash64(name, strlen(name));

  for (uint32_t i = 0; i < shader->resourceCount; i++) {
    ShaderResource* resource = &shader->resources[i];
    bool isBuffer = (shader->bufferMask & (1u << resource->binding)) != 0;
    if (isBuffer && resource->hash == hash) {
      *fieldCount = resource->fieldCount;
      return resource->format;
    }
  }

  return NULL;
}
2022-06-17 06:49:09 +00:00
// Material
2022-07-13 07:07:15 +00:00
// Creates a Material by popping a slot from a pool of fixed-size MaterialBlocks.
// Each block holds 256 materials backed by a single uniform buffer plus a bundle
// (descriptor set) per slot.  Slots are recycled through a per-block free list;
// a slot is only reused once the GPU has finished with it (gpu_is_complete).
Material* lovrMaterialCreate(const MaterialInfo* info) {
  // Start with the most recently used block, if any exist yet.
  MaterialBlock* block = state.materialBlocks.length > 0 ? &state.materialBlocks.data[state.materialBlock] : NULL;

  const uint32_t MATERIALS_PER_BLOCK = 256;

  // Need a different block if there is none, it's full (head == ~0u), or its
  // next free slot is still in use by the GPU.
  if (!block || block->head == ~0u || !gpu_is_complete(block->list[block->head].tick)) {
    // First, scan existing blocks for one with a GPU-complete free slot.
    bool found = false;
    for (size_t i = 0; i < state.materialBlocks.length; i++) {
      block = &state.materialBlocks.data[i];
      if (block->head != ~0u && gpu_is_complete(block->list[block->head].tick)) {
        state.materialBlock = i;
        found = true;
        break;
      }
    }

    // Otherwise, allocate and initialize a brand new block.
    if (!found) {
      arr_expand(&state.materialBlocks, 1);
      // Material.block is a uint16_t, so the block index must fit in 16 bits.
      lovrAssert(state.materialBlocks.length < UINT16_MAX, "Out of memory");
      state.materialBlock = state.materialBlocks.length++;
      block = &state.materialBlocks.data[state.materialBlock];
      block->list = lovrMalloc(MATERIALS_PER_BLOCK * sizeof(Material));
      block->bundlePool = lovrMalloc(gpu_sizeof_bundle_pool());
      block->bundles = lovrMalloc(MATERIALS_PER_BLOCK * gpu_sizeof_bundle());

      // Chain every slot into the free list; tick is backdated so fresh slots
      // always count as GPU-complete.
      for (uint32_t i = 0; i < MATERIALS_PER_BLOCK; i++) {
        block->list[i].next = i + 1;
        block->list[i].tick = state.tick - 4;
        block->list[i].block = (uint16_t) state.materialBlock;
        block->list[i].index = i;
        block->list[i].bundle = (gpu_bundle*) ((char*) block->bundles + i * gpu_sizeof_bundle());
        block->list[i].hasWritableTexture = false;
      }

      block->list[MATERIALS_PER_BLOCK - 1].next = ~0u; // End of free list
      block->tail = MATERIALS_PER_BLOCK - 1;
      block->head = 0;

      // One uniform buffer backs all materials in the block; each slot gets a
      // uniformBufferAlign-aligned region so it can be bound at an offset.
      size_t align = state.limits.uniformBufferAlign;
      size_t bufferSize = MATERIALS_PER_BLOCK * ALIGN(sizeof(MaterialData), align);
      block->view = getBuffer(GPU_BUFFER_STATIC, bufferSize, align);
      // Keep the underlying allocation alive for the lifetime of the block.
      atomic_fetch_add(&block->view.block->ref, 1);

      gpu_bundle_pool_info poolInfo = {
        .bundles = block->bundles,
        .layout = state.layouts.data[LAYOUT_MATERIAL].gpu,
        .count = MATERIALS_PER_BLOCK
      };

      gpu_bundle_pool_init(block->bundlePool, &poolInfo);
    }
  }

  // Pop a slot off the block's free list.
  Material* material = &block->list[block->head];
  block->head = material->next;
  material->next = ~0u;
  material->ref = 1;
  material->info = *info;

  MaterialData* data;
  uint32_t stride = ALIGN(sizeof(MaterialData), state.limits.uniformBufferAlign);

  if (block->view.pointer) {
    // Host-visible buffer: write the uniform data directly.
    data = (MaterialData*) ((char*) block->view.pointer + material->index * stride);
  } else {
    // Device-local buffer: record a staging-buffer copy and sync it so vertex/
    // fragment shaders see the new uniforms.
    beginFrame();
    BufferView staging = getBuffer(GPU_BUFFER_UPLOAD, sizeof(MaterialData), 4);
    gpu_copy_buffers(state.stream, staging.buffer, block->view.buffer, staging.offset, block->view.offset + stride * material->index, sizeof(MaterialData));
    state.barrier.prev |= GPU_PHASE_COPY;
    state.barrier.next |= GPU_PHASE_SHADER_VERTEX | GPU_PHASE_SHADER_FRAGMENT;
    state.barrier.flush |= GPU_CACHE_TRANSFER_WRITE;
    state.barrier.clear |= GPU_CACHE_UNIFORM;
    data = staging.pointer;
  }

  // NOTE(review): copies sizeof(MaterialData) bytes straight from the
  // MaterialInfo — assumes MaterialData is the leading member of MaterialInfo;
  // verify against the struct declarations.
  memcpy(data, info, sizeof(MaterialData));

  // Binding 0: this slot's region of the block's uniform buffer.
  gpu_buffer_binding buffer = {
    .object = block->view.buffer,
    .offset = block->view.offset + material->index * stride,
    .extent = stride
  };

  gpu_binding bindings[8] = {
    { 0, GPU_SLOT_UNIFORM_BUFFER, .buffer = buffer }
  };

  // Bindings 1..7: the material textures, in fixed slot order.
  Texture* textures[] = {
    info->texture,
    info->glowTexture,
    info->metalnessTexture,
    info->roughnessTexture,
    info->clearcoatTexture,
    info->occlusionTexture,
    info->normalTexture
  };

  for (uint32_t i = 0; i < COUNTOF(textures); i++) {
    lovrRetain(textures[i]);
    // Missing textures fall back to the shared 1-pixel default texture.
    Texture* texture = textures[i] ? textures[i] : state.defaultTexture;
    lovrCheck(i == 0 || texture->info.type == TEXTURE_2D, "Material textures must be 2D");
    lovrCheck(texture->info.usage & TEXTURE_SAMPLE, "Textures must be created with the 'sample' usage to use them in Materials");
    bindings[i + 1] = (gpu_binding) { i + 1, GPU_SLOT_SAMPLED_TEXTURE, .texture = texture->gpu };
    // Track whether any texture can be written (usage beyond 'sample'), which
    // affects synchronization when the material is used.
    material->hasWritableTexture |= texture->info.usage != TEXTURE_SAMPLE;
  }

  gpu_bundle_info bundleInfo = {
    .layout = state.layouts.data[LAYOUT_MATERIAL].gpu,
    .bindings = bindings,
    .count = COUNTOF(bindings)
  };

  gpu_bundle_write(&material->bundle, &bundleInfo, 1);

  return material;
}
// Returns a Material's slot to its block's free list and drops the texture
// references taken in lovrMaterialCreate.  The slot records the current tick so
// it is only reused after the GPU has finished with it.
void lovrMaterialDestroy(void* ref) {
  Material* material = ref;
  MaterialBlock* block = &state.materialBlocks.data[material->block];
  material->tick = state.tick;
  // NOTE(review): the freed slot becomes the new tail, but the previous tail's
  // `next` is never pointed at it — when the free list is non-empty this slot
  // looks unreachable from `head`.  Looks like a missing
  // `block->list[block->tail].next = material->index;` — confirm against the
  // free-list invariants in lovrMaterialCreate before changing.
  block->tail = material->index;
  if (block->head == ~0u) block->head = block->tail;
  lovrRelease(material->info.texture, lovrTextureDestroy);
  lovrRelease(material->info.glowTexture, lovrTextureDestroy);
  lovrRelease(material->info.metalnessTexture, lovrTextureDestroy);
  lovrRelease(material->info.roughnessTexture, lovrTextureDestroy);
  lovrRelease(material->info.clearcoatTexture, lovrTextureDestroy);
  lovrRelease(material->info.occlusionTexture, lovrTextureDestroy);
  lovrRelease(material->info.normalTexture, lovrTextureDestroy);
}
// Returns the MaterialInfo the Material was created with (borrowed pointer,
// valid for the lifetime of the Material).
const MaterialInfo* lovrMaterialGetInfo(Material* material) {
  return &material->info;
}
2022-06-19 00:43:12 +00:00
// Font
2023-04-26 04:45:30 +00:00
Font * lovrGraphicsGetDefaultFont ( void ) {
2022-07-30 22:20:01 +00:00
if ( ! state . defaultFont ) {
Rasterizer * rasterizer = lovrRasterizerCreate ( NULL , 32 ) ;
state . defaultFont = lovrFontCreate ( & ( FontInfo ) {
. rasterizer = rasterizer ,
. spread = 4.
} ) ;
lovrRelease ( rasterizer , lovrRasterizerDestroy ) ;
}
return state . defaultFont ;
}
2022-07-13 07:07:15 +00:00
// Creates a Font from a rasterizer.  The atlas texture itself is created
// lazily by lovrFontGetGlyph; here only its initial dimensions are chosen.
Font* lovrFontCreate(const FontInfo* info) {
  Font* font = lovrCalloc(sizeof(Font));
  font->ref = 1;
  font->info = *info;
  lovrRetain(info->rasterizer);
  arr_init(&font->glyphs);
  map_init(&font->glyphLookup, 36);
  map_init(&font->kerning, 36);
  // Default pixel density maps one line of text to the rasterizer's leading.
  font->pixelDensity = lovrRasterizerGetLeading(info->rasterizer);
  font->lineSpacing = 1.f;
  // Padding around each glyph in the atlas, sized for the SDF spread.
  font->padding = (uint32_t) ceil(info->spread / 2.);

  // Initial atlas size must be big enough to hold any of the glyphs
  float box[4];
  font->atlasWidth = 1;
  font->atlasHeight = 1;
  lovrRasterizerGetBoundingBox(info->rasterizer, box);
  uint32_t maxWidth = (uint32_t) ceilf(box[2] - box[0]) + 2 * font->padding;
  uint32_t maxHeight = (uint32_t) ceilf(box[3] - box[1]) + 2 * font->padding;
  // Double both dimensions (keeping powers of two) until the atlas can fit at
  // least a 2x2 grid of worst-case glyphs.
  while (font->atlasWidth < 2 * maxWidth || font->atlasHeight < 2 * maxHeight) {
    font->atlasWidth <<= 1;
    font->atlasHeight <<= 1;
  }

  return font;
}
// Releases everything lovrFontCreate/lovrFontGetGlyph acquired: the
// rasterizer, the atlas material/texture, the glyph cache, and the Font.
void lovrFontDestroy(void* ref) {
  Font* font = ref;
  lovrRelease(font->info.rasterizer, lovrRasterizerDestroy);
  lovrRelease(font->material, lovrMaterialDestroy);
  lovrRelease(font->atlas, lovrTextureDestroy);
  arr_free(&font->glyphs);
  map_free(&font->glyphLookup);
  map_free(&font->kerning);
  lovrFree(font);
}
2022-06-21 22:28:03 +00:00
// Returns the FontInfo the Font was created with (borrowed pointer).
const FontInfo* lovrFontGetInfo(Font* font) {
  return &font->info;
}
2022-06-19 00:43:12 +00:00
// Returns the font's pixel density (rasterizer pixels per world unit).
float lovrFontGetPixelDensity(Font* font) {
  return font->pixelDensity;
}
// Sets the font's pixel density, used to scale text measurements and wrap.
void lovrFontSetPixelDensity(Font* font, float pixelDensity) {
  font->pixelDensity = pixelDensity;
}
2022-06-26 02:54:13 +00:00
// Returns the line spacing multiplier (1.0 = the rasterizer's natural leading).
float lovrFontGetLineSpacing(Font* font) {
  return font->lineSpacing;
}
// Sets the line spacing multiplier applied to the rasterizer's leading.
void lovrFontSetLineSpacing(Font* font, float spacing) {
  font->lineSpacing = spacing;
}
2022-06-30 03:17:26 +00:00
// Returns the cached Glyph for a codepoint, rasterizing it into the atlas on a
// cache miss.  If adding the glyph forced the atlas texture to grow, *resized
// is set to true (callers must then recompute any UVs they captured earlier);
// otherwise *resized is set to false.  resized may be NULL.
static Glyph* lovrFontGetGlyph(Font* font, uint32_t codepoint, bool* resized) {
  uint64_t hash = hash64(&codepoint, 4);
  uint64_t index = map_get(&font->glyphLookup, hash);

  // Fast path: glyph already cached.
  if (index != MAP_NIL) {
    if (resized) *resized = false;
    return &font->glyphs.data[index];
  }

  arr_expand(&font->glyphs, 1);
  map_set(&font->glyphLookup, hash, font->glyphs.length);
  Glyph* glyph = &font->glyphs.data[font->glyphs.length++];

  glyph->codepoint = codepoint;
  glyph->advance = lovrRasterizerGetAdvance(font->info.rasterizer, codepoint);

  // Whitespace-only glyphs take no atlas space; a zeroed box marks them.
  if (lovrRasterizerIsGlyphEmpty(font->info.rasterizer, codepoint)) {
    memset(glyph->box, 0, sizeof(glyph->box));
    if (resized) *resized = false;
    return glyph;
  }

  lovrRasterizerGetGlyphBoundingBox(font->info.rasterizer, codepoint, glyph->box);

  float width = glyph->box[2] - glyph->box[0];
  float height = glyph->box[3] - glyph->box[1];
  uint32_t pixelWidth = 2 * font->padding + (uint32_t) ceilf(width);
  uint32_t pixelHeight = 2 * font->padding + (uint32_t) ceilf(height);

  // If the glyph exceeds the width, start a new row
  // (after a width-doubling grow, new rows start halfway across — the left
  // half below the old height already holds earlier rows).
  if (font->atlasX + pixelWidth > font->atlasWidth) {
    font->atlasX = font->atlasWidth == font->atlasHeight ? 0 : font->atlasWidth >> 1;
    font->atlasY += font->rowHeight;
  }

  // If the glyph exceeds the height, expand the atlas (alternately doubling
  // width then height to keep it roughly square).
  if (font->atlasY + pixelHeight > font->atlasHeight) {
    if (font->atlasWidth == font->atlasHeight) {
      font->atlasX = font->atlasWidth;
      font->atlasY = 0;
      font->atlasWidth <<= 1;
      font->rowHeight = 0;
    } else {
      font->atlasX = 0;
      font->atlasY = font->atlasHeight;
      font->atlasHeight <<= 1;
      font->rowHeight = 0;
    }
  }

  // Atlas position (inside the padding) and normalized UVs packed as u16.
  glyph->x = font->atlasX + font->padding;
  glyph->y = font->atlasY + font->padding;
  glyph->uv[0] = (uint16_t) ((float) glyph->x / font->atlasWidth * 65535.f + .5f);
  glyph->uv[1] = (uint16_t) ((float) (glyph->y + height) / font->atlasHeight * 65535.f + .5f);
  glyph->uv[2] = (uint16_t) ((float) (glyph->x + width) / font->atlasWidth * 65535.f + .5f);
  glyph->uv[3] = (uint16_t) ((float) glyph->y / font->atlasHeight * 65535.f + .5f);

  font->atlasX += pixelWidth;
  font->rowHeight = MAX(font->rowHeight, pixelHeight);

  beginFrame();

  // Atlas resize
  if (!font->atlas || font->atlasWidth > font->atlas->info.width || font->atlasHeight > font->atlas->info.height) {
    lovrCheck(font->atlasWidth <= 65536, "Font atlas is way too big!");

    Texture* atlas = lovrTextureCreate(&(TextureInfo) {
      .type = TEXTURE_2D,
      .format = FORMAT_RGBA8,
      .width = font->atlasWidth,
      .height = font->atlasHeight,
      .layers = 1,
      .mipmaps = 1,
      .usage = TEXTURE_SAMPLE | TEXTURE_TRANSFER,
      .label = "Font Atlas"
    });

    float clear[4] = { 0.f, 0.f, 0.f, 0.f };
    gpu_clear_texture(state.stream, atlas->gpu, clear, 0, ~0u, 0, ~0u);

    // This barrier serves 2 purposes:
    // - Ensure new atlas clear is finished/flushed before copying to it
    // - Ensure any unsynchronized pending uploads to old atlas finish before copying to new atlas
    gpu_barrier barrier;
    barrier.prev = GPU_PHASE_COPY | GPU_PHASE_CLEAR;
    barrier.next = GPU_PHASE_COPY;
    barrier.flush = GPU_CACHE_TRANSFER_WRITE;
    barrier.clear = GPU_CACHE_TRANSFER_READ | GPU_CACHE_TRANSFER_WRITE;
    gpu_sync(state.stream, &barrier, 1);

    // Migrate the old atlas contents into the top-left of the new texture.
    if (font->atlas) {
      uint32_t srcOffset[4] = { 0, 0, 0, 0 };
      uint32_t dstOffset[4] = { 0, 0, 0, 0 };
      uint32_t extent[3] = { font->atlas->info.width, font->atlas->info.height, 1 };
      gpu_copy_textures(state.stream, font->atlas->gpu, atlas->gpu, srcOffset, dstOffset, extent);
      lovrRelease(font->atlas, lovrTextureDestroy);
    }

    font->atlas = atlas;

    // Material (rebuilt because the sdfRange depends on the atlas dimensions)
    lovrRelease(font->material, lovrMaterialDestroy);
    font->material = lovrMaterialCreate(&(MaterialInfo) {
      .data.color = { 1.f, 1.f, 1.f, 1.f },
      .data.uvScale = { 1.f, 1.f },
      .data.sdfRange = { font->info.spread / font->atlasWidth, font->info.spread / font->atlasHeight },
      .texture = font->atlas
    });

    // Recompute all glyph uvs after atlas resize
    for (size_t i = 0; i < font->glyphs.length; i++) {
      Glyph* g = &font->glyphs.data[i];
      if (g->box[2] - g->box[0] > 0.f) { // Skip empty (whitespace) glyphs
        g->uv[0] = (uint16_t) ((float) g->x / font->atlasWidth * 65535.f + .5f);
        g->uv[1] = (uint16_t) ((float) (g->y + g->box[3] - g->box[1]) / font->atlasHeight * 65535.f + .5f);
        g->uv[2] = (uint16_t) ((float) (g->x + g->box[2] - g->box[0]) / font->atlasWidth * 65535.f + .5f);
        g->uv[3] = (uint16_t) ((float) g->y / font->atlasHeight * 65535.f + .5f);
      }
    }

    if (resized) *resized = true;
  }

  // Rasterize the glyph into temp memory as RGBA float pixels.
  size_t stack = tempPush(&state.allocator);
  float* pixels = tempAlloc(&state.allocator, pixelWidth * pixelHeight * 4 * sizeof(float));
  lovrRasterizerGetPixels(font->info.rasterizer, glyph->codepoint, pixels, pixelWidth, pixelHeight, font->info.spread);

  // Convert to RGBA8 in a GPU upload buffer.
  BufferView view = getBuffer(GPU_BUFFER_UPLOAD, pixelWidth * pixelHeight * 4 * sizeof(uint8_t), 64);

  float* src = pixels;
  uint8_t* dst = view.pointer;

  for (uint32_t y = 0; y < pixelHeight; y++) {
    for (uint32_t x = 0; x < pixelWidth; x++) {
      for (uint32_t c = 0; c < 4; c++) {
        float f = *src++; // CLAMP would evaluate this multiple times
        *dst++ = (uint8_t) (CLAMP(f, 0.f, 1.f) * 255.f + .5f);
      }
    }
  }

  // Copy into the atlas (padding included, hence the - padding offsets).
  uint32_t dstOffset[4] = { glyph->x - font->padding, glyph->y - font->padding, 0, 0 };
  uint32_t extent[3] = { pixelWidth, pixelHeight, 1 };
  gpu_copy_buffer_texture(state.stream, view.buffer, font->atlas->gpu, view.offset, dstOffset, extent);
  tempPop(&state.allocator, stack);

  // Make the upload visible to fragment shaders sampling the atlas.
  state.barrier.prev |= GPU_PHASE_COPY;
  state.barrier.next |= GPU_PHASE_SHADER_FRAGMENT;
  state.barrier.flush |= GPU_CACHE_TRANSFER_WRITE;
  state.barrier.clear |= GPU_CACHE_TEXTURE;

  return glyph;
}
2022-07-04 22:22:54 +00:00
// Returns the kerning adjustment between two codepoints, caching results in
// font->kerning.  The float value is stored in the map by punning its bits
// through a union; MAP_NIL marks a missing entry.
float lovrFontGetKerning(Font* font, uint32_t first, uint32_t second) {
  uint32_t codepoints[] = { first, second };
  uint64_t hash = hash64(codepoints, sizeof(codepoints));
  union { float f32; uint64_t u64; } kerning = { .u64 = map_get(&font->kerning, hash) };

  if (kerning.u64 == MAP_NIL) {
    kerning.f32 = lovrRasterizerGetKerning(font->info.rasterizer, first, second);
    // Writing f32 only replaces the low 32 bits; the leftover MAP_NIL high bits
    // keep the stored value distinguishable from MAP_NIL itself.
    map_set(&font->kerning, hash, kerning.u64);
  }

  return kerning.f32;
}
2022-07-01 00:25:47 +00:00
// Measures the widest line of the given text, in world units (advances divided
// by pixelDensity).  Tabs count as 4 spaces; \n starts a new line; \r is
// ignored; kerning is applied between consecutive non-space glyphs.
float lovrFontGetWidth(Font* font, ColoredString* strings, uint32_t count) {
  float x = 0.f;
  float maxWidth = 0.f;
  float space = lovrFontGetGlyph(font, ' ', NULL)->advance;

  for (uint32_t i = 0; i < count; i++) {
    size_t bytes;
    uint32_t codepoint;
    uint32_t previous = '\0';
    const char* str = strings[i].string;
    const char* end = strings[i].string + strings[i].length;

    while ((bytes = utf8_decode(str, end, &codepoint)) > 0) {
      if (codepoint == ' ' || codepoint == '\t') {
        x += codepoint == '\t' ? space * 4.f : space;
        previous = '\0'; // No kerning across whitespace
        str += bytes;
        continue;
      } else if (codepoint == '\n') {
        maxWidth = MAX(maxWidth, x);
        x = 0.f;
        previous = '\0';
        str += bytes;
        continue;
      } else if (codepoint == '\r') {
        str += bytes;
        continue;
      }

      Glyph* glyph = lovrFontGetGlyph(font, codepoint, NULL);
      if (previous) x += lovrFontGetKerning(font, previous, codepoint);
      previous = codepoint;
      x += glyph->advance;
      str += bytes;
    }
  }

  // The final line has no trailing \n, so fold it in here.
  return MAX(maxWidth, x) / font->pixelDensity;
}
2022-07-01 00:07:47 +00:00
// Word-wraps the concatenated input strings at `wrap` world units and invokes
// callback once per resulting line with a pointer/length into a temp copy of
// the text (valid only for the duration of the callback).
void lovrFontGetLines(Font* font, ColoredString* strings, uint32_t count, float wrap, void (*callback)(void* context, const char* string, size_t length), void* context) {
  size_t totalLength = 0;
  for (uint32_t i = 0; i < count; i++) {
    totalLength += strings[i].length;
  }

  // Concatenate all the strings into one NUL-terminated temp buffer.
  beginFrame();
  size_t stack = tempPush(&state.allocator);
  char* string = tempAlloc(&state.allocator, totalLength + 1);
  string[totalLength] = '\0';

  size_t cursor = 0;
  for (uint32_t i = 0; i < count; cursor += strings[i].length, i++) {
    memcpy(string + cursor, strings[i].string, strings[i].length);
  }

  float x = 0.f;
  float nextWordStartX = 0.f;
  wrap *= font->pixelDensity; // Measurements below are in rasterizer pixels

  size_t bytes;
  uint32_t codepoint;
  uint32_t previous = '\0';
  const char* lineStart = string;
  const char* wordStart = string;
  const char* end = string + totalLength;
  float space = lovrFontGetGlyph(font, ' ', NULL)->advance;
  while ((bytes = utf8_decode(string, end, &codepoint)) > 0) {
    if (codepoint == ' ' || codepoint == '\t') {
      x += codepoint == '\t' ? space * 4.f : space;
      nextWordStartX = x;
      previous = '\0';
      string += bytes;
      wordStart = string;
      continue;
    } else if (codepoint == '\n') {
      size_t length = string - lineStart;
      // NOTE(review): this looks like it intends to trim trailing spaces/tabs
      // from the line, but `string[length]` indexes relative to the *current*
      // position, reading past the line (and potentially past the buffer).
      // Presumably it should inspect `lineStart[length - 1]` — verify before
      // changing; same pattern appears in the wrap branch below.
      while (string[length] == ' ' || string[length] == '\t') length--;
      callback(context, lineStart, length);
      nextWordStartX = 0.f;
      x = 0.f;
      previous = '\0';
      string += bytes;
      lineStart = string;
      wordStart = string;
      continue;
    } else if (codepoint == '\r') {
      string += bytes;
      continue;
    }

    Glyph* glyph = lovrFontGetGlyph(font, codepoint, NULL);

    // Kerning
    if (previous) x += lovrFontGetKerning(font, previous, codepoint);
    previous = codepoint;

    // Wrap: emit everything before the current word, which moves to a new line.
    if (wordStart != lineStart && x + glyph->advance > wrap) {
      size_t length = wordStart - lineStart;
      // NOTE(review): same suspicious `string[length]` indexing as above.
      while (string[length] == ' ' || string[length] == '\t') length--;
      callback(context, lineStart, length);
      lineStart = wordStart;
      x -= nextWordStartX;
      nextWordStartX = 0.f;
      previous = '\0';
    }

    // Advance
    x += glyph->advance;
    string += bytes;
  }

  // Emit the final (unterminated) line.
  if (end - lineStart > 0) {
    callback(context, lineStart, end - lineStart);
  }

  tempPop(&state.allocator, stack);
}
2022-07-17 23:38:29 +00:00
// Applies horizontal alignment to a run of glyph vertices by shifting them
// left.  The HorizontalAlign value doubles as the shift factor (presumably
// left = 0, center = 1, right = 2 — giving shifts of 0, width/2, and width).
static void aline(GlyphVertex* vertices, uint32_t head, uint32_t tail, float width, HorizontalAlign align) {
  if (align != ALIGN_LEFT) {
    float shift = width * (align / 2.f);
    for (uint32_t i = head; i < tail; i++) {
      vertices[i].position.x -= shift;
    }
  }
}
// Generates 4 quad vertices per visible glyph for the given colored strings,
// applying kerning, word wrap, and horizontal alignment.  Outputs the glyph
// and line counts and the font's current Material.  `flip` mirrors the y axis.
// If fetching a glyph resizes the atlas (invalidating previously written UVs),
// the whole layout restarts recursively from scratch.
void lovrFontGetVertices(Font* font, ColoredString* strings, uint32_t count, float wrap, HorizontalAlign halign, VerticalAlign valign, GlyphVertex* vertices, uint32_t* glyphCount, uint32_t* lineCount, Material** material, bool flip) {
  uint32_t vertexCount = 0;
  uint32_t lineStart = 0;  // First vertex of the current line
  uint32_t wordStart = 0;  // First vertex of the current word
  *glyphCount = 0;
  *lineCount = 1;

  float x = 0.f;
  float y = 0.f;
  float wordStartX = 0.f;   // Pen x where the current word began
  float prevWordEndX = 0.f; // Pen x where the previous word ended
  float leading = lovrRasterizerGetLeading(font->info.rasterizer) * font->lineSpacing;
  float space = lovrFontGetGlyph(font, ' ', NULL)->advance;

  for (uint32_t i = 0; i < count; i++) {
    size_t bytes;
    uint32_t codepoint;
    uint32_t previous = '\0';
    const char* str = strings[i].string;
    const char* end = strings[i].string + strings[i].length;
    // Convert the per-string sRGB color to linear 8-bit (alpha stays as-is).
    float rf = lovrMathGammaToLinear(strings[i].color[0]);
    float gf = lovrMathGammaToLinear(strings[i].color[1]);
    float bf = lovrMathGammaToLinear(strings[i].color[2]);
    uint8_t r = (uint8_t) (CLAMP(rf, 0.f, 1.f) * 255.f);
    uint8_t g = (uint8_t) (CLAMP(gf, 0.f, 1.f) * 255.f);
    uint8_t b = (uint8_t) (CLAMP(bf, 0.f, 1.f) * 255.f);
    uint8_t a = (uint8_t) (CLAMP(strings[i].color[3], 0.f, 1.f) * 255.f);

    while ((bytes = utf8_decode(str, end, &codepoint)) > 0) {
      if (codepoint == ' ' || codepoint == '\t') {
        if (previous) prevWordEndX = x;
        wordStart = vertexCount;
        x += codepoint == '\t' ? space * 4.f : space;
        wordStartX = x;
        previous = '\0';
        str += bytes;
        continue;
      } else if (codepoint == '\n') {
        aline(vertices, lineStart, vertexCount, x, halign);
        lineStart = vertexCount;
        wordStart = vertexCount;
        x = 0.f;
        y -= leading;
        wordStartX = 0.f;
        prevWordEndX = 0.f;
        (*lineCount)++;
        previous = '\0';
        str += bytes;
        continue;
      } else if (codepoint == '\r') {
        str += bytes;
        continue;
      }

      bool resized;
      Glyph* glyph = lovrFontGetGlyph(font, codepoint, &resized);

      // Atlas grew: all UVs written so far are stale, so redo the layout.
      if (resized) {
        lovrFontGetVertices(font, strings, count, wrap, halign, valign, vertices, glyphCount, lineCount, material, flip);
        return;
      }

      // Kerning
      if (previous) x += lovrFontGetKerning(font, previous, codepoint);
      previous = codepoint;

      // Wrap
      if (wrap > 0.f && x + glyph->advance > wrap && wordStart != lineStart) {
        float dx = wordStartX;
        float dy = leading;

        // Shift the vertices of the overflowing word down a line and back to the beginning
        for (uint32_t v = wordStart; v < vertexCount; v++) {
          vertices[v].position.x -= dx;
          vertices[v].position.y += flip ? dy : -dy;
        }

        // The finished line ends where the previous word ended.
        aline(vertices, lineStart, wordStart, prevWordEndX, halign);
        lineStart = wordStart;
        wordStartX = 0.f;
        (*lineCount)++;
        x -= dx;
        y -= dy;
      }

      // Vertices: one quad per glyph; `flip` mirrors y and swaps the v coords.
      float* bb = glyph->box;
      uint16_t* uv = glyph->uv;
      if (flip) {
        vertices[vertexCount++] = (GlyphVertex) { { x + bb[0], -(y + bb[1]) }, { uv[0], uv[3] }, { r, g, b, a } };
        vertices[vertexCount++] = (GlyphVertex) { { x + bb[2], -(y + bb[1]) }, { uv[2], uv[3] }, { r, g, b, a } };
        vertices[vertexCount++] = (GlyphVertex) { { x + bb[0], -(y + bb[3]) }, { uv[0], uv[1] }, { r, g, b, a } };
        vertices[vertexCount++] = (GlyphVertex) { { x + bb[2], -(y + bb[3]) }, { uv[2], uv[1] }, { r, g, b, a } };
      } else {
        vertices[vertexCount++] = (GlyphVertex) { { x + bb[0], y + bb[3] }, { uv[0], uv[1] }, { r, g, b, a } };
        vertices[vertexCount++] = (GlyphVertex) { { x + bb[2], y + bb[3] }, { uv[2], uv[1] }, { r, g, b, a } };
        vertices[vertexCount++] = (GlyphVertex) { { x + bb[0], y + bb[1] }, { uv[0], uv[3] }, { r, g, b, a } };
        vertices[vertexCount++] = (GlyphVertex) { { x + bb[2], y + bb[1] }, { uv[2], uv[3] }, { r, g, b, a } };
      }

      (*glyphCount)++;

      // Advance
      x += glyph->advance;
      str += bytes;
    }
  }

  // Align last line
  aline(vertices, lineStart, vertexCount, x, halign);
  *material = font->material;
}
2023-06-11 05:06:29 +00:00
// Mesh
Mesh * lovrMeshCreate ( const MeshInfo * info , void * * vertices ) {
2023-08-25 20:42:09 +00:00
Buffer * buffer = info - > vertexBuffer ;
if ( buffer ) {
lovrCheck ( buffer - > info . format , " Mesh vertex buffer must have format information " ) ;
2024-04-16 22:04:20 +00:00
lovrCheck ( ! buffer - > complexFormat , " Mesh vertex buffer must use a format without nested types or arrays " ) ;
2023-08-25 20:42:09 +00:00
lovrCheck ( info - > storage = = MESH_GPU , " Mesh storage must be 'gpu' when created from a Buffer " ) ;
lovrRetain ( buffer ) ;
} else {
lovrCheck ( info - > vertexFormat - > length > 0 , " Mesh must have at least one vertex " ) ;
BufferInfo bufferInfo = { . format = info - > vertexFormat } ;
buffer = lovrBufferCreate ( & bufferInfo , info - > storage = = MESH_GPU ? vertices : NULL ) ;
2023-09-19 06:05:27 +00:00
if ( ! vertices ) lovrBufferClear ( buffer , 0 , ~ 0u , 0 ) ;
2023-08-25 20:42:09 +00:00
}
DataField * format = buffer - > info . format ;
2023-06-11 05:06:29 +00:00
lovrCheck ( format - > stride < = state . limits . vertexBufferStride , " Mesh vertex buffer stride exceeds the vertexBufferStride limit of this GPU " ) ;
2023-08-25 20:42:09 +00:00
lovrCheck ( format - > fieldCount < = state . limits . vertexAttributes , " Mesh attribute count exceeds the vertexAttributes limit of this GPU " ) ;
2023-06-11 05:06:29 +00:00
2024-04-16 22:04:20 +00:00
for ( uint32_t i = 0 ; i < MAX ( format - > fieldCount , 1 ) ; i + + ) {
const DataField * attribute = format - > fieldCount > 0 ? & format - > fields [ i ] : format ;
2023-08-25 20:42:09 +00:00
lovrCheck ( attribute - > offset < 256 , " Max Mesh attribute offset is 255 " ) ; // Limited by u8 gpu_attribute offset
2023-06-11 05:06:29 +00:00
lovrCheck ( attribute - > type < TYPE_MAT2 | | attribute - > type > TYPE_MAT4 , " Currently, Mesh attributes can not use matrix types " ) ;
lovrCheck ( attribute - > type < TYPE_INDEX16 | | attribute - > type > TYPE_INDEX32 , " Mesh attributes can not use index types " ) ;
}
2024-03-11 21:38:00 +00:00
Mesh * mesh = lovrCalloc ( sizeof ( Mesh ) ) ;
2023-06-11 05:06:29 +00:00
mesh - > ref = 1 ;
2023-08-25 20:42:09 +00:00
mesh - > vertexBuffer = buffer ;
2023-06-11 05:06:29 +00:00
mesh - > storage = info - > storage ;
mesh - > mode = DRAW_TRIANGLES ;
if ( info - > vertexBuffer ) {
2023-08-25 20:42:09 +00:00
lovrRetain ( info - > vertexBuffer ) ;
} else if ( mesh - > storage = = MESH_CPU ) {
2024-03-11 21:38:00 +00:00
mesh - > vertices = vertices ? lovrMalloc ( buffer - > info . size ) : lovrCalloc ( buffer - > info . size ) ;
2023-06-11 05:06:29 +00:00
2023-08-25 20:42:09 +00:00
if ( vertices ) {
* vertices = mesh - > vertices ;
mesh - > dirtyVertices [ 0 ] = 0 ;
mesh - > dirtyVertices [ 1 ] = format - > length ;
} else {
mesh - > dirtyVertices [ 0 ] = ~ 0u ;
mesh - > dirtyVertices [ 1 ] = 0 ;
2023-06-11 05:06:29 +00:00
}
}
return mesh ;
}
// Releases the Mesh's buffers and material, frees the optional CPU-side
// vertex/index shadow copies, then frees the Mesh itself.
void lovrMeshDestroy(void* ref) {
  Mesh* mesh = ref;
  lovrRelease(mesh->vertexBuffer, lovrBufferDestroy);
  lovrRelease(mesh->indexBuffer, lovrBufferDestroy);
  lovrRelease(mesh->material, lovrMaterialDestroy);
  lovrFree(mesh->vertices);
  lovrFree(mesh->indices);
  lovrFree(mesh);
}
// Returns the vertex buffer's data format (always present for a Mesh).
const DataField* lovrMeshGetVertexFormat(Mesh* mesh) {
  return mesh->vertexBuffer->info.format;
}
2023-08-25 20:42:09 +00:00
// Returns the index buffer's data format, or NULL when the Mesh has no active
// indices.  Fix: the previous condition (`indexCount > 0 || !indexBuffer`)
// dereferenced a NULL indexBuffer for a Mesh without one; the guard now
// mirrors lovrMeshGetIndices (indices exist AND a buffer is present).
const DataField* lovrMeshGetIndexFormat(Mesh* mesh) {
  return mesh->indexCount > 0 && mesh->indexBuffer ? mesh->indexBuffer->info.format : NULL;
}
2023-06-11 05:06:29 +00:00
// Returns the vertex Buffer, or NULL for cpu-storage Meshes (their Buffer is
// an internal implementation detail that callers must not touch directly).
Buffer* lovrMeshGetVertexBuffer(Mesh* mesh) {
  return mesh->storage == MESH_CPU ? NULL : mesh->vertexBuffer;
}
// Returns the index Buffer (possibly NULL), or NULL for cpu-storage Meshes.
Buffer* lovrMeshGetIndexBuffer(Mesh* mesh) {
  return mesh->storage == MESH_CPU ? NULL : mesh->indexBuffer;
}
// Replaces the Mesh's index buffer with a caller-provided Buffer (gpu storage
// only).  The buffer must have a tightly packed u16/u32/index16/index32
// format; its length becomes the Mesh's index count.
void lovrMeshSetIndexBuffer(Mesh* mesh, Buffer* buffer) {
  lovrCheck(mesh->storage == MESH_GPU, "Mesh can only use a Buffer for indices if it was created with 'gpu' storage mode");
  DataField* format = buffer->info.format;
  lovrCheck(format, "Mesh index buffer must have been created with a format");
  lovrCheck(format->length > 0, "Mesh index buffer length can not be zero");
  DataType type = format->type;
  // fieldCount > 0 means a struct-like format, which is not a plain index array.
  if (format->fieldCount > 0 || (type != TYPE_U16 && type != TYPE_U32 && type != TYPE_INDEX16 && type != TYPE_INDEX32)) {
    lovrThrow("Mesh index buffer must use the u16, u32, index16, or index32 type");
  } else {
    uint32_t stride = (type == TYPE_U16 || type == TYPE_INDEX16) ? 2 : 4;
    lovrCheck(format->stride == stride && format->offset == 0, "Mesh index buffer must be tightly packed");
  }
  lovrRelease(mesh->indexBuffer, lovrBufferDestroy);
  mesh->indexBuffer = buffer;
  mesh->indexCount = format->length;
  lovrRetain(buffer);
}
// Returns a readable pointer to `count` vertices starting at `index`
// (count == ~0u means "through the end").  For cpu storage this points into
// the host shadow copy; otherwise the data is read back from the Buffer.
void* lovrMeshGetVertices(Mesh* mesh, uint32_t index, uint32_t count) {
  const DataField* format = lovrMeshGetVertexFormat(mesh);
  if (count == ~0u) count = format->length - index;
  lovrCheck(index < format->length && count <= format->length - index, "Mesh vertex range [%d,%d] overflows mesh capacity", index + 1, index + 1 + count - 1);

  if (mesh->storage == MESH_CPU) {
    return (char*) mesh->vertices + index * format->stride;
  } else {
    return lovrBufferGetData(mesh->vertexBuffer, index * format->stride, count * format->stride);
  }
}
// Returns a writable pointer to `count` vertices starting at `index`
// (count == ~0u means "through the end").  For cpu storage the touched range
// is merged into dirtyVertices so it gets flushed to the GPU later; otherwise
// a Buffer mapping is returned for the caller to fill.
void* lovrMeshSetVertices(Mesh* mesh, uint32_t index, uint32_t count) {
  const DataField* format = lovrMeshGetVertexFormat(mesh);
  if (count == ~0u) count = format->length - index;
  lovrCheck(index < format->length && count <= format->length - index, "Mesh vertex range [%d,%d] overflows mesh capacity", index + 1, index + 1 + count - 1);

  if (mesh->storage == MESH_CPU) {
    mesh->dirtyVertices[0] = MIN(mesh->dirtyVertices[0], index);
    mesh->dirtyVertices[1] = MAX(mesh->dirtyVertices[1], index + count);
    return (char*) mesh->vertices + index * format->stride;
  } else {
    return lovrBufferSetData(mesh->vertexBuffer, index * format->stride, count * format->stride);
  }
}
2023-08-25 20:42:09 +00:00
// Returns a readable pointer to the Mesh's index data, writing the index
// count and element type to the out parameters.  Returns NULL when the Mesh
// has no indices.
void* lovrMeshGetIndices(Mesh* mesh, uint32_t* count, DataType* type) {
  if (mesh->indexCount == 0 || !mesh->indexBuffer) {
    return NULL;
  }

  const DataField* format = mesh->indexBuffer->info.format;
  *count = mesh->indexCount;
  *type = format->type;

  if (mesh->storage == MESH_CPU) {
    return mesh->indices;
  }

  return lovrBufferGetData(mesh->indexBuffer, 0, mesh->indexCount * format->stride);
}
// Returns a writable pointer to `count` indices of the given type, recreating
// the index buffer when the current one is missing, too small, or has a
// different element type.
void* lovrMeshSetIndices(Mesh* mesh, uint32_t count, DataType type) {
  const DataField* format = mesh->indexBuffer ? mesh->indexBuffer->info.format : NULL;
  mesh->indexCount = count;
  mesh->dirtyIndices = true;

  if (!mesh->indexBuffer || count > format->length || type != format->type) {
    lovrRelease(mesh->indexBuffer, lovrBufferDestroy);
    uint32_t stride = (type == TYPE_U16 || type == TYPE_INDEX16) ? 2 : 4;
    // `newFormat` (not `format`) to avoid shadowing the old buffer's format above
    DataField newFormat = { .length = count, .stride = stride, .type = type };
    BufferInfo info = { .format = &newFormat };

    if (mesh->storage == MESH_CPU) {
      mesh->indexBuffer = lovrBufferCreate(&info, NULL);
      mesh->indices = realloc(mesh->indices, count * stride);
      lovrAssert(mesh->indices, "Out of memory");
      return mesh->indices;
    }

    void* data = NULL;
    mesh->indexBuffer = lovrBufferCreate(&info, &data);
    return data;
  }

  if (mesh->storage == MESH_CPU) {
    return mesh->indices;
  }

  return lovrBufferSetData(mesh->indexBuffer, 0, count * format->stride);
}
// Finds the vec3 position attribute in a CPU mesh's vertex data, matching
// either the built-in position location hash or the literal name
// "VertexPosition".  Returns NULL for GPU meshes or when no match exists.
static float* lovrMeshGetPositions(Mesh* mesh) {
  if (mesh->storage == MESH_GPU) return NULL;
  const DataField* format = lovrMeshGetVertexFormat(mesh);
  uint32_t positionHash = (uint32_t) hash64("VertexPosition", strlen("VertexPosition"));
  uint32_t attributeCount = MAX(format->fieldCount, 1);
  for (uint32_t i = 0; i < attributeCount; i++) {
    // A format with no sub-fields is itself the single attribute
    const DataField* attribute = format->fieldCount > 0 ? &format->fields[i] : format;
    if (attribute->type != TYPE_F32x3) continue;
    if (attribute->hash == LOCATION_POSITION || attribute->hash == positionHash) {
      return (float*) ((char*) mesh->vertices + attribute->offset);
    }
  }
  return NULL;
}
// Extracts a flat triangle list from a CPU mesh: a newly allocated xyz float
// array and a newly allocated u32 index array (16-bit indices are widened;
// an unindexed mesh gets the identity index list).  The caller owns both
// allocations.  Fixes vs. previous version: *vertexCount was never written,
// `*indices[i]` indexed the wrong pointer (precedence), and the out-pointers
// were advanced past the allocations, losing the base addresses.
void lovrMeshGetTriangles(Mesh* mesh, float** vertices, uint32_t** indices, uint32_t* vertexCount, uint32_t* indexCount) {
  float* position = lovrMeshGetPositions(mesh);
  lovrCheck(mesh->storage == MESH_CPU, "Mesh storage mode must be 'cpu'");
  lovrCheck(mesh->mode == DRAW_TRIANGLES, "Mesh draw mode must be 'triangles'");
  lovrCheck(position, "Mesh has no VertexPosition attribute with vec3 type");
  const DataField* format = lovrMeshGetVertexFormat(mesh);

  *vertexCount = format->length;
  *vertices = lovrMalloc(format->length * 3 * sizeof(float));

  // Use a local cursor so the caller's pointer keeps addressing the base
  float* vertex = *vertices;
  for (uint32_t i = 0; i < format->length; i++) {
    vec3_init(vertex, position);
    position = (float*) ((char*) position + format->stride);
    vertex += 3;
  }

  if (mesh->indexCount > 0) {
    *indexCount = mesh->indexCount;
    *indices = lovrMalloc(*indexCount * sizeof(uint32_t));
    DataType type = mesh->indexBuffer->info.format->type;
    if (type == TYPE_U16 || type == TYPE_INDEX16) {
      for (uint32_t i = 0; i < mesh->indexCount; i++) {
        (*indices)[i] = (uint32_t) ((uint16_t*) mesh->indices)[i]; // widen to 32 bits
      }
    } else {
      memcpy(*indices, mesh->indices, mesh->indexCount * sizeof(uint32_t));
    }
  } else {
    // Unindexed: validate before allocating so a throw doesn't leak
    lovrCheck(format->length >= 3 && format->length % 3 == 0, "Mesh vertex count must be divisible by 3");
    *indexCount = format->length;
    *indices = lovrMalloc(*indexCount * sizeof(uint32_t));
    for (uint32_t i = 0; i < format->length; i++) {
      (*indices)[i] = i;
    }
  }
}
// Writes the Mesh's bounding box as { minx, maxx, miny, maxy, minz, maxz }.
// Internally bounds are stored as center + half-extents:
// { cx, cy, cz, hx, hy, hz }.  Returns whether the Mesh has bounds set.
// (Removed stray commit-message text that had been pasted into the body.)
bool lovrMeshGetBoundingBox(Mesh* mesh, float box[6]) {
  box[0] = mesh->bounds[0] - mesh->bounds[3];
  box[1] = mesh->bounds[0] + mesh->bounds[3];
  box[2] = mesh->bounds[1] - mesh->bounds[4];
  box[3] = mesh->bounds[1] + mesh->bounds[4];
  box[4] = mesh->bounds[2] - mesh->bounds[5];
  box[5] = mesh->bounds[2] + mesh->bounds[5];
  return mesh->hasBounds;
}
// Sets the Mesh's bounding box from { minx, maxx, miny, maxy, minz, maxz },
// converting to the internal center + half-extents form; NULL clears it.
// (Removed stray commit-message text that had been pasted into the body.)
void lovrMeshSetBoundingBox(Mesh* mesh, float box[6]) {
  if (box) {
    mesh->bounds[0] = (box[0] + box[1]) / 2.f;
    mesh->bounds[1] = (box[2] + box[3]) / 2.f;
    mesh->bounds[2] = (box[4] + box[5]) / 2.f;
    mesh->bounds[3] = (box[1] - box[0]) / 2.f;
    mesh->bounds[4] = (box[3] - box[2]) / 2.f;
    mesh->bounds[5] = (box[5] - box[4]) / 2.f;
    mesh->hasBounds = true;
  } else {
    mesh->hasBounds = false;
  }
}
// Computes and stores the Mesh's bounding box from its position attribute.
// Returns false when no vec3 position attribute is available (GPU storage).
// Fix: maxima were seeded with FLT_MIN, the smallest *positive* float, so a
// mesh whose coordinates were all negative got a wrong box; use -FLT_MAX.
// (Also removed stray commit-message text that had been pasted into the body.)
bool lovrMeshComputeBoundingBox(Mesh* mesh) {
  const DataField* format = lovrMeshGetVertexFormat(mesh);
  float* position = lovrMeshGetPositions(mesh);

  if (!position) {
    return false;
  }

  // { minx, maxx, miny, maxy, minz, maxz }
  float box[6] = { FLT_MAX, -FLT_MAX, FLT_MAX, -FLT_MAX, FLT_MAX, -FLT_MAX };

  for (uint32_t i = 0; i < format->length; i++, position = (float*) ((char*) position + format->stride)) {
    box[0] = MIN(box[0], position[0]);
    box[1] = MAX(box[1], position[0]);
    box[2] = MIN(box[2], position[1]);
    box[3] = MAX(box[3], position[1]);
    box[4] = MIN(box[4], position[2]);
    box[5] = MAX(box[5], position[2]);
  }

  lovrMeshSetBoundingBox(mesh, box);
  return true;
}
// Returns the Mesh's draw mode (points/lines/triangles).
DrawMode lovrMeshGetDrawMode(Mesh* mesh) {
  return mesh->mode;
}
// Sets the Mesh's draw mode (points/lines/triangles).
void lovrMeshSetDrawMode(Mesh* mesh, DrawMode mode) {
  mesh->mode = mode;
}
// Reads back the Mesh's draw range: first index/vertex, count, and base vertex.
void lovrMeshGetDrawRange(Mesh* mesh, uint32_t* start, uint32_t* count, uint32_t* offset) {
  *start = mesh->drawStart;
  *count = mesh->drawCount;
  *offset = mesh->baseVertex;
}
// Sets the Mesh's draw range.  The range is validated against the index
// count when indexed, otherwise against the vertex count.
void lovrMeshSetDrawRange(Mesh* mesh, uint32_t start, uint32_t count, uint32_t offset) {
  uint32_t vertexCount = mesh->vertexBuffer->info.format->length;
  uint32_t limit = mesh->indexCount > 0 ? mesh->indexCount : vertexCount;
  lovrCheck(start < limit && count <= limit - start, "Invalid draw range [%d,%d]", start + 1, start + 1 + count);
  lovrCheck(offset < vertexCount, "Mesh vertex offset must be less than the vertex count");
  mesh->drawStart = start;
  mesh->drawCount = count;
  mesh->baseVertex = offset;
}
// Returns the Mesh's Material (may be NULL); no reference is added.
Material* lovrMeshGetMaterial(Mesh* mesh) {
  return mesh->material;
}
// Sets the Mesh's Material, retaining the new one and releasing the old one.
void lovrMeshSetMaterial(Mesh* mesh, Material* material) {
  // Retain before releasing: assigning the Material the Mesh already holds
  // must not drop its refcount to zero (use-after-free) before re-referencing.
  lovrRetain(material);
  lovrRelease(mesh->material, lovrMaterialDestroy);
  mesh->material = material;
}
// Uploads any pending CPU-side vertex/index edits to the GPU buffers.
// GPU-storage meshes write straight to their buffers, so there is nothing to do.
static void lovrMeshFlush(Mesh* mesh) {
  if (mesh->storage == MESH_GPU) {
    return;
  }

  uint32_t first = mesh->dirtyVertices[0];
  uint32_t last = mesh->dirtyVertices[1];
  if (last > first) {
    uint32_t stride = mesh->vertexBuffer->info.format->stride;
    uint32_t offset = first * stride;
    uint32_t extent = (last - first) * stride;
    void* scratch = lovrBufferSetData(mesh->vertexBuffer, offset, extent);
    memcpy(scratch, (char*) mesh->vertices + offset, extent);
    // Reset the dirty range to "empty" (min > max)
    mesh->dirtyVertices[0] = ~0u;
    mesh->dirtyVertices[1] = 0;
  }

  if (mesh->dirtyIndices) {
    uint32_t stride = mesh->indexBuffer->info.format->stride;
    void* scratch = lovrBufferSetData(mesh->indexBuffer, 0, mesh->indexCount * stride);
    memcpy(scratch, mesh->indices, mesh->indexCount * stride);
    mesh->dirtyIndices = false;
  }
}
2022-07-04 00:26:31 +00:00
// Model
2022-07-13 07:07:15 +00:00
Model * lovrModelCreate ( const ModelInfo * info ) {
2022-07-04 00:26:31 +00:00
ModelData * data = info - > data ;
2024-03-11 21:38:00 +00:00
Model * model = lovrCalloc ( sizeof ( Model ) ) ;
2022-07-04 00:26:31 +00:00
model - > ref = 1 ;
model - > info = * info ;
lovrRetain ( info - > data ) ;
2023-03-15 02:36:09 +00:00
for ( uint32_t i = 0 ; i < data - > skinCount ; i + + ) {
lovrCheck ( data - > skins [ i ] . jointCount < = 256 , " Currently, the max number of joints per skin is 256 " ) ;
}
2022-07-17 19:04:47 +00:00
// Materials and Textures
2023-06-10 01:32:54 +00:00
if ( info - > materials ) {
2024-03-11 21:38:00 +00:00
model - > textures = lovrCalloc ( data - > imageCount * sizeof ( Texture * ) ) ;
model - > materials = lovrMalloc ( data - > materialCount * sizeof ( Material * ) ) ;
2023-06-10 01:32:54 +00:00
for ( uint32_t i = 0 ; i < data - > materialCount ; i + + ) {
MaterialInfo material ;
ModelMaterial * properties = & data - > materials [ i ] ;
memcpy ( & material . data , properties , sizeof ( MaterialData ) ) ;
struct { uint32_t index ; Texture * * texture ; } textures [ ] = {
{ properties - > texture , & material . texture } ,
{ properties - > glowTexture , & material . glowTexture } ,
{ properties - > metalnessTexture , & material . metalnessTexture } ,
{ properties - > roughnessTexture , & material . roughnessTexture } ,
{ properties - > clearcoatTexture , & material . clearcoatTexture } ,
{ properties - > occlusionTexture , & material . occlusionTexture } ,
{ properties - > normalTexture , & material . normalTexture }
} ;
2022-07-17 19:04:47 +00:00
2023-06-10 01:32:54 +00:00
for ( uint32_t t = 0 ; t < COUNTOF ( textures ) ; t + + ) {
uint32_t index = textures [ t ] . index ;
Texture * * texture = textures [ t ] . texture ;
if ( index = = ~ 0u ) {
* texture = NULL ;
} else {
if ( ! model - > textures [ index ] ) {
model - > textures [ index ] = lovrTextureCreate ( & ( TextureInfo ) {
. type = TEXTURE_2D ,
. usage = TEXTURE_SAMPLE ,
. format = lovrImageGetFormat ( data - > images [ index ] ) ,
. width = lovrImageGetWidth ( data - > images [ index ] , 0 ) ,
. height = lovrImageGetHeight ( data - > images [ index ] , 0 ) ,
. layers = 1 ,
. mipmaps = info - > mipmaps | | lovrImageGetLevelCount ( data - > images [ index ] ) > 1 ? ~ 0u : 1 ,
. srgb = texture = = & material . texture | | texture = = & material . glowTexture ,
. images = & data - > images [ index ] ,
. imageCount = 1
} ) ;
}
2022-07-17 19:04:47 +00:00
2023-06-10 01:32:54 +00:00
* texture = model - > textures [ index ] ;
2022-07-17 19:04:47 +00:00
}
}
2023-06-10 01:32:54 +00:00
model - > materials [ i ] = lovrMaterialCreate ( & material ) ;
}
2022-07-04 00:26:31 +00:00
}
// Buffers
2023-03-15 02:36:09 +00:00
char * vertexData = NULL ;
char * indexData = NULL ;
char * blendData = NULL ;
2022-08-07 02:23:41 +00:00
char * skinData = NULL ;
2022-07-04 00:26:31 +00:00
BufferInfo vertexBufferInfo = {
2023-08-25 20:42:09 +00:00
. format = ( DataField [ ] ) {
{ . length = data - > vertexCount , . stride = sizeof ( ModelVertex ) , . fieldCount = 5 } ,
{ . type = TYPE_F32x3 , . offset = offsetof ( ModelVertex , position ) , . hash = LOCATION_POSITION } ,
2024-01-21 01:37:00 +00:00
{ . type = TYPE_SN10x3 , . offset = offsetof ( ModelVertex , normal ) , . hash = LOCATION_NORMAL } ,
2023-08-25 20:42:09 +00:00
{ . type = TYPE_F32x2 , . offset = offsetof ( ModelVertex , uv ) , . hash = LOCATION_UV } ,
{ . type = TYPE_UN8x4 , . offset = offsetof ( ModelVertex , color ) , . hash = LOCATION_COLOR } ,
2024-01-21 01:37:00 +00:00
{ . type = TYPE_SN10x3 , . offset = offsetof ( ModelVertex , tangent ) , . hash = LOCATION_TANGENT }
2023-08-25 20:42:09 +00:00
}
2022-07-04 00:26:31 +00:00
} ;
2024-01-29 08:32:44 +00:00
if ( data - > vertexCount > 0 ) {
model - > vertexBuffer = lovrBufferCreate ( & vertexBufferInfo , ( void * * ) & vertexData ) ;
}
2023-03-15 02:36:09 +00:00
if ( data - > blendShapeVertexCount > 0 ) {
model - > blendBuffer = lovrBufferCreate ( & ( BufferInfo ) {
2023-08-25 20:42:09 +00:00
. format = ( DataField [ ] ) {
{ . length = data - > blendShapeVertexCount , . stride = sizeof ( BlendVertex ) , . fieldCount = 3 } ,
{ . type = TYPE_F32x3 , . offset = offsetof ( BlendVertex , position ) } ,
{ . type = TYPE_F32x3 , . offset = offsetof ( BlendVertex , normal ) } ,
{ . type = TYPE_F32x3 , . offset = offsetof ( BlendVertex , tangent ) }
}
2023-03-15 02:36:09 +00:00
} , ( void * * ) & blendData ) ;
}
2022-07-04 00:26:31 +00:00
if ( data - > skinnedVertexCount > 0 ) {
model - > skinBuffer = lovrBufferCreate ( & ( BufferInfo ) {
2023-08-25 20:42:09 +00:00
. format = ( DataField [ ] ) {
{ . length = data - > skinnedVertexCount , . stride = 8 , . fieldCount = 2 } ,
{ . type = TYPE_UN8x4 , . offset = 0 } ,
{ . type = TYPE_U8x4 , . offset = 4 }
}
2022-07-04 00:26:31 +00:00
} , ( void * * ) & skinData ) ;
2023-03-15 02:36:09 +00:00
}
2022-07-04 00:26:31 +00:00
2023-03-15 02:36:09 +00:00
// Dynamic vertices are ones that are blended or skinned. They need a copy of the original vertex
if ( data - > dynamicVertexCount > 0 ) {
2023-08-25 20:42:09 +00:00
vertexBufferInfo . format - > length = data - > dynamicVertexCount ;
2022-07-04 00:26:31 +00:00
model - > rawVertexBuffer = lovrBufferCreate ( & vertexBufferInfo , NULL ) ;
beginFrame ( ) ;
2023-04-30 06:02:37 +00:00
// The vertex buffer may already have a pending copy if its memory was not host-visible, need to
// wait for that to complete before copying to the raw vertex buffer
2024-01-14 22:51:23 +00:00
gpu_barrier barrier = syncTransfer ( & model - > vertexBuffer - > sync , GPU_PHASE_COPY , GPU_CACHE_TRANSFER_READ ) ;
2023-04-30 06:02:37 +00:00
gpu_sync ( state . stream , & barrier , 1 ) ;
2023-12-30 22:17:20 +00:00
Buffer * src = model - > vertexBuffer ;
Buffer * dst = model - > rawVertexBuffer ;
gpu_copy_buffers ( state . stream , src - > gpu , dst - > gpu , src - > base , dst - > base , data - > dynamicVertexCount * sizeof ( ModelVertex ) ) ;
2022-07-04 00:26:31 +00:00
2023-04-30 06:02:37 +00:00
gpu_sync ( state . stream , & ( gpu_barrier ) {
2024-01-14 22:51:23 +00:00
. prev = GPU_PHASE_COPY ,
2023-04-30 06:02:37 +00:00
. next = GPU_PHASE_SHADER_COMPUTE ,
. flush = GPU_CACHE_TRANSFER_WRITE ,
. clear = GPU_CACHE_STORAGE_READ | GPU_CACHE_STORAGE_WRITE
} , 1 ) ;
2022-07-04 00:26:31 +00:00
}
2024-04-16 22:04:20 +00:00
DataType indexType = data - > indexType = = U32 ? TYPE_INDEX32 : TYPE_INDEX16 ;
2022-08-07 01:05:30 +00:00
uint32_t indexSize = data - > indexType = = U32 ? 4 : 2 ;
2022-07-04 00:26:31 +00:00
if ( data - > indexCount > 0 ) {
model - > indexBuffer = lovrBufferCreate ( & ( BufferInfo ) {
2023-08-25 20:42:09 +00:00
. format = ( DataField [ ] ) {
2024-04-16 22:04:20 +00:00
{ . length = data - > indexCount , . stride = indexSize , . type = indexType }
2023-01-16 13:15:13 +00:00
}
2023-03-15 02:36:09 +00:00
} , ( void * * ) & indexData ) ;
2022-07-04 00:26:31 +00:00
}
2023-03-15 02:36:09 +00:00
// Primitives are sorted to simplify animation:
// - Skinned primitives come first, ordered by skin
// - Primitives with blend shapes are next
// - Then "non-dynamic" primitives follow
// Within each section primitives are still sorted by their index.
2023-04-30 01:33:58 +00:00
size_t stack = tempPush ( & state . allocator ) ;
uint64_t * primitiveOrder = tempAlloc ( & state . allocator , data - > primitiveCount * sizeof ( uint64_t ) ) ;
uint32_t * baseVertex = tempAlloc ( & state . allocator , data - > primitiveCount * sizeof ( uint32_t ) ) ;
2022-07-04 00:26:31 +00:00
for ( uint32_t i = 0 ; i < data - > primitiveCount ; i + + ) {
2023-03-15 02:36:09 +00:00
uint32_t hi = data - > primitives [ i ] . skin ;
2023-03-17 03:31:15 +00:00
if ( hi = = ~ 0u & & ! ! data - > primitives [ i ] . blendShapes ) hi - - ;
2023-03-15 02:36:09 +00:00
primitiveOrder [ i ] = ( ( uint64_t ) hi < < 32 ) | i ;
2022-07-04 00:26:31 +00:00
}
2023-03-15 02:36:09 +00:00
qsort ( primitiveOrder , data - > primitiveCount , sizeof ( uint64_t ) , u64cmp ) ;
2022-07-04 00:26:31 +00:00
// Draws
2024-03-11 21:38:00 +00:00
model - > draws = lovrCalloc ( data - > primitiveCount * sizeof ( DrawInfo ) ) ;
model - > boundingBoxes = lovrMalloc ( data - > primitiveCount * 6 * sizeof ( float ) ) ;
2022-07-04 00:26:31 +00:00
for ( uint32_t i = 0 , vertexCursor = 0 , indexCursor = 0 ; i < data - > primitiveCount ; i + + ) {
2023-03-15 02:36:09 +00:00
ModelPrimitive * primitive = & data - > primitives [ primitiveOrder [ i ] & ~ 0u ] ;
2023-06-23 21:41:39 +00:00
ModelAttribute * position = primitive - > attributes [ ATTR_POSITION ] ;
2023-04-30 06:02:37 +00:00
DrawInfo * draw = & model - > draws [ primitiveOrder [ i ] & ~ 0u ] ;
2022-07-04 00:26:31 +00:00
switch ( primitive - > mode ) {
2023-06-11 05:06:29 +00:00
case DRAW_POINT_LIST : draw - > mode = DRAW_POINTS ; break ;
case DRAW_LINE_LIST : draw - > mode = DRAW_LINES ; break ;
case DRAW_TRIANGLE_LIST : draw - > mode = DRAW_TRIANGLES ; break ;
2022-07-04 00:26:31 +00:00
default : lovrThrow ( " Model uses an unsupported draw mode (lineloop, linestrip, strip, fan) " ) ;
}
2023-06-10 01:32:54 +00:00
draw - > material = ! info - > materials | | primitive - > material = = ~ 0u ? NULL : model - > materials [ primitive - > material ] ;
2022-07-04 00:26:31 +00:00
draw - > vertex . buffer = model - > vertexBuffer ;
if ( primitive - > indices ) {
draw - > index . buffer = model - > indexBuffer ;
draw - > start = indexCursor ;
draw - > count = primitive - > indices - > count ;
2024-01-18 00:05:37 +00:00
draw - > baseVertex = vertexCursor ;
2022-07-04 00:26:31 +00:00
indexCursor + = draw - > count ;
} else {
draw - > start = vertexCursor ;
2023-06-23 21:41:39 +00:00
draw - > count = position - > count ;
2022-07-04 00:26:31 +00:00
}
2023-06-23 21:41:39 +00:00
draw - > bounds = model - > boundingBoxes + i * 6 ;
draw - > bounds [ 0 ] = ( position - > min [ 0 ] + position - > max [ 0 ] ) / 2.f ;
draw - > bounds [ 1 ] = ( position - > min [ 1 ] + position - > max [ 1 ] ) / 2.f ;
draw - > bounds [ 2 ] = ( position - > min [ 2 ] + position - > max [ 2 ] ) / 2.f ;
draw - > bounds [ 3 ] = ( position - > max [ 0 ] - position - > min [ 0 ] ) / 2.f ;
draw - > bounds [ 4 ] = ( position - > max [ 1 ] - position - > min [ 1 ] ) / 2.f ;
draw - > bounds [ 5 ] = ( position - > max [ 2 ] - position - > min [ 2 ] ) / 2.f ;
2023-03-15 02:36:09 +00:00
baseVertex [ i ] = vertexCursor ;
2023-06-23 21:41:39 +00:00
vertexCursor + = position - > count ;
2022-07-04 00:26:31 +00:00
}
// Vertices
for ( uint32_t i = 0 ; i < data - > primitiveCount ; i + + ) {
2023-03-15 02:36:09 +00:00
ModelPrimitive * primitive = & data - > primitives [ primitiveOrder [ i ] & ~ 0u ] ;
2022-07-04 00:26:31 +00:00
ModelAttribute * * attributes = primitive - > attributes ;
uint32_t count = attributes [ ATTR_POSITION ] - > count ;
size_t stride = sizeof ( ModelVertex ) ;
2023-03-15 02:36:09 +00:00
lovrModelDataCopyAttribute ( data , attributes [ ATTR_POSITION ] , vertexData + 0 , F32 , 3 , false , count , stride , 0 ) ;
2024-01-21 01:37:00 +00:00
lovrModelDataCopyAttribute ( data , attributes [ ATTR_NORMAL ] , vertexData + 12 , SN10x3 , 1 , false , count , stride , 0 ) ;
lovrModelDataCopyAttribute ( data , attributes [ ATTR_UV ] , vertexData + 16 , F32 , 2 , false , count , stride , 0 ) ;
lovrModelDataCopyAttribute ( data , attributes [ ATTR_COLOR ] , vertexData + 24 , U8 , 4 , true , count , stride , 255 ) ;
lovrModelDataCopyAttribute ( data , attributes [ ATTR_TANGENT ] , vertexData + 28 , SN10x3 , 1 , false , count , stride , 0 ) ;
2023-03-15 02:36:09 +00:00
vertexData + = count * stride ;
2022-07-04 00:26:31 +00:00
if ( data - > skinnedVertexCount > 0 & & primitive - > skin ! = ~ 0u ) {
lovrModelDataCopyAttribute ( data , attributes [ ATTR_JOINTS ] , skinData + 0 , U8 , 4 , false , count , 8 , 0 ) ;
lovrModelDataCopyAttribute ( data , attributes [ ATTR_WEIGHTS ] , skinData + 4 , U8 , 4 , true , count , 8 , 0 ) ;
skinData + = count * 8 ;
}
if ( primitive - > indices ) {
2023-03-15 02:36:09 +00:00
char * indices = data - > buffers [ primitive - > indices - > buffer ] . data + primitive - > indices - > offset ;
memcpy ( indexData , indices , primitive - > indices - > count * indexSize ) ;
indexData + = primitive - > indices - > count * indexSize ;
2022-07-04 00:26:31 +00:00
}
}
2023-03-15 02:36:09 +00:00
// Blend shapes
2023-03-15 06:25:49 +00:00
if ( data - > blendShapeCount > 0 ) {
for ( uint32_t i = 0 ; i < data - > blendShapeCount ; i + + ) {
if ( i = = 0 | | data - > blendShapes [ i - 1 ] . node ! = data - > blendShapes [ i ] . node ) {
model - > blendGroupCount + + ;
}
2023-03-15 02:36:09 +00:00
}
2024-03-11 21:38:00 +00:00
model - > blendGroups = lovrMalloc ( model - > blendGroupCount * sizeof ( BlendGroup ) ) ;
model - > blendShapeWeights = lovrMalloc ( data - > blendShapeCount * sizeof ( float ) ) ;
2023-03-15 02:36:09 +00:00
2023-03-15 06:25:49 +00:00
BlendGroup * group = model - > blendGroups ;
2023-03-15 02:36:09 +00:00
2023-03-15 06:25:49 +00:00
for ( uint32_t i = 0 ; i < data - > blendShapeCount ; i + + ) {
ModelBlendShape * blendShape = & data - > blendShapes [ i ] ;
ModelNode * node = & data - > nodes [ blendShape - > node ] ;
uint32_t groupVertexCount = 0 ;
2023-03-15 02:36:09 +00:00
2023-03-15 06:25:49 +00:00
for ( uint32_t p = 0 ; p < node - > primitiveCount ; p + + ) {
ModelPrimitive * primitive = & data - > primitives [ node - > primitiveIndex + p ] ;
uint32_t vertexCount = primitive - > attributes [ ATTR_POSITION ] - > count ;
2023-03-16 07:22:01 +00:00
size_t stride = sizeof ( BlendVertex ) ;
2023-03-15 02:36:09 +00:00
2023-06-21 04:45:58 +00:00
ModelBlendData * blendAttributes = & primitive - > blendShapes [ i - node - > blendShapeIndex ] ;
lovrModelDataCopyAttribute ( data , blendAttributes - > positions , blendData + offsetof ( BlendVertex , position ) , F32 , 3 , false , vertexCount , stride , 0 ) ;
lovrModelDataCopyAttribute ( data , blendAttributes - > normals , blendData + offsetof ( BlendVertex , normal ) , F32 , 3 , false , vertexCount , stride , 0 ) ;
lovrModelDataCopyAttribute ( data , blendAttributes - > tangents , blendData + offsetof ( BlendVertex , tangent ) , F32 , 3 , false , vertexCount , stride , 0 ) ;
2023-03-17 03:31:15 +00:00
blendData + = vertexCount * stride ;
2023-03-15 06:25:49 +00:00
groupVertexCount + = vertexCount ;
2023-03-15 02:36:09 +00:00
}
2023-03-15 06:25:49 +00:00
if ( i = = 0 | | blendShape [ - 1 ] . node ! = blendShape [ 0 ] . node ) {
group - > index = node - > blendShapeIndex ;
group - > count = node - > blendShapeCount ;
group - > vertexIndex = baseVertex [ node - > primitiveIndex ] ;
group - > vertexCount = groupVertexCount ;
group + + ;
}
2023-03-15 02:36:09 +00:00
}
2023-03-16 07:22:01 +00:00
2023-10-18 22:51:52 +00:00
lovrModelResetBlendShapes ( model ) ;
2022-07-04 00:26:31 +00:00
}
2023-03-15 02:36:09 +00:00
// Transforms
2024-03-11 21:38:00 +00:00
model - > localTransforms = lovrMalloc ( sizeof ( NodeTransform ) * data - > nodeCount ) ;
model - > globalTransforms = lovrMalloc ( 16 * sizeof ( float ) * data - > nodeCount ) ;
2022-07-13 02:35:23 +00:00
lovrModelResetNodeTransforms ( model ) ;
2023-05-10 07:47:16 +00:00
2023-04-30 01:33:58 +00:00
tempPop ( & state . allocator , stack ) ;
2022-07-04 00:26:31 +00:00
return model ;
}
2023-05-10 07:47:16 +00:00
// Creates a lightweight clone of a Model.  The clone shares the parent's
// immutable resources (textures, materials, raw/index/blend/skin buffers) and
// gets its own copies of everything animation mutates: the animated vertex
// buffer, draw list, blend-shape weights, and node transforms.
Model* lovrModelClone(Model* parent) {
  ModelData* data = parent->info.data;
  Model* model = lovrCalloc(sizeof(Model));
  model->ref = 1;
  model->parent = parent;
  model->info = parent->info;
  lovrRetain(parent);

  // Shared, read-only resources (owned by the parent)
  model->textures = parent->textures;
  model->materials = parent->materials;
  model->rawVertexBuffer = parent->rawVertexBuffer;
  model->indexBuffer = parent->indexBuffer;
  model->blendBuffer = parent->blendBuffer;
  model->skinBuffer = parent->skinBuffer;
  model->blendGroups = parent->blendGroups;
  model->blendGroupCount = parent->blendGroupCount;

  if (parent->vertexBuffer) {
    // Private animated vertex buffer, seeded from the parent's current vertices
    model->vertexBuffer = lovrBufferCreate(&parent->vertexBuffer->info, NULL);

    beginFrame();

    gpu_barrier barrier = syncTransfer(&parent->vertexBuffer->sync, GPU_PHASE_COPY, GPU_CACHE_TRANSFER_READ);
    gpu_sync(state.stream, &barrier, 1);

    Buffer* src = parent->vertexBuffer;
    Buffer* dst = model->vertexBuffer;
    gpu_copy_buffers(state.stream, src->gpu, dst->gpu, src->base, dst->base, parent->vertexBuffer->info.size);

    gpu_sync(state.stream, &(gpu_barrier) {
      .prev = GPU_PHASE_COPY,
      .next = GPU_PHASE_SHADER_COMPUTE,
      .flush = GPU_CACHE_TRANSFER_WRITE,
      .clear = GPU_CACHE_STORAGE_READ | GPU_CACHE_STORAGE_WRITE
    }, 1);
  }

  // Private draw list pointing at the clone's vertex buffer
  model->draws = lovrMalloc(data->primitiveCount * sizeof(DrawInfo));
  for (uint32_t i = 0; i < data->primitiveCount; i++) {
    model->draws[i] = parent->draws[i];
    model->draws[i].vertex.buffer = model->vertexBuffer;
  }

  model->blendShapeWeights = lovrMalloc(data->blendShapeCount * sizeof(float));
  lovrModelResetBlendShapes(model);

  model->localTransforms = lovrMalloc(sizeof(NodeTransform) * data->nodeCount);
  model->globalTransforms = lovrMalloc(16 * sizeof(float) * data->nodeCount);
  lovrModelResetNodeTransforms(model);

  return model;
}
2022-07-04 00:26:31 +00:00
void lovrModelDestroy ( void * ref ) {
Model * model = ref ;
2023-05-10 07:47:16 +00:00
if ( model - > parent ) {
lovrRelease ( model - > parent , lovrModelDestroy ) ;
lovrRelease ( model - > vertexBuffer , lovrBufferDestroy ) ;
2024-03-11 21:38:00 +00:00
lovrFree ( model - > localTransforms ) ;
lovrFree ( model - > globalTransforms ) ;
lovrFree ( model - > blendShapeWeights ) ;
lovrFree ( model - > meshes ) ;
lovrFree ( model - > draws ) ;
lovrFree ( model ) ;
2023-05-10 07:47:16 +00:00
return ;
}
2022-07-04 00:26:31 +00:00
ModelData * data = model - > info . data ;
2023-06-10 01:32:54 +00:00
if ( model - > info . materials ) {
for ( uint32_t i = 0 ; i < data - > materialCount ; i + + ) {
lovrRelease ( model - > materials [ i ] , lovrMaterialDestroy ) ;
}
for ( uint32_t i = 0 ; i < data - > imageCount ; i + + ) {
lovrRelease ( model - > textures [ i ] , lovrTextureDestroy ) ;
}
2024-03-11 21:38:00 +00:00
lovrFree ( model - > materials ) ;
lovrFree ( model - > textures ) ;
2022-07-04 00:26:31 +00:00
}
lovrRelease ( model - > rawVertexBuffer , lovrBufferDestroy ) ;
lovrRelease ( model - > vertexBuffer , lovrBufferDestroy ) ;
lovrRelease ( model - > indexBuffer , lovrBufferDestroy ) ;
2023-03-17 03:31:15 +00:00
lovrRelease ( model - > blendBuffer , lovrBufferDestroy ) ;
2022-07-04 00:26:31 +00:00
lovrRelease ( model - > skinBuffer , lovrBufferDestroy ) ;
lovrRelease ( model - > info . data , lovrModelDataDestroy ) ;
2024-03-11 21:38:00 +00:00
lovrFree ( model - > localTransforms ) ;
lovrFree ( model - > globalTransforms ) ;
lovrFree ( model - > boundingBoxes ) ;
lovrFree ( model - > blendShapeWeights ) ;
lovrFree ( model - > blendGroups ) ;
lovrFree ( model - > meshes ) ;
lovrFree ( model - > draws ) ;
lovrFree ( model ) ;
2022-07-04 00:26:31 +00:00
}
2022-07-07 02:22:02 +00:00
// Returns the ModelInfo the Model was created with (borrowed pointer).
const ModelInfo* lovrModelGetInfo(Model* model) {
  return &model->info;
}
2022-07-13 02:35:23 +00:00
// Resets every node's local transform to the rest pose stored in the
// ModelData, decomposing matrix-form nodes into position/rotation/scale.
void lovrModelResetNodeTransforms(Model* model) {
  ModelData* data = model->info.data;

  for (uint32_t i = 0; i < data->nodeCount; i++) {
    ModelNode* node = &data->nodes[i];
    NodeTransform* transform = &model->localTransforms[i];

    if (node->hasMatrix) {
      // Rest pose stored as a matrix: decompose into TRS components
      mat4_getPosition(node->transform.matrix, transform->position);
      mat4_getOrientation(node->transform.matrix, transform->rotation);
      mat4_getScale(node->transform.matrix, transform->scale);
    } else {
      vec3_init(transform->position, node->transform.translation);
      quat_init(transform->rotation, node->transform.rotation);
      vec3_init(transform->scale, node->transform.scale);
    }
  }

  model->transformsDirty = true;
}
2023-10-18 22:51:52 +00:00
// Resets every blend shape weight to the default stored in the ModelData.
void lovrModelResetBlendShapes(Model* model) {
  ModelData* data = model->info.data;
  for (uint32_t i = 0; i < data->blendShapeCount; i++) {
    model->blendShapeWeights[i] = data->blendShapes[i].weight;
  }
  model->blendShapesDirty = true;
}
2022-07-04 00:26:31 +00:00
// Samples one animation track at 'time' (wrapped to the animation's duration)
// and blends the result into the Model's local node transforms and blend shape
// weights.  'alpha' controls the blend: 0 is a no-op, 1 fully overwrites.
//
// Fix: the SMOOTH_CUBIC case used incorrect Hermite basis coefficients — the
// out-tangent weight 'b' duplicated 'a' and the in-tangent weight 'd' used a
// product instead of a difference.  The correct cubic Hermite basis (with
// tangents scaled by the keyframe interval, per the glTF cubic spline sampler)
// is h00 = 2z³-3z²+1, h10 = (z³-2z²+z)·dt, h01 = -2z³+3z², h11 = (z³-z²)·dt.
// Also renamed the inner lerp loop index, which shadowed the channel loop's.
void lovrModelAnimate(Model* model, uint32_t animationIndex, float time, float alpha) {
  if (alpha <= 0.f) return;
  ModelData* data = model->info.data;
  lovrCheck(animationIndex < data->animationCount, "Invalid animation index '%d' (Model has %d animation%s)", animationIndex + 1, data->animationCount, data->animationCount == 1 ? "" : "s");

  ModelAnimation* animation = &data->animations[animationIndex];
  time = fmodf(time, animation->duration);

  size_t stack = tempPush(&state.allocator);

  for (uint32_t i = 0; i < animation->channelCount; i++) {
    ModelAnimationChannel* channel = &animation->channels[i];
    uint32_t node = channel->nodeIndex;

    // Find the first keyframe at or after 'time'
    uint32_t keyframe = 0;
    while (keyframe < channel->keyframeCount && channel->times[keyframe] < time) {
      keyframe++;
    }

    // Number of floats per keyframe for this property
    size_t n;
    switch (channel->property) {
      case PROP_TRANSLATION: n = 3; break;
      case PROP_SCALE: n = 3; break;
      case PROP_ROTATION: n = 4; break;
      case PROP_WEIGHTS: n = data->nodes[node].blendShapeCount; break;
    }

    float* property = tempAlloc(&state.allocator, n * sizeof(float));

    // Handle the first/last keyframe case (no interpolation)
    if (keyframe == 0 || keyframe >= channel->keyframeCount) {
      size_t index = MIN(keyframe, channel->keyframeCount - 1);
      // For cubic interpolation, each keyframe has 3 parts, and the actual data is in the middle
      if (channel->smoothing == SMOOTH_CUBIC) {
        index = 3 * index + 1;
      }
      memcpy(property, channel->data + index * n, n * sizeof(float));
    } else {
      float t1 = channel->times[keyframe - 1];
      float t2 = channel->times[keyframe];
      float z = (time - t1) / (t2 - t1);
      switch (channel->smoothing) {
        case SMOOTH_STEP:
          memcpy(property, channel->data + (z >= .5f ? keyframe : keyframe - 1) * n, n * sizeof(float));
          break;
        case SMOOTH_LINEAR:
          memcpy(property, channel->data + (keyframe - 1) * n, n * sizeof(float));
          if (channel->property == PROP_ROTATION) {
            quat_slerp(property, channel->data + keyframe * n, z);
          } else {
            float* target = channel->data + keyframe * n;
            for (uint32_t j = 0; j < n; j++) {
              property[j] += (target[j] - property[j]) * z;
            }
          }
          break;
        case SMOOTH_CUBIC: {
          // Keyframes are stored as (inTangent, value, outTangent) triples
          size_t stride = 3 * n;
          float* p0 = channel->data + (keyframe - 1) * stride + 1 * n;
          float* m0 = channel->data + (keyframe - 1) * stride + 2 * n;
          float* p1 = channel->data + (keyframe - 0) * stride + 1 * n;
          float* m1 = channel->data + (keyframe - 0) * stride + 0 * n;
          float dt = t2 - t1;
          float z2 = z * z;
          float z3 = z2 * z;
          // Cubic Hermite basis functions; tangent terms scaled by dt
          float a = 2.f * z3 - 3.f * z2 + 1.f;
          float b = (z3 - 2.f * z2 + z) * dt;
          float c = -2.f * z3 + 3.f * z2;
          float d = (z3 - z2) * dt;
          for (size_t j = 0; j < n; j++) {
            property[j] = a * p0[j] + b * m0[j] + c * p1[j] + d * m1[j];
          }
          break;
        }
        default: break;
      }
    }

    if (channel->property == PROP_WEIGHTS) {
      model->blendShapesDirty = true;
    } else {
      model->transformsDirty = true;
    }

    // Destination the sampled property blends into
    float* dst;
    switch (channel->property) {
      case PROP_TRANSLATION: dst = model->localTransforms[node].position; break;
      case PROP_SCALE: dst = model->localTransforms[node].scale; break;
      case PROP_ROTATION: dst = model->localTransforms[node].rotation; break;
      case PROP_WEIGHTS: dst = &model->blendShapeWeights[data->nodes[node].blendShapeIndex]; break;
    }

    if (alpha >= 1.f) {
      memcpy(dst, property, n * sizeof(float));
    } else {
      // NOTE: rotations are lerped component-wise here rather than slerped;
      // acceptable for small alphas — preserved from the original behavior
      for (uint32_t j = 0; j < n; j++) {
        dst[j] += (property[j] - dst[j]) * alpha;
      }
    }
  }

  tempPop(&state.allocator, stack);
}
2023-03-15 06:25:49 +00:00
// Returns the current weight of the blend shape at 'index'.
float lovrModelGetBlendShapeWeight(Model* model, uint32_t index) {
  return model->blendShapeWeights[index];
}
2023-03-15 06:25:49 +00:00
// Overrides the weight of a single blend shape and flags the blend shape
// state so the next vertex animation pass re-runs the blending compute.
void lovrModelSetBlendShapeWeight(Model* model, uint32_t index, float weight) {
  float* weights = model->blendShapeWeights;
  weights[index] = weight;
  model->blendShapesDirty = true;
}
2023-07-11 00:51:24 +00:00
// Reads a node's pose.  ORIGIN_PARENT returns the local (parent-relative)
// transform; otherwise the model-space transform is returned, refreshing the
// cached global transform matrices first if they are stale.
void lovrModelGetNodeTransform(Model* model, uint32_t node, float position[3], float scale[3], float rotation[4], OriginType origin) {
  if (origin == ORIGIN_PARENT) {
    NodeTransform* local = &model->localTransforms[node];
    vec3_init(position, local->position);
    vec3_init(scale, local->scale);
    quat_init(rotation, local->rotation);
    return;
  }

  if (model->transformsDirty) {
    updateModelTransforms(model, model->info.data->rootNode, (float[]) MAT4_IDENTITY);
    model->transformsDirty = false;
  }

  float* global = model->globalTransforms + 16 * node;
  mat4_getPosition(global, position);
  mat4_getScale(global, scale);
  mat4_getOrientation(global, rotation);
}
2023-07-11 00:51:24 +00:00
// Writes a node's local pose, blending toward the given components by
// 'alpha'.  NULL components are left untouched; alpha <= 0 is a no-op and
// alpha >= 1 overwrites outright.
void lovrModelSetNodeTransform(Model* model, uint32_t node, float position[3], float scale[3], float rotation[4], float alpha) {
  if (alpha <= 0.f) return;

  NodeTransform* pose = &model->localTransforms[node];

  if (alpha >= 1.f) {
    // Full overwrite
    if (position) vec3_init(pose->position, position);
    if (scale) vec3_init(pose->scale, scale);
    if (rotation) quat_init(pose->rotation, rotation);
  } else {
    // Partial blend toward the target pose
    if (position) vec3_lerp(pose->position, position, alpha);
    if (scale) vec3_lerp(pose->scale, scale, alpha);
    if (rotation) quat_slerp(pose->rotation, rotation, alpha);
  }

  model->transformsDirty = true;
}
2023-06-11 05:06:29 +00:00
// Returns the Buffer holding the Model's original (unanimated) vertices.
Buffer* lovrModelGetVertexBuffer(Model* model) {
  return model->rawVertexBuffer;
}
// Returns the Model's index Buffer.
Buffer* lovrModelGetIndexBuffer(Model* model) {
  return model->indexBuffer;
}
// Returns a Mesh wrapping the Model's primitive at 'index', creating both the
// Mesh array and the individual Mesh lazily on first access.
Mesh* lovrModelGetMesh(Model* model, uint32_t index) {
  ModelData* data = model->info.data;
  lovrCheck(index < data->primitiveCount, "Invalid mesh index '%d' (Model has %d mesh%s)", index + 1, data->primitiveCount, data->primitiveCount == 1 ? "" : "es");

  // Lazily allocate the array of Mesh handles
  if (!model->meshes) {
    model->meshes = lovrCalloc(data->primitiveCount * sizeof(Mesh*));
  }

  Mesh* mesh = model->meshes[index];

  // Lazily create the Mesh for this primitive, sharing the Model's buffers
  if (!mesh) {
    DrawInfo* draw = &model->draws[index];
    MeshInfo info = { .vertexBuffer = model->vertexBuffer, .storage = MESH_GPU };
    mesh = lovrMeshCreate(&info, NULL);
    if (draw->index.buffer) lovrMeshSetIndexBuffer(mesh, model->indexBuffer);
    lovrMeshSetDrawMode(mesh, draw->mode);
    lovrMeshSetDrawRange(mesh, draw->start, draw->count, draw->baseVertex);
    lovrMeshSetMaterial(mesh, draw->material);
    memcpy(mesh->bounds, draw->bounds, sizeof(mesh->bounds));
    mesh->hasBounds = true;
    model->meshes[index] = mesh;
  }

  return mesh;
}
2022-07-04 00:26:31 +00:00
// Returns the Texture created from the Model's image at 'index'.
Texture* lovrModelGetTexture(Model* model, uint32_t index) {
  ModelData* data = model->info.data;
  uint32_t count = data->imageCount;
  lovrCheck(index < count, "Invalid texture index '%d' (Model has %d texture%s)", index + 1, count, count == 1 ? "" : "s");
  return model->textures[index];
}
// Returns the Material at 'index' defined by the Model's asset.
Material* lovrModelGetMaterial(Model* model, uint32_t index) {
  ModelData* data = model->info.data;
  uint32_t count = data->materialCount;
  lovrCheck(index < count, "Invalid material index '%d' (Model has %d material%s)", index + 1, count, count == 1 ? "" : "s");
  return model->materials[index];
}
2023-04-01 01:16:54 +00:00
// Records the GPU compute work that produces the Model's animated vertices:
// first blend shape morphing (SHADER_BLENDER), then skeletal skinning
// (SHADER_ANIMATOR), writing results into model->vertexBuffer.  Work is
// skipped when the model isn't animated, nothing is dirty, or the work was
// already recorded during the current GPU tick.
static void lovrModelAnimateVertices(Model* model) {
  ModelData* data = model->info.data;

  bool blend = model->blendGroupCount > 0;
  bool skin = data->skinCount > 0;

  beginFrame();

  if ((!blend && !skin) || (!model->transformsDirty && !model->blendShapesDirty) || model->lastVertexAnimation == state.tick) {
    return;
  }

  // Skinning reads the cached global transforms, so refresh them if stale
  if (model->transformsDirty) {
    updateModelTransforms(model, model->info.data->rootNode, (float[]) MAT4_IDENTITY);
    model->transformsDirty = false;
  }

  if (blend) {
    Shader* shader = lovrGraphicsGetDefaultShader(SHADER_BLENDER);
    uint32_t vertexCount = data->dynamicVertexCount;
    uint32_t blendBufferCursor = 0;
    uint32_t chunkSize = 64; // Blend shapes are processed 64 at a time per dispatch

    gpu_binding bindings[] = {
      { 0, GPU_SLOT_STORAGE_BUFFER, .buffer = { model->rawVertexBuffer->gpu, model->rawVertexBuffer->base, vertexCount * sizeof(ModelVertex) } },
      { 1, GPU_SLOT_STORAGE_BUFFER, .buffer = { model->vertexBuffer->gpu, model->vertexBuffer->base, vertexCount * sizeof(ModelVertex) } },
      { 2, GPU_SLOT_STORAGE_BUFFER, .buffer = { model->blendBuffer->gpu, model->blendBuffer->base, model->blendBuffer->info.size } },
      { 3, GPU_SLOT_UNIFORM_BUFFER, .buffer = { NULL, 0, chunkSize * sizeof(float) } } // Weights; filled in per chunk below
    };

    gpu_compute_begin(state.stream);
    gpu_bind_pipeline(state.stream, shader->computePipeline, GPU_PIPELINE_COMPUTE);

    for (uint32_t i = 0; i < model->blendGroupCount; i++) {
      BlendGroup* group = &model->blendGroups[i];

      for (uint32_t j = 0; j < group->count; j += chunkSize) {
        uint32_t count = MIN(group->count - j, chunkSize);
        bool first = j == 0; // First chunk initializes output instead of accumulating

        // Upload this chunk of weights into a transient uniform buffer
        BufferView view = getBuffer(GPU_BUFFER_STREAM, chunkSize * sizeof(float), state.limits.uniformBufferAlign);
        memcpy(view.pointer, model->blendShapeWeights + group->index + j, count * sizeof(float));
        bindings[3].buffer = (gpu_buffer_binding) { view.buffer, view.offset, view.extent };

        gpu_bundle* bundle = getBundle(shader->layout, bindings, COUNTOF(bindings));
        uint32_t constants[] = { group->vertexIndex, group->vertexCount, count, blendBufferCursor, first };
        uint32_t subgroupSize = state.device.subgroupSize;

        gpu_bind_bundles(state.stream, shader->gpu, &bundle, 0, 1, NULL, 0);
        gpu_push_constants(state.stream, shader->gpu, constants, sizeof(constants));
        gpu_compute(state.stream, (group->vertexCount + subgroupSize - 1) / subgroupSize, 1, 1);

        // Later chunks accumulate into the same vertices, so dispatches
        // within a group must be serialized with a compute->compute barrier
        if (j + count < group->count) {
          gpu_sync(state.stream, &(gpu_barrier) {
            .prev = GPU_PHASE_SHADER_COMPUTE,
            .next = GPU_PHASE_SHADER_COMPUTE,
            .flush = GPU_CACHE_STORAGE_WRITE,
            .clear = GPU_CACHE_STORAGE_READ
          }, 1);
        }

        blendBufferCursor += group->vertexCount * count;
      }
    }

    model->blendShapesDirty = false;
  }

  if (skin) {
    if (blend) {
      // Make the blending pass's writes visible to the skinning pass
      gpu_sync(state.stream, &(gpu_barrier) {
        .prev = GPU_PHASE_SHADER_COMPUTE,
        .next = GPU_PHASE_SHADER_COMPUTE,
        .flush = GPU_CACHE_STORAGE_WRITE,
        .clear = GPU_CACHE_STORAGE_READ | GPU_CACHE_STORAGE_WRITE
      }, 1);
    } else {
      gpu_compute_begin(state.stream);
    }

    Shader* shader = lovrGraphicsGetDefaultShader(SHADER_ANIMATOR);
    // Skinning reads blended vertices when blending ran, raw vertices otherwise
    Buffer* sourceBuffer = blend ? model->vertexBuffer : model->rawVertexBuffer;

    uint32_t count = data->skinnedVertexCount;

    gpu_binding bindings[] = {
      { 0, GPU_SLOT_STORAGE_BUFFER, .buffer = { sourceBuffer->gpu, sourceBuffer->base, count * sizeof(ModelVertex) } },
      { 1, GPU_SLOT_STORAGE_BUFFER, .buffer = { model->vertexBuffer->gpu, model->vertexBuffer->base, count * sizeof(ModelVertex) } },
      { 2, GPU_SLOT_STORAGE_BUFFER, .buffer = { model->skinBuffer->gpu, model->skinBuffer->base, count * 8 } },
      { 3, GPU_SLOT_UNIFORM_BUFFER, .buffer = { NULL, 0, 0 } } // Filled in for each skin
    };

    gpu_bind_pipeline(state.stream, shader->computePipeline, GPU_PIPELINE_COMPUTE);

    for (uint32_t i = 0, baseVertex = 0; i < data->skinCount; i++) {
      ModelSkin* skin = &data->skins[i];

      // Upload joint matrices (global node transform * inverse bind matrix)
      uint32_t align = state.limits.uniformBufferAlign;
      BufferView view = getBuffer(GPU_BUFFER_STREAM, skin->jointCount * 16 * sizeof(float), align);
      bindings[3].buffer = (gpu_buffer_binding) { view.buffer, view.offset, view.extent };

      float transform[16];
      float* joints = view.pointer;
      for (uint32_t j = 0; j < skin->jointCount; j++) {
        mat4_init(transform, model->globalTransforms + 16 * skin->joints[j]);
        mat4_mul(transform, skin->inverseBindMatrices + 16 * j);
        memcpy(joints, transform, sizeof(transform));
        joints += 16;
      }

      gpu_bundle* bundle = getBundle(shader->layout, bindings, COUNTOF(bindings));
      gpu_bind_bundles(state.stream, shader->gpu, &bundle, 0, 1, NULL, 0);

      // Split into multiple dispatches when the skin's vertex count exceeds
      // what a single dispatch can cover (workgroupCount limit)
      uint32_t subgroupSize = state.device.subgroupSize;
      uint32_t maxVerticesPerDispatch = state.limits.workgroupCount[0] * subgroupSize;
      uint32_t verticesRemaining = skin->vertexCount;
      while (verticesRemaining > 0) {
        uint32_t vertexCount = MIN(verticesRemaining, maxVerticesPerDispatch);
        gpu_push_constants(state.stream, shader->gpu, (uint32_t[2]) { baseVertex, vertexCount }, 8);
        gpu_compute(state.stream, (vertexCount + subgroupSize - 1) / subgroupSize, 1, 1);
        verticesRemaining -= vertexCount;
        baseVertex += vertexCount;
      }
    }
  }
  gpu_compute_end(state.stream);

  // Downstream vertex input must wait for these compute writes
  state.barrier.prev |= GPU_PHASE_SHADER_COMPUTE;
  state.barrier.next |= GPU_PHASE_INPUT_VERTEX;
  state.barrier.flush |= GPU_CACHE_STORAGE_WRITE;
  state.barrier.clear |= GPU_CACHE_VERTEX;
  model->lastVertexAnimation = state.tick;
}
2022-07-13 07:07:15 +00:00
// Readback
2023-05-07 03:00:43 +00:00
// Allocates a Readback of the given type and appends it to the global FIFO of
// pending readbacks, which is drained once the GPU finishes the current tick.
// The list itself holds a reference (the lovrRetain below).
static Readback* lovrReadbackCreate(ReadbackType type) {
  beginFrame();
  Readback* readback = lovrCalloc(sizeof(Readback));
  readback->ref = 1;
  readback->tick = state.tick; // Complete once the GPU finishes this tick
  readback->type = type;
  if (!state.oldestReadback) state.oldestReadback = readback;
  if (state.newestReadback) state.newestReadback->next = readback;
  state.newestReadback = readback;
  lovrRetain(readback);
  return readback;
}
// Records an asynchronous GPU->CPU copy of a Buffer range.  'extent' may be
// ~0u to read through the end of the Buffer.  Data becomes available through
// readback->blob after the readback's tick completes.
Readback* lovrReadbackCreateBuffer(Buffer* buffer, uint32_t offset, uint32_t extent) {
  if (extent == ~0u) extent = buffer->info.size - offset;
  lovrCheck(offset + extent <= buffer->info.size, "Tried to read past the end of the Buffer");
  lovrCheck(!buffer->info.format || offset % buffer->info.format->stride == 0, "Readback offset must be a multiple of Buffer's stride");
  lovrCheck(!buffer->info.format || extent % buffer->info.format->stride == 0, "Readback size must be a multiple of Buffer's stride");
  Readback* readback = lovrReadbackCreate(READBACK_BUFFER);
  readback->buffer = buffer;
  void* data = lovrMalloc(extent);
  readback->blob = lovrBlobCreate(data, extent, "Readback");
  // Staging space in the download pool; copied into the blob when processed
  readback->view = getBuffer(GPU_BUFFER_DOWNLOAD, extent, 4);
  lovrRetain(buffer);
  // Make any pending writes to the buffer visible before the transfer reads it
  gpu_barrier barrier = syncTransfer(&buffer->sync, GPU_PHASE_COPY, GPU_CACHE_TRANSFER_READ);
  gpu_sync(state.stream, &barrier, 1);
  gpu_copy_buffers(state.stream, buffer->gpu, readback->view.buffer, buffer->base + offset, readback->view.offset, extent);
  return readback;
}
2022-07-14 07:05:58 +00:00
2023-10-03 13:41:37 +00:00
// Records an asynchronous GPU->CPU copy of a region of a Texture into an
// Image.  extent[0]/extent[1] may be ~0u to read to the edge; only a single
// layer (extent[2] == 1) is supported.
Readback* lovrReadbackCreateTexture(Texture* texture, uint32_t offset[4], uint32_t extent[3]) {
  if (extent[0] == ~0u) extent[0] = texture->info.width - offset[0];
  if (extent[1] == ~0u) extent[1] = texture->info.height - offset[1];
  lovrCheck(extent[2] == 1, "Currently, only one layer can be read from a Texture");
  lovrCheck(texture->root == texture, "Can not read from a Texture view");
  lovrCheck(texture->info.usage & TEXTURE_TRANSFER, "Texture must be created with the 'transfer' usage to read from it");
  checkTextureBounds(&texture->info, offset, extent);
  Readback* readback = lovrReadbackCreate(READBACK_TEXTURE);
  readback->texture = texture;
  readback->image = lovrImageCreateRaw(extent[0], extent[1], texture->info.format, texture->info.srgb);
  // Staging space sized for the raw pixel data of the requested region
  readback->view = getBuffer(GPU_BUFFER_DOWNLOAD, measureTexture(texture->info.format, extent[0], extent[1], 1), 64);
  lovrRetain(texture);
  // Make any pending writes to the texture visible before the transfer reads it
  gpu_barrier barrier = syncTransfer(&texture->sync, GPU_PHASE_COPY, GPU_CACHE_TRANSFER_READ);
  gpu_sync(state.stream, &barrier, 1);
  gpu_copy_texture_buffer(state.stream, texture->gpu, readback->view.buffer, offset, readback->view.offset, extent);
  return readback;
}
2023-12-30 20:39:50 +00:00
// Creates a Readback that resolves GPU timestamps for 'count' passes.  The
// caller supplies the staging buffer holding the tally results and the
// TimingInfo array to fill in (ownership transfers to the Readback).
static Readback* lovrReadbackCreateTimestamp(TimingInfo* times, uint32_t count, BufferView buffer) {
  Readback* readback = lovrReadbackCreate(READBACK_TIMESTAMP);
  readback->times = times;
  readback->count = count;
  readback->view = buffer;
  return readback;
}
2022-07-14 07:05:58 +00:00
void lovrReadbackDestroy ( void * ref ) {
Readback * readback = ref ;
2023-04-30 01:25:58 +00:00
switch ( readback - > type ) {
2023-05-07 06:36:33 +00:00
case READBACK_BUFFER :
lovrRelease ( readback - > buffer , lovrBufferDestroy ) ;
lovrRelease ( readback - > blob , lovrBlobDestroy ) ;
break ;
case READBACK_TEXTURE :
lovrRelease ( readback - > texture , lovrTextureDestroy ) ;
lovrRelease ( readback - > image , lovrImageDestroy ) ;
break ;
case READBACK_TIMESTAMP :
for ( uint32_t i = 0 ; i < readback - > count ; i + + ) {
lovrRelease ( readback - > times [ i ] . pass , lovrPassDestroy ) ;
}
2024-03-11 21:38:00 +00:00
lovrFree ( readback - > times ) ;
2023-05-07 06:36:33 +00:00
break ;
default : break ;
2022-07-15 02:23:02 +00:00
}
2024-03-11 21:38:00 +00:00
lovrFree ( readback ) ;
2022-07-14 07:05:58 +00:00
}
// Returns true once the GPU has finished the tick the readback was recorded on.
bool lovrReadbackIsComplete(Readback* readback) {
  return gpu_is_complete(readback->tick);
}
// Blocks until the Readback's GPU tick completes.  Returns true if it
// actually waited, false if the readback was already complete.
bool lovrReadbackWait(Readback* readback) {
  if (lovrReadbackIsComplete(readback)) {
    return false;
  }

  // If the readback was recorded during the still-open frame, its work hasn't
  // been submitted to the GPU yet, so submit first to avoid waiting forever
  if (readback->tick == state.tick && state.active) {
    lovrGraphicsSubmit(NULL, 0);
  }

  beginFrame();

  bool waited = gpu_wait_tick(readback->tick);

  if (waited) {
    processReadbacks(); // Copy finished staging data into blobs/images
  }

  return waited;
}
2023-10-04 14:43:00 +00:00
// Returns the completed data of a formatted Buffer readback, along with its
// format and element count.  Returns NULL when the readback hasn't completed
// yet or doesn't carry formatted buffer data.
void* lovrReadbackGetData(Readback* readback, DataField** format, uint32_t* count) {
  if (!lovrReadbackIsComplete(readback)) return NULL;

  bool formatted = readback->type == READBACK_BUFFER && readback->buffer->info.format;
  if (!formatted) return NULL;

  DataField* bufferFormat = readback->buffer->info.format;
  *format = bufferFormat;
  *count = (uint32_t) (readback->blob->size / bufferFormat->stride);
  return readback->blob->data;
}
2022-07-13 07:07:15 +00:00
2022-07-17 16:50:15 +00:00
// Returns the Blob with the readback's data, or NULL if not complete yet.
Blob* lovrReadbackGetBlob(Readback* readback) {
  return lovrReadbackIsComplete(readback) ? readback->blob : NULL;
}
2022-07-14 07:05:58 +00:00
// Returns the Image with the readback's pixels, or NULL if not complete yet.
Image* lovrReadbackGetImage(Readback* readback) {
  return lovrReadbackIsComplete(readback) ? readback->image : NULL;
}
2023-04-30 06:02:37 +00:00
// Pass
// Allocates scratch CPU memory from the Pass's arena; freed in bulk on reset.
static void* lovrPassAllocate(Pass* pass, size_t size) {
  return tempAlloc(&pass->allocator, size);
}
2023-12-30 20:39:50 +00:00
// Suballocates transient GPU stream-buffer space owned by this Pass.
static BufferView lovrPassGetBuffer(Pass* pass, uint32_t size, size_t align) {
  return allocateBuffer(&pass->buffers, GPU_BUFFER_STREAM, size, align);
}
2023-04-30 06:02:37 +00:00
// Releases everything a Pass has accumulated since the last reset: pooled
// stream buffers, pipeline-stack references, the sampler, per-compute and
// per-draw shader/material references, and tracked resource accesses.  The
// Pass remains usable; lovrPassReset reinitializes its state.
static void lovrPassRelease(Pass* pass) {
  // Chain all of the Pass's full buffers onto the end of the global freelist
  if (pass->buffers.freelist) {
    BufferBlock** list = &state.bufferAllocators[GPU_BUFFER_STREAM].freelist;
    while (*list) list = (BufferBlock**) &(*list)->next;
    *list = pass->buffers.freelist;
    pass->buffers.freelist = NULL;
  }

  // Walk back down the pipeline stack, releasing retained objects at each level
  if (pass->pipeline) {
    for (uint32_t i = 0; i <= pass->pipelineIndex; i++) {
      lovrRelease(pass->pipeline->material, lovrMaterialDestroy);
      lovrRelease(pass->pipeline->shader, lovrShaderDestroy);
      lovrRelease(pass->pipeline->font, lovrFontDestroy);
      pass->pipeline--;
    }
    pass->pipelineIndex = 0;
  }
  lovrRelease(pass->sampler, lovrSamplerDestroy);

  for (uint32_t i = 0; i < pass->computeCount; i++) {
    lovrRelease(pass->computes[i].shader, lovrShaderDestroy);
  }

  for (uint32_t i = 0; i < pass->drawCount; i++) {
    Draw* draw = &pass->draws[i];
    lovrRelease(draw->shader, lovrShaderDestroy);
    lovrRelease(draw->material, lovrMaterialDestroy);
  }

  // Release every tracked Buffer/Texture; the bitmask records which entries
  // are textures so the right destructor is used
  for (uint32_t i = 0; i < COUNTOF(pass->access); i++) {
    for (AccessBlock* block = pass->access[i]; block != NULL; block = block->next) {
      for (uint32_t j = 0; j < block->count; j++) {
        bool texture = block->textureMask & (1ull << j);
        lovrRelease(block->list[j].object, texture ? lovrTextureDestroy : lovrBufferDestroy);
      }
    }
  }
}
2022-11-08 06:45:10 +00:00
2023-04-30 06:02:37 +00:00
// Returns the shared Pass that renders to the window, creating it on first
// use.  Returns NULL when the window texture is unavailable (e.g. during a
// resize).  The pass is reset and retargeted at the window every call.
Pass* lovrGraphicsGetWindowPass(void) {
  if (!state.windowPass) {
    state.windowPass = lovrPassCreate();
  }

  Texture* window = lovrGraphicsGetWindowTexture();

  if (!window) {
    return NULL; // The window may become unavailable during a resize
  }

  lovrPassReset(state.windowPass);
  Texture* textures[4] = { state.window };
  // Clear color comes from the global background color
  memcpy(state.windowPass->canvas.color[0].clear, state.background, 4 * sizeof(float));
  lovrPassSetCanvas(state.windowPass, textures, NULL, state.depthFormat, state.config.antialias ? 4 : 1);
  return state.windowPass;
}
2023-04-30 06:02:37 +00:00
// Allocates a new Pass backed by a virtual-memory scratch arena (256MB of
// address space reserved, 4KB committed up front; grows on demand), then
// resets it to default state.
Pass* lovrPassCreate(void) {
  Pass* pass = lovrCalloc(sizeof(Pass));
  pass->ref = 1;

  pass->allocator.limit = 1 << 28;
  pass->allocator.length = 1 << 12;
  pass->allocator.memory = os_vm_init(pass->allocator.limit);
  os_vm_commit(pass->allocator.memory, pass->allocator.length);

  lovrPassReset(pass);

  return pass;
}
2022-06-04 18:54:05 +00:00
2023-04-30 06:02:37 +00:00
// Destructor for Pass: releases retained objects, canvas textures, tally
// resources, recycles the in-flight stream buffer block, and frees the arena.
void lovrPassDestroy(void* ref) {
  Pass* pass = ref;
  lovrPassRelease(pass);
  for (uint32_t i = 0; i < COUNTOF(pass->canvas.color); i++) {
    lovrRelease(pass->canvas.color[i].texture, lovrTextureDestroy);
  }
  lovrRelease(pass->canvas.depth.texture, lovrTextureDestroy);
  lovrRelease(pass->tally.buffer, lovrBufferDestroy);
  if (pass->tally.gpu) {
    gpu_tally_destroy(pass->tally.gpu);
    lovrRelease(pass->tally.tempBuffer, lovrBufferDestroy);
  }
  // The current block may still be in use by the GPU, so it's returned to the
  // allocator tagged with the current tick rather than freed outright
  if (pass->buffers.current) {
    pass->buffers.current->tick = state.tick;
    freeBlock(&state.bufferAllocators[GPU_BUFFER_STREAM], pass->buffers.current);
  }
  os_vm_free(pass->allocator.memory, pass->allocator.limit);
  lovrFree(pass);
}
2022-05-30 19:29:00 +00:00
2023-04-30 06:02:37 +00:00
// Returns a Pass to a blank recording state: releases everything it
// referenced, rewinds the scratch arena, and restores default transform,
// pipeline, camera, viewport, and sampler state.  The canvas configuration
// itself is kept.
void lovrPassReset(Pass* pass) {
  lovrPassRelease(pass);
  pass->allocator.cursor = 0;
  pass->access[ACCESS_RENDER] = NULL;
  pass->access[ACCESS_COMPUTE] = NULL;
  pass->flags = DIRTY_BINDINGS;
  // Scratch-allocated state lives in the arena just rewound above
  pass->transform = lovrPassAllocate(pass, TRANSFORM_STACK_SIZE * 16 * sizeof(float));
  pass->pipeline = lovrPassAllocate(pass, PIPELINE_STACK_SIZE * sizeof(Pipeline));
  pass->bindings = lovrPassAllocate(pass, 32 * sizeof(gpu_binding));
  pass->uniforms = NULL;
  pass->computeCount = 0;
  pass->computes = NULL;
  pass->drawCount = 0;
  pass->draws = lovrPassAllocate(pass, pass->drawCapacity * sizeof(Draw));
  memset(&pass->geocache, 0, sizeof(pass->geocache));

  pass->tally.active = false;
  pass->tally.count = 0;

  pass->transformIndex = 0;
  mat4_identity(pass->transform);

  // Default pipeline state at the bottom of the pipeline stack
  pass->pipelineIndex = 0;
  memset(pass->pipeline, 0, sizeof(Pipeline));
  pass->pipeline->mode = DRAW_TRIANGLES;
  pass->pipeline->lastVertexFormat = ~0u; // No cached vertex format yet
  pass->pipeline->color[0] = 1.f;
  pass->pipeline->color[1] = 1.f;
  pass->pipeline->color[2] = 1.f;
  pass->pipeline->color[3] = 1.f;
  pass->pipeline->info.pass = pass->gpu;
  // GEQUAL test pairs with the far=0 projection below (reversed-Z style)
  pass->pipeline->info.depth.test = GPU_COMPARE_GEQUAL;
  pass->pipeline->info.depth.write = true;
  pass->pipeline->info.stencil.testMask = 0xff;
  pass->pipeline->info.stencil.writeMask = 0xff;

  for (uint32_t i = 0; i < 4; i++) {
    lovrPassSetBlendMode(pass, i, BLEND_ALPHA, BLEND_ALPHA_MULTIPLY);
    pass->pipeline->info.colorMask[i] = 0xf;
  }
  pass->cameraCount = 0;
  // Default cameras: identity view and a perspective projection per view
  if (pass->canvas.views > 0) {
    float viewMatrix[16];
    float projection[16];
    mat4_identity(viewMatrix);
    mat4_perspective(projection, 1.2f, (float) pass->canvas.width / pass->canvas.height, .01f, 0.f);
    for (uint32_t i = 0; i < pass->canvas.views; i++) {
      lovrPassSetViewMatrix(pass, i, viewMatrix);
      lovrPassSetProjection(pass, i, projection);
    }
  }

  memset(pass->viewport, 0, sizeof(pass->viewport));
  memset(pass->scissor, 0, sizeof(pass->scissor));

  pass->sampler = NULL;
}
2023-01-23 07:26:15 +00:00
2023-06-23 21:41:39 +00:00
// Refreshes and returns the Pass's statistics (draw/compute counts and
// scratch arena usage).
const PassStats* lovrPassGetStats(Pass* pass) {
  PassStats* stats = &pass->stats;
  stats->draws = pass->drawCount;
  stats->computes = pass->computeCount;
  stats->cpuMemoryReserved = pass->allocator.length;
  stats->cpuMemoryUsed = pass->allocator.cursor;
  return stats;
}
2023-01-23 07:26:15 +00:00
2023-04-30 06:02:37 +00:00
// Reports the Pass's current canvas configuration through the out-parameters.
void lovrPassGetCanvas(Pass* pass, Texture* textures[4], Texture** depthTexture, uint32_t* depthFormat, uint32_t* samples) {
  Canvas* canvas = &pass->canvas;
  for (uint32_t i = 0; i < COUNTOF(canvas->color); i++) {
    textures[i] = canvas->color[i].texture;
  }
  *depthTexture = canvas->depth.texture;
  *depthFormat = canvas->depth.format;
  *samples = canvas->samples;
}
// Replaces the Pass's render targets.  Releases any previous attachments, validates the new
// ones against each other and against GPU limits, retains them, then picks a matching gpu
// render pass and resets the Pass.  `textures` holds up to 4 color attachments (NULL-packed
// from the front); `depthFormat` is used when no depth texture is given.
void lovrPassSetCanvas(Pass* pass, Texture* textures[4], Texture* depthTexture, uint32_t depthFormat, uint32_t samples) {
  Canvas* canvas = &pass->canvas;

  // Release the old color attachments
  for (uint32_t i = 0; i < canvas->count; i++) {
    lovrRelease(canvas->color[i].texture, lovrTextureDestroy);
    canvas->color[i].texture = NULL;
  }

  canvas->count = 0;

  lovrRelease(canvas->depth.texture, lovrTextureDestroy);
  canvas->depth.texture = NULL;
  canvas->depth.format = 0;

  // Template TextureInfo every attachment must match.  Bug fix: this was previously computed
  // unconditionally as `textures[0] ? &textures[0]->info : &depthTexture->info`, which applies
  // `->` to a NULL depthTexture (undefined behavior) when no attachments are given at all.
  const TextureInfo* t = NULL;

  if (textures[0] || depthTexture) {
    t = textures[0] ? &textures[0]->info : &depthTexture->info;
    canvas->width = t->width;
    canvas->height = t->height;
    canvas->views = t->layers;
    lovrCheck(t->width <= state.limits.renderSize[0], "Pass canvas width (%d) exceeds the renderSize limit of this GPU (%d)", t->width, state.limits.renderSize[0]);
    lovrCheck(t->height <= state.limits.renderSize[1], "Pass canvas height (%d) exceeds the renderSize limit of this GPU (%d)", t->height, state.limits.renderSize[1]);
    lovrCheck(t->layers <= state.limits.renderSize[2], "Pass canvas layer count (%d) exceeds the renderSize limit of this GPU (%d)", t->layers, state.limits.renderSize[2]);
    lovrCheck(samples == 1 || samples == 4, "Currently MSAA must be 1 or 4");
    canvas->samples = samples;
    canvas->resolve = samples > 1;
  } else {
    // No attachments: clear the whole canvas description
    memset(canvas, 0, sizeof(Canvas));
  }

  // Validate and retain each color attachment (this loop only runs when textures[0] != NULL,
  // so `t` is always valid inside it)
  for (uint32_t i = 0; i < COUNTOF(canvas->color) && textures[i]; i++, canvas->count++) {
    const TextureInfo* texture = &textures[i]->info;
    bool renderable = texture->format == GPU_FORMAT_SURFACE || (state.features.formats[texture->format][texture->srgb] & GPU_FEATURE_RENDER);
    lovrCheck(!isDepthFormat(texture->format), "Unable to use a depth texture as a color target");
    lovrCheck(renderable, "This GPU does not support rendering to the texture format/encoding used by canvas texture #%d", i + 1);
    lovrCheck(texture->usage & TEXTURE_RENDER, "Texture must be created with the 'render' flag to render to it");
    lovrCheck(texture->width == t->width, "Canvas texture sizes must match");
    lovrCheck(texture->height == t->height, "Canvas texture sizes must match");
    lovrCheck(texture->layers == t->layers, "Canvas texture layer counts must match");
    canvas->color[i].texture = textures[i];
    lovrRetain(textures[i]);
  }

  if (depthTexture) {
    const TextureInfo* texture = &depthTexture->info;
    lovrCheck(isDepthFormat(texture->format), "Canvas depth textures must have a depth format");
    lovrCheck(texture->usage & TEXTURE_RENDER, "Texture must be created with the 'render' flag to render to it");
    lovrCheck(texture->width == t->width, "Canvas texture sizes must match");
    lovrCheck(texture->height == t->height, "Canvas texture sizes must match");
    lovrCheck(texture->layers == t->layers, "Canvas texture layer counts must match");
    lovrCheck(samples == 1 || state.features.depthResolve, "This GPU does not support resolving depth textures, MSAA should be set to 1");
    canvas->depth.texture = depthTexture;
    canvas->depth.format = texture->format;
    lovrRetain(depthTexture);
  } else if (depthFormat) {
    lovrCheck(isDepthFormat(depthFormat), "Expected depth format for canvas depth (received color format)");
    lovrCheck(state.features.formats[depthFormat][0] & GPU_FEATURE_RENDER, "Canvas depth format is not supported by this GPU");
    canvas->depth.format = depthFormat;
  }

  pass->gpu = getPass(canvas);

  lovrPassReset(pass);
}
2022-08-06 04:05:02 +00:00
2023-04-30 06:02:37 +00:00
// Reports the load action of each attachment, plus its clear value when the action is "clear".
// Clear colors are stored linear internally and converted back to gamma for the caller.
void lovrPassGetClear(Pass* pass, LoadAction loads[4], float clears[4][4], LoadAction* depthLoad, float* depthClear) {
  Canvas* canvas = &pass->canvas;

  for (uint32_t i = 0; i < canvas->count; i++) {
    LoadAction load = canvas->color[i].load;
    loads[i] = load;
    if (load != LOAD_CLEAR) continue;
    clears[i][0] = lovrMathLinearToGamma(canvas->color[i].clear[0]);
    clears[i][1] = lovrMathLinearToGamma(canvas->color[i].clear[1]);
    clears[i][2] = lovrMathLinearToGamma(canvas->color[i].clear[2]);
    clears[i][3] = canvas->color[i].clear[3]; // Alpha is not gamma-encoded
  }

  *depthLoad = canvas->depth.load;
  *depthClear = canvas->depth.clear;
}
2022-06-06 19:38:15 +00:00
2023-04-30 06:02:37 +00:00
// Sets the load action and clear value for each attachment.  Load actions are baked into the
// gpu render pass object, so a new one is looked up whenever any load action changes.
void lovrPassSetClear(Pass* pass, LoadAction loads[4], float clears[4][4], LoadAction depthLoad, float depthClear) {
  Canvas* canvas = &pass->canvas;
  bool changed = false;

  for (uint32_t i = 0; i < canvas->count; i++) {
    changed |= loads[i] != canvas->color[i].load;
    canvas->color[i].load = loads[i];
    if (loads[i] == LOAD_CLEAR) {
      // Clear colors are converted from gamma to linear for the GPU (alpha stays as-is)
      canvas->color[i].clear[0] = lovrMathGammaToLinear(clears[i][0]);
      canvas->color[i].clear[1] = lovrMathGammaToLinear(clears[i][1]);
      canvas->color[i].clear[2] = lovrMathGammaToLinear(clears[i][2]);
      canvas->color[i].clear[3] = clears[i][3];
    } else {
      memset(canvas->color[i].clear, 0, 4 * sizeof(float));
    }
  }

  changed |= depthLoad != canvas->depth.load;
  canvas->depth.load = depthLoad;
  canvas->depth.clear = depthLoad == LOAD_CLEAR ? depthClear : 0.f;

  if (changed) pass->gpu = getPass(canvas);
}
2022-04-29 05:30:31 +00:00
2023-04-30 06:02:37 +00:00
// Returns the number of color attachments; optionally reports whether a depth attachment
// (texture or format-only) is present via the out parameter.
uint32_t lovrPassGetAttachmentCount(Pass* pass, bool* depth) {
  if (depth) {
    *depth = pass->canvas.depth.texture || pass->canvas.depth.format;
  }
  return pass->canvas.count;
}
2022-04-29 05:30:31 +00:00
2022-08-26 04:57:15 +00:00
// Canvas dimension getters.  All values come from the attachments set via lovrPassSetCanvas.

// Width of the render targets, in pixels.
uint32_t lovrPassGetWidth(Pass* pass) {
  return pass->canvas.width;
}

// Height of the render targets, in pixels.
uint32_t lovrPassGetHeight(Pass* pass) {
  return pass->canvas.height;
}

// Number of views (array layers rendered simultaneously, e.g. 2 for stereo).
uint32_t lovrPassGetViewCount(Pass* pass) {
  return pass->canvas.views;
}
2023-06-06 02:44:53 +00:00
// Returns the current batch of cameras (one Camera per view).  While DIRTY_CAMERA is set the
// most recent batch is still being edited and is returned directly; otherwise a new batch is
// allocated from the pass allocator, seeded from the previous batch, and marked dirty.
static Camera* getCamera(Pass* pass) {
  if (pass->flags & DIRTY_CAMERA) {
    return pass->cameras + (pass->cameraCount - 1) * pass->canvas.views;
  }

  uint32_t views = pass->canvas.views;
  uint32_t stride = sizeof(Camera) * views;
  uint32_t count = pass->cameraCount;
  Camera* cameras = lovrPassAllocate(pass, (count + 1) * stride);
  Camera* newCamera = cameras + count * views;
  if (pass->cameras) memcpy(cameras, pass->cameras, count * stride);

  // Seed the new batch from the previous one.  Bug fix: the old code called
  // memcpy(newCamera, newCamera - views, count > 0 ? stride : 0), which forms the
  // out-of-bounds pointer `newCamera - views` (and passes it to memcpy) when count == 0 —
  // undefined behavior even for a zero-length copy.
  if (count > 0) memcpy(newCamera, newCamera - views, stride);

  pass->flags |= DIRTY_CAMERA;
  pass->cameras = cameras;
  pass->cameraCount++;
  return newCamera;
}
2022-10-12 17:57:43 +00:00
// Per-view camera accessors.  Each call goes through getCamera, which snapshots the camera
// array lazily so edits apply only to subsequent draws.

void lovrPassGetViewMatrix(Pass* pass, uint32_t index, float viewMatrix[16]) {
  lovrCheck(index < pass->canvas.views, "Invalid view index '%d'", index + 1);
  Camera* camera = getCamera(pass);
  mat4_init(viewMatrix, camera[index].viewMatrix);
}

void lovrPassSetViewMatrix(Pass* pass, uint32_t index, float viewMatrix[16]) {
  lovrCheck(index < pass->canvas.views, "Invalid view index '%d'", index + 1);
  Camera* camera = getCamera(pass);
  mat4_init(camera[index].viewMatrix, viewMatrix);
}

void lovrPassGetProjection(Pass* pass, uint32_t index, float projection[16]) {
  lovrCheck(index < pass->canvas.views, "Invalid view index '%d'", index + 1);
  Camera* camera = getCamera(pass);
  mat4_init(projection, camera[index].projection);
}

void lovrPassSetProjection(Pass* pass, uint32_t index, float projection[16]) {
  lovrCheck(index < pass->canvas.views, "Invalid view index '%d'", index + 1);
  Camera* camera = getCamera(pass);
  mat4_init(camera[index].projection, projection);
}
2022-07-17 15:59:39 +00:00
2023-04-30 06:02:37 +00:00
// Viewport and scissor accessors.  The viewport is 6 floats (x, y, w, h, minDepth, maxDepth);
// the scissor is 4 integers (x, y, w, h).

void lovrPassGetViewport(Pass* pass, float viewport[6]) {
  memcpy(viewport, pass->viewport, 6 * sizeof(float));
}

void lovrPassSetViewport(Pass* pass, float viewport[6]) {
  memcpy(pass->viewport, viewport, 6 * sizeof(float));
}

void lovrPassGetScissor(Pass* pass, uint32_t scissor[4]) {
  memcpy(scissor, pass->scissor, 4 * sizeof(uint32_t));
}

void lovrPassSetScissor(Pass* pass, uint32_t scissor[4]) {
  memcpy(pass->scissor, scissor, 4 * sizeof(uint32_t));
}
2022-05-07 00:26:38 +00:00
// Pushes a copy of the transform or pipeline state onto the corresponding stack.  Pipeline
// pushes retain the objects referenced by the copied state so the pop can release them.
void lovrPassPush(Pass* pass, StackType stack) {
  if (stack == STACK_TRANSFORM) {
    lovrCheck(++pass->transformIndex < TRANSFORM_STACK_SIZE, "%s stack overflow (more pushes than pops?)", "Transform");
    mat4_init(pass->transform + 16, pass->transform);
    pass->transform += 16;
  } else if (stack == STACK_STATE) {
    lovrCheck(++pass->pipelineIndex < PIPELINE_STACK_SIZE, "%s stack overflow (more pushes than pops?)", "Pipeline");
    memcpy(pass->pipeline + 1, pass->pipeline, sizeof(Pipeline));
    pass->pipeline++;
    // The new stack entry shares refcounted objects with the old one; take references
    lovrRetain(pass->pipeline->font);
    lovrRetain(pass->pipeline->shader);
    lovrRetain(pass->pipeline->material);
  }
}
// Pops the transform or pipeline stack, undoing the matching lovrPassPush.  Pipeline pops
// release the references taken by the push and mark the restored state dirty so it gets
// re-bound on the next draw.
void lovrPassPop(Pass* pass, StackType stack) {
  if (stack == STACK_TRANSFORM) {
    // Underflow makes the unsigned index wrap past the stack size, failing the check
    lovrCheck(--pass->transformIndex < TRANSFORM_STACK_SIZE, "%s stack underflow (more pops than pushes?)", "Transform");
    pass->transform -= 16;
  } else if (stack == STACK_STATE) {
    lovrRelease(pass->pipeline->font, lovrFontDestroy);
    lovrRelease(pass->pipeline->shader, lovrShaderDestroy);
    lovrRelease(pass->pipeline->material, lovrMaterialDestroy);
    lovrCheck(--pass->pipelineIndex < PIPELINE_STACK_SIZE, "%s stack underflow (more pops than pushes?)", "Pipeline");
    pass->pipeline--;
    pass->pipeline->dirty = true;
  }
}
// Transform stack operations.  Each one mutates the top of the transform stack in place.

// Resets the current transform to identity.
void lovrPassOrigin(Pass* pass) {
  mat4_identity(pass->transform);
}

// Post-multiplies a translation onto the current transform.
void lovrPassTranslate(Pass* pass, vec3 translation) {
  mat4_translate(pass->transform, translation[0], translation[1], translation[2]);
}

// Post-multiplies a rotation (quaternion) onto the current transform.
void lovrPassRotate(Pass* pass, quat rotation) {
  mat4_rotateQuat(pass->transform, rotation);
}

// Post-multiplies a scale onto the current transform.
void lovrPassScale(Pass* pass, vec3 scale) {
  mat4_scale(pass->transform, scale[0], scale[1], scale[2]);
}

// Post-multiplies an arbitrary matrix onto the current transform.
void lovrPassTransform(Pass* pass, mat4 transform) {
  mat4_mul(pass->transform, transform);
}
2022-05-11 19:50:26 +00:00
// Enables/disables alpha-to-coverage multisampling; marks the pipeline dirty only on change.
void lovrPassSetAlphaToCoverage(Pass* pass, bool enabled) {
  Pipeline* pipeline = pass->pipeline;
  pipeline->dirty |= enabled != pipeline->info.multisample.alphaToCoverage;
  pipeline->info.multisample.alphaToCoverage = enabled;
}
2023-02-05 23:06:18 +00:00
// Sets the blend mode for color attachment `index`.  BLEND_NONE disables blending entirely;
// other modes are looked up in a fixed table of gpu blend factor/op triples.  The table's
// exact values define the blend math, so they are left untouched here.
void lovrPassSetBlendMode(Pass* pass, uint32_t index, BlendMode mode, BlendAlphaMode alphaMode) {
  if (mode == BLEND_NONE) {
    // Only dirty the pipeline if blending was previously enabled on this attachment
    pass->pipeline->dirty |= pass->pipeline->info.blend[index].enabled;
    memset(&pass->pipeline->info.blend[index], 0, sizeof(gpu_blend_state));
    return;
  }

  gpu_blend_state* blend = &pass->pipeline->info.blend[index];

  // Color/alpha are (src factor, dst factor, op); see the gpu blend enums in core/gpu.h
  static const gpu_blend_state table[] = {
    [BLEND_ALPHA] = {
      .color = { GPU_BLEND_SRC_ALPHA, GPU_BLEND_ONE_MINUS_SRC_ALPHA, GPU_BLEND_ADD },
      .alpha = { GPU_BLEND_ONE, GPU_BLEND_ONE_MINUS_SRC_ALPHA, GPU_BLEND_ADD }
    },
    [BLEND_ADD] = {
      .color = { GPU_BLEND_SRC_ALPHA, GPU_BLEND_ONE, GPU_BLEND_ADD },
      .alpha = { GPU_BLEND_ZERO, GPU_BLEND_ONE, GPU_BLEND_ADD }
    },
    [BLEND_SUBTRACT] = {
      .color = { GPU_BLEND_SRC_ALPHA, GPU_BLEND_ONE, GPU_BLEND_RSUB },
      .alpha = { GPU_BLEND_ZERO, GPU_BLEND_ONE, GPU_BLEND_RSUB }
    },
    [BLEND_MULTIPLY] = {
      .color = { GPU_BLEND_DST_COLOR, GPU_BLEND_ZERO, GPU_BLEND_ADD },
      .alpha = { GPU_BLEND_DST_COLOR, GPU_BLEND_ZERO, GPU_BLEND_ADD }
    },
    [BLEND_LIGHTEN] = {
      .color = { GPU_BLEND_SRC_ALPHA, GPU_BLEND_ZERO, GPU_BLEND_MAX },
      .alpha = { GPU_BLEND_ONE, GPU_BLEND_ZERO, GPU_BLEND_MAX }
    },
    [BLEND_DARKEN] = {
      .color = { GPU_BLEND_SRC_ALPHA, GPU_BLEND_ZERO, GPU_BLEND_MIN },
      .alpha = { GPU_BLEND_ONE, GPU_BLEND_ZERO, GPU_BLEND_MIN }
    },
    [BLEND_SCREEN] = {
      .color = { GPU_BLEND_SRC_ALPHA, GPU_BLEND_ONE_MINUS_SRC_COLOR, GPU_BLEND_ADD },
      .alpha = { GPU_BLEND_ONE, GPU_BLEND_ONE_MINUS_SRC_COLOR, GPU_BLEND_ADD }
    },
  };

  *blend = table[mode];
  blend->enabled = true;

  // Premultiplied alpha: source color is already scaled by alpha, so don't scale again.
  // BLEND_MULTIPLY is excluded because its source factor isn't SRC_ALPHA to begin with.
  if (alphaMode == BLEND_PREMULTIPLIED && mode != BLEND_MULTIPLY) {
    blend->color.src = GPU_BLEND_ONE;
  }

  pass->pipeline->dirty = true;
}
2022-05-12 00:30:08 +00:00
// Sets the draw color.  Input is in gamma space; RGB is stored linear (alpha is untouched).
void lovrPassSetColor(Pass* pass, float color[4]) {
  float* stored = pass->pipeline->color;
  stored[0] = lovrMathGammaToLinear(color[0]);
  stored[1] = lovrMathGammaToLinear(color[1]);
  stored[2] = lovrMathGammaToLinear(color[2]);
  stored[3] = color[3];
}
2023-02-05 23:06:18 +00:00
// Sets the RGBA write mask for color attachment `index` (bit 0 = red ... bit 3 = alpha).
void lovrPassSetColorWrite(Pass* pass, uint32_t index, bool r, bool g, bool b, bool a) {
  uint8_t mask = (r << 0) | (g << 1) | (b << 2) | (a << 3);
  Pipeline* pipeline = pass->pipeline;
  pipeline->dirty |= pipeline->info.colorMask[index] != mask;
  pipeline->info.colorMask[index] = mask;
}
// Depth state setters.  Each one marks the pipeline dirty only when the value changes.

void lovrPassSetDepthTest(Pass* pass, CompareMode test) {
  Pipeline* pipeline = pass->pipeline;
  pipeline->dirty |= pipeline->info.depth.test != (gpu_compare_mode) test;
  pipeline->info.depth.test = (gpu_compare_mode) test;
}

void lovrPassSetDepthWrite(Pass* pass, bool write) {
  Pipeline* pipeline = pass->pipeline;
  pipeline->dirty |= pipeline->info.depth.write != write;
  pipeline->info.depth.write = write;
}

void lovrPassSetDepthOffset(Pass* pass, float offset, float sloped) {
  Pipeline* pipeline = pass->pipeline;
  pipeline->info.rasterizer.depthOffset = offset;
  pipeline->info.rasterizer.depthOffsetSloped = sloped;
  pipeline->dirty = true; // Unconditional: float comparison isn't worth the trouble
}

void lovrPassSetDepthClamp(Pass* pass, bool clamp) {
  if (state.features.depthClamp) { // Silently ignored when unsupported
    Pipeline* pipeline = pass->pipeline;
    pipeline->dirty |= pipeline->info.rasterizer.depthClamp != clamp;
    pipeline->info.rasterizer.depthClamp = clamp;
  }
}
2023-06-23 21:41:39 +00:00
// Sets which triangle faces are culled; marks the pipeline dirty only on change.
void lovrPassSetFaceCull(Pass* pass, CullMode mode) {
  Pipeline* pipeline = pass->pipeline;
  pipeline->dirty |= pipeline->info.rasterizer.cullMode != (gpu_cull_mode) mode;
  pipeline->info.rasterizer.cullMode = (gpu_cull_mode) mode;
}
2022-07-18 02:53:31 +00:00
// Sets the active Font, swapping refcounts.  No-op when the font is unchanged.
void lovrPassSetFont(Pass* pass, Font* font) {
  if (pass->pipeline->font == font) return;
  lovrRetain(font); // Retain before release in case of shared ownership
  lovrRelease(pass->pipeline->font, lovrFontDestroy);
  pass->pipeline->font = font;
}
2023-10-31 23:08:00 +00:00
// Sets the active Material, falling back to the default material when NULL is passed.
// Swaps refcounts; no-op when the material is unchanged.
void lovrPassSetMaterial(Pass* pass, Material* material) {
  if (!material) material = state.defaultMaterial;
  if (pass->pipeline->material == material) return;
  lovrRetain(material);
  lovrRelease(pass->pipeline->material, lovrMaterialDestroy);
  pass->pipeline->material = material;
}
2023-06-11 05:06:29 +00:00
// Sets the draw mode (points/lines/triangles) used by subsequent mesh draws.
void lovrPassSetMeshMode(Pass* pass, DrawMode mode) {
  pass->pipeline->mode = mode;
}
2022-06-08 03:42:10 +00:00
// Sets the default Sampler used by built-in shaders, swapping refcounts.  No-op on no change.
void lovrPassSetSampler(Pass* pass, Sampler* sampler) {
  if (sampler == pass->sampler) return;
  lovrRetain(sampler);
  lovrRelease(pass->sampler, lovrSamplerDestroy);
  pass->sampler = sampler;
}
2022-05-11 19:50:26 +00:00
// Sets the active Shader (NULL deactivates).  Migrates compatible bindings and uniform values
// from the previously active shader so state survives shader changes, and fills remaining
// resource slots with default objects.  Marks bindings/uniforms dirty so they are re-flushed
// on the next draw.
void lovrPassSetShader(Pass* pass, Shader* shader) {
  Shader* old = pass->pipeline->shader;

  if (shader == old) {
    return;
  }

  if (shader) {
    gpu_binding bindings[32]; // MAX_SHADER_RESOURCES-sized scratch for the new binding set
    // Ensure there's a valid binding for every resource in the new shader. If the old shader had a
    // binding with the same name and type, then use that, otherwise use a "default" resource.
    for (uint32_t i = 0; i < shader->resourceCount; i++) {
      ShaderResource* resource = &shader->resources[i];
      bool useDefault = true;
      if (old) {
        ShaderResource* other = old->resources;
        for (uint32_t j = 0; j < old->resourceCount; j++, other++) {
          // Match by name hash and resource type; carry the old binding over on a match
          if (other->hash == resource->hash && other->type == resource->type) {
            bindings[resource->binding] = pass->bindings[other->binding];
            useDefault = false;
            break;
          }
        }
      }

      if (useDefault) {
        // NOTE(review): the match branch indexes bindings[resource->binding] while this branch
        // indexes bindings[i].  These agree only if resource i always occupies binding slot i —
        // presumably bindings are dense (the memcpy below copies resourceCount entries from
        // index 0, which suggests the same invariant), but confirm against the Shader layout.
        switch (resource->type) {
          case GPU_SLOT_UNIFORM_BUFFER:
          case GPU_SLOT_STORAGE_BUFFER:
            bindings[i].buffer.object = state.defaultBuffer->gpu;
            bindings[i].buffer.offset = state.defaultBuffer->base;
            bindings[i].buffer.extent = state.defaultBuffer->info.size;
            break;
          case GPU_SLOT_SAMPLED_TEXTURE:
          case GPU_SLOT_STORAGE_TEXTURE:
            bindings[i].texture = state.defaultTexture->gpu;
            break;
          case GPU_SLOT_SAMPLER:
            bindings[i].sampler = state.defaultSamplers[FILTER_LINEAR]->gpu;
            break;
          default: break;
        }
      }
    }

    memcpy(pass->bindings, bindings, shader->resourceCount * sizeof(gpu_binding));
    pass->flags |= DIRTY_BINDINGS;

    // Uniform data is preserved for uniforms with the same name/size (this might be slow...)
    if (shader->uniformCount > 0) {
      void* uniforms = lovrPassAllocate(pass, shader->uniformSize);

      if (old && old->uniformCount > 0) {
        // O(new * old) scan; a uniform is carried over when name hash, stride, and length match
        for (uint32_t i = 0; i < shader->uniformCount; i++) {
          DataField* uniform = &shader->uniforms[i];
          DataField* other = old->uniforms;
          for (uint32_t j = 0; j < old->uniformCount; j++, other++) {
            if (uniform->hash == other->hash && uniform->stride == other->stride && uniform->length == other->length) {
              void* src = (char*) pass->uniforms + other->offset;
              void* dst = (char*) uniforms + uniform->offset;
              size_t size = uniform->stride * MAX(uniform->length, 1);
              memcpy(dst, src, size);
            }
          }
        }
      } else {
        // No previous uniforms to migrate; start from zeros
        memset(uniforms, 0, shader->uniformSize);
      }

      pass->uniforms = uniforms;
      pass->flags |= DIRTY_UNIFORMS;
    }

    // Custom vertex attributes must be reset: their locations may differ even if the names match
    if (shader->hasCustomAttributes) {
      pass->pipeline->lastVertexBuffer = NULL;
    }

    pass->pipeline->info.shader = shader->gpu;
    pass->pipeline->info.flags = shader->flags;
    pass->pipeline->info.flagCount = shader->overrideCount;
    lovrRetain(shader);
  }

  lovrRelease(old, lovrShaderDestroy);
  pass->pipeline->shader = shader;
  pass->pipeline->dirty = true;
}
// Configures the stencil test.  Requires a depth attachment with a stencil aspect.  The
// compare mode is reversed before reaching the GPU (the depth buffer is reversed).
void lovrPassSetStencilTest(Pass* pass, CompareMode test, uint8_t value, uint8_t mask) {
  TextureFormat depthFormat = pass->canvas.depth.texture ? pass->canvas.depth.texture->info.format : pass->canvas.depth.format;
  lovrCheck(depthFormat == FORMAT_D32FS8 || depthFormat == FORMAT_D24S8, "Trying to set stencil mode, but Pass depth texture does not use a stencil format");

  // The hardware has a single stencil reference value shared between test and write
  bool hasReplace =
    pass->pipeline->info.stencil.failOp == GPU_STENCIL_REPLACE ||
    pass->pipeline->info.stencil.depthFailOp == GPU_STENCIL_REPLACE ||
    pass->pipeline->info.stencil.passOp == GPU_STENCIL_REPLACE;

  if (hasReplace && test != COMPARE_NONE) {
    lovrCheck(value == pass->pipeline->info.stencil.value, "When stencil write is 'replace' and stencil test is active, their values must match");
  }

  gpu_compare_mode gpuTest;
  switch (test) { // (Reversed compare mode)
    case COMPARE_EQUAL: gpuTest = GPU_COMPARE_EQUAL; break;
    case COMPARE_NEQUAL: gpuTest = GPU_COMPARE_NEQUAL; break;
    case COMPARE_LESS: gpuTest = GPU_COMPARE_GREATER; break;
    case COMPARE_LEQUAL: gpuTest = GPU_COMPARE_GEQUAL; break;
    case COMPARE_GREATER: gpuTest = GPU_COMPARE_LESS; break;
    case COMPARE_GEQUAL: gpuTest = GPU_COMPARE_LEQUAL; break;
    case COMPARE_NONE: default: gpuTest = GPU_COMPARE_NONE; break;
  }

  pass->pipeline->info.stencil.test = gpuTest;
  pass->pipeline->info.stencil.testMask = mask;
  if (test != COMPARE_NONE) pass->pipeline->info.stencil.value = value;
  pass->pipeline->dirty = true;
}
// Configures the stencil write operations (fail / depth-fail / pass) and write mask.
// Requires a depth attachment with a stencil aspect.
void lovrPassSetStencilWrite(Pass* pass, StencilAction actions[3], uint8_t value, uint8_t mask) {
  TextureFormat depthFormat = pass->canvas.depth.texture ? pass->canvas.depth.texture->info.format : pass->canvas.depth.format;
  lovrCheck(depthFormat == FORMAT_D32FS8 || depthFormat == FORMAT_D24S8, "Trying to set stencil mode, but Pass depth texture does not use a stencil format");

  // 'replace' uses the same hardware reference value as the stencil test, so they must agree
  bool hasReplace = actions[0] == STENCIL_REPLACE || actions[1] == STENCIL_REPLACE || actions[2] == STENCIL_REPLACE;
  if (hasReplace && pass->pipeline->info.stencil.test != GPU_COMPARE_NONE) {
    lovrCheck(value == pass->pipeline->info.stencil.value, "When stencil write is 'replace' and stencil test is active, their values must match");
  }

  pass->pipeline->info.stencil.failOp = (gpu_stencil_op) actions[0];
  pass->pipeline->info.stencil.depthFailOp = (gpu_stencil_op) actions[1];
  pass->pipeline->info.stencil.passOp = (gpu_stencil_op) actions[2];
  pass->pipeline->info.stencil.writeMask = mask;
  if (hasReplace) pass->pipeline->info.stencil.value = value;
  pass->pipeline->dirty = true;
}
2023-06-23 21:41:39 +00:00
// Enables/disables CPU-side view frustum culling for subsequent draws.
void lovrPassSetViewCull(Pass* pass, bool enable) {
  pass->pipeline->viewCull = enable;
}
2022-05-11 19:50:26 +00:00
// Sets which triangle winding counts as front-facing; marks the pipeline dirty only on change.
void lovrPassSetWinding(Pass* pass, Winding winding) {
  Pipeline* pipeline = pass->pipeline;
  pipeline->dirty |= pipeline->info.rasterizer.winding != (gpu_winding) winding;
  pipeline->info.rasterizer.winding = (gpu_winding) winding;
}
// Toggles wireframe rasterization.  Silently ignored when the GPU lacks support for
// non-solid fill modes.
void lovrPassSetWireframe(Pass* pass, bool wireframe) {
  if (state.features.wireframe) {
    // Bug fix: the dirty comparison used to cast the bool to gpu_winding — a copy-paste from
    // lovrPassSetWinding.  Compare the boolean values directly.
    pass->pipeline->dirty |= pass->pipeline->info.rasterizer.wireframe != wireframe;
    pass->pipeline->info.rasterizer.wireframe = wireframe;
  }
}
2024-02-05 23:03:28 +00:00
// Binds a range of a Buffer to the shader resource named `name`.  An extent of 0 means
// "the rest of the buffer", clamped to the relevant GPU range limit.
void lovrPassSendBuffer(Pass* pass, const char* name, size_t length, Buffer* buffer, uint32_t offset, uint32_t extent) {
  Shader* shader = pass->pipeline->shader;
  lovrCheck(shader, "A Shader must be active to send resources");

  ShaderResource* resource = findShaderResource(shader, name, length);
  uint32_t slot = resource->binding;

  lovrCheck(shader->bufferMask & (1u << slot), "Trying to send a Buffer to '%s', but the active Shader doesn't have a Buffer in that slot", name);
  lovrCheck(offset < buffer->info.size, "Buffer offset is past the end of the Buffer");

  // Storage and uniform buffers have different alignment and size limits
  uint32_t limit;
  if (shader->storageMask & (1u << slot)) {
    lovrCheck((offset & (state.limits.storageBufferAlign - 1)) == 0, "Storage buffer offset (%d) is not aligned to storageBufferAlign limit (%d)", offset, state.limits.storageBufferAlign);
    limit = state.limits.storageBufferRange;
  } else {
    lovrCheck((offset & (state.limits.uniformBufferAlign - 1)) == 0, "Uniform buffer offset (%d) is not aligned to uniformBufferAlign limit (%d)", offset, state.limits.uniformBufferAlign);
    limit = state.limits.uniformBufferRange;
  }

  if (extent == 0) {
    extent = MIN(buffer->info.size - offset, limit);
  } else {
    lovrCheck(offset + extent <= buffer->info.size, "Buffer range goes past the end of the Buffer");
    lovrCheck(extent <= limit, "Buffer range exceeds storageBufferRange/uniformBufferRange limit");
  }

  trackBuffer(pass, buffer, resource->phase, resource->cache);
  pass->bindings[slot].buffer.object = buffer->gpu;
  pass->bindings[slot].buffer.offset = buffer->base + offset;
  pass->bindings[slot].buffer.extent = extent;
  pass->flags |= DIRTY_BINDINGS;
}
2024-02-05 23:03:28 +00:00
// Binds a Texture to the shader resource named `name`.  Storage (image) slots bind the
// texture's storage view, sampled slots bind the regular view.
void lovrPassSendTexture(Pass* pass, const char* name, size_t length, Texture* texture) {
  Shader* shader = pass->pipeline->shader;
  lovrCheck(shader, "A Shader must be active to send resources");

  ShaderResource* resource = findShaderResource(shader, name, length);
  uint32_t slot = resource->binding;

  lovrCheck(shader->textureMask & (1u << slot), "Trying to send a Texture to '%s', but the active Shader doesn't have a Texture in that slot", name);

  gpu_texture* view;
  if (shader->storageMask & (1u << slot)) {
    lovrCheck(texture->info.usage & TEXTURE_STORAGE, "Textures must be created with the 'storage' usage to send them to image variables in shaders");
    view = texture->storageView;
  } else {
    lovrCheck(texture->info.usage & TEXTURE_SAMPLE, "Textures must be created with the 'sample' usage to send them to sampler variables in shaders");
    view = texture->gpu;
  }

  trackTexture(pass, texture, resource->phase, resource->cache);
  pass->bindings[slot].texture = view;
  pass->flags |= DIRTY_BINDINGS;
}
2024-02-05 23:03:28 +00:00
// Binds a Sampler to the shader resource named `name`.
void lovrPassSendSampler(Pass* pass, const char* name, size_t length, Sampler* sampler) {
  Shader* shader = pass->pipeline->shader;
  lovrCheck(shader, "A Shader must be active to send resources");

  ShaderResource* resource = findShaderResource(shader, name, length);
  uint32_t slot = resource->binding;

  lovrCheck(shader->samplerMask & (1u << slot), "Trying to send a Sampler to '%s', but the active Shader doesn't have a Sampler in that slot", name);

  pass->bindings[slot].sampler = sampler->gpu;
  pass->flags |= DIRTY_BINDINGS;
}
2024-02-05 23:03:28 +00:00
// Returns a writable pointer (and its format) for the shader variable named `name`.  Plain
// uniforms map straight into the pass's uniform staging memory; uniform-buffer variables get
// a fresh scratch buffer allocation that is bound to the slot.
void lovrPassSendData(Pass* pass, const char* name, size_t length, void** data, DataField** format) {
  Shader* shader = pass->pipeline->shader;
  lovrCheck(shader, "A Shader must be active to send data to it");

  // First, check whether the name is a plain uniform
  uint32_t hash = (uint32_t) hash64(name, length);
  for (uint32_t i = 0; i < shader->uniformCount; i++) {
    if (shader->uniforms[i].hash != hash) continue;
    *data = (char*) pass->uniforms + shader->uniforms[i].offset;
    *format = &shader->uniforms[i];
    pass->flags |= DIRTY_UNIFORMS;
    return;
  }

  // Otherwise it must be a uniform buffer resource
  ShaderResource* resource = findShaderResource(shader, name, length);
  uint32_t slot = resource->binding;

  lovrCheck(shader->bufferMask & (1u << slot), "Trying to send data to '%s', but that slot isn't a Buffer", name);
  lovrCheck(~shader->storageMask & (1u << slot), "Unable to send table data to a storage buffer");

  uint32_t size = resource->format->stride * MAX(resource->format->length, 1);
  BufferView view = lovrPassGetBuffer(pass, size, state.limits.uniformBufferAlign);
  pass->bindings[slot].buffer = (gpu_buffer_binding) { view.buffer, view.offset, view.extent };
  pass->flags |= DIRTY_BINDINGS;

  *data = view.pointer;
  *format = resource->format;
}
2023-11-18 10:02:05 +00:00
// Resolves the gpu_pipeline_info for a draw, reusing the previous draw's
// pipeline whenever nothing changed.  Dirty tracking is incremental: the draw
// mode, shader, and vertex format each flip pipeline->dirty only when they
// differ from the state cached on the Pass's current Pipeline.
static void lovrPassResolvePipeline(Pass* pass, DrawInfo* info, Draw* draw, Draw* prev) {
  Pipeline* pipeline = pass->pipeline;
  Shader* shader = draw->shader;

  if (pipeline->info.drawMode != (gpu_draw_mode) info->mode) {
    pipeline->info.drawMode = (gpu_draw_mode) info->mode;
    pipeline->dirty = true;
  }

  // Only swap shaders automatically when no user shader is pinned on the
  // pipeline (draw->shader may be a default shader chosen per draw).
  if (!pipeline->shader && pipeline->info.shader != shader->gpu) {
    pipeline->info.shader = shader->gpu;
    pipeline->info.flags = NULL;
    pipeline->info.flagCount = 0;
    pipeline->dirty = true;
  }

  // Vertex formats
  if (info->vertex.buffer && pipeline->lastVertexBuffer != info->vertex.buffer) {
    // User-supplied vertex Buffer: rebuild the attribute layout from its
    // DataField format.  Invalidate lastVertexFormat so a later formatted draw
    // re-resolves.
    pipeline->lastVertexFormat = ~0u;
    pipeline->lastVertexBuffer = info->vertex.buffer;
    pipeline->dirty = true;
    const DataField* format = info->vertex.buffer->info.format;

    // Buffer 0 is the user's vertex buffer; buffer 1 is a zero-stride fallback
    // supplying constant values for attributes the buffer doesn't provide.
    pipeline->info.vertex.bufferCount = 2;
    pipeline->info.vertex.attributeCount = shader->attributeCount;
    pipeline->info.vertex.bufferStrides[0] = format->stride;
    pipeline->info.vertex.bufferStrides[1] = 0;
    for (uint32_t i = 0; i < shader->attributeCount; i++) {
      ShaderAttribute* attribute = &shader->attributes[i];
      bool found = false;

      // Match by field name hash or by numeric location; a formatless (scalar)
      // buffer is treated as a single field describing itself.
      for (uint32_t j = 0; j < MAX(format->fieldCount, 1); j++) {
        const DataField* field = format->fieldCount > 0 ? &format->fields[j] : format;

        if (field->hash == attribute->hash || field->hash == attribute->location) {
          lovrCheck(field->type < TYPE_MAT2, "Currently vertex attributes can not use matrix or index types");
          pipeline->info.vertex.attributes[i] = (gpu_attribute) {
            .buffer = 0,
            .location = attribute->location,
            .offset = field->offset,
            .type = field->type
          };
          found = true;
          break;
        }
      }

      // Unmatched attributes read from the fallback buffer; color pulls from
      // offset 16 (presumably white — see default buffer contents; TODO confirm).
      if (!found) {
        pipeline->info.vertex.attributes[i] = (gpu_attribute) {
          .buffer = 1,
          .location = attribute->location,
          .offset = attribute->location == LOCATION_COLOR ? 16 : 0,
          .type = GPU_TYPE_F32x4
        };
      }
    }
  } else if (!info->vertex.buffer && pipeline->lastVertexFormat != info->vertex.format) {
    // Built-in vertex format (shapes/text/etc.): copy the canonical layout,
    // then append fallback-buffer entries for the shader's custom attributes.
    pipeline->lastVertexFormat = info->vertex.format;
    pipeline->lastVertexBuffer = NULL;
    pipeline->info.vertex = state.vertexFormats[info->vertex.format];
    pipeline->dirty = true;
    if (shader->hasCustomAttributes) {
      for (uint32_t i = 0; i < shader->attributeCount; i++) {
        // Locations < 10 are the custom-attribute range (MAX_CUSTOM_ATTRIBUTES)
        if (shader->attributes[i].location < 10) {
          pipeline->info.vertex.attributes[pipeline->info.vertex.attributeCount++] = (gpu_attribute) {
            .buffer = 1,
            .location = shader->attributes[i].location,
            .type = GPU_TYPE_F32x4,
            .offset = shader->attributes[i].location == LOCATION_COLOR ? 16 : 0
          };
        }
      }
    }
  }

  if (pipeline->dirty) {
    // Snapshot the pipeline info into pass-lifetime memory; the concrete
    // gpu_pipeline is looked up/compiled later from this snapshot.
    pipeline->dirty = false;
    draw->pipelineInfo = lovrPassAllocate(pass, sizeof(gpu_pipeline_info));
    memcpy(draw->pipelineInfo, &pipeline->info, sizeof(pipeline->info));
    draw->pipeline = NULL;
  } else {
    // Nothing changed: share the previous draw's snapshot and pipeline.
    draw->pipelineInfo = prev->pipelineInfo;
    draw->pipeline = prev->pipeline;
  }
}
2024-02-20 23:07:30 +00:00
// Resolves the vertex and index buffers for a draw.  Shapes with a nonzero
// hash go through a small direct-mapped geometry cache on the Pass so repeated
// identical shapes reuse their scratch buffers; on a cache hit the caller's
// vertex/index pointers are set to NULL to signal "don't regenerate".
static void lovrPassResolveVertices(Pass* pass, DrawInfo* info, Draw* draw) {
  CachedShape* cached = info->hash ? &pass->geocache[info->hash & (COUNTOF(pass->geocache) - 1)] : NULL;

  // Cache hit: reuse the previously-generated geometry wholesale.
  if (cached && cached->hash == info->hash) {
    draw->vertexBuffer = cached->vertexBuffer;
    draw->indexBuffer = cached->indexBuffer;
    draw->start = cached->start;
    draw->baseVertex = cached->baseVertex;
    draw->vertexBufferOffset = cached->vertexBufferOffset;
    *info->vertex.pointer = NULL;
    *info->index.pointer = NULL;
    return;
  }

  if (!info->vertex.buffer && info->vertex.count > 0) {
    // Immediate-mode vertices: allocate scratch space and hand the caller a
    // pointer to fill.  16-bit indices cap the vertex count.
    lovrCheck(info->vertex.count <= UINT16_MAX, "Shape has too many vertices (max is 65535)");
    uint32_t stride = state.vertexFormats[info->vertex.format].bufferStrides[0];
    BufferView view = lovrPassGetBuffer(pass, info->vertex.count * stride, stride);
    *info->vertex.pointer = view.pointer;
    draw->vertexBuffer = view.buffer;
    draw->vertexBufferOffset = view.offset;
  } else if (info->vertex.buffer) {
    // User-supplied vertex Buffer.
    Buffer* buffer = info->vertex.buffer;
    uint32_t stride = buffer->info.format->stride;
    lovrCheck(stride <= state.limits.vertexBufferStride, "Vertex buffer stride exceeds vertexBufferStride limit");
    trackBuffer(pass, buffer, GPU_PHASE_INPUT_VERTEX, GPU_CACHE_VERTEX);
    draw->vertexBuffer = buffer->gpu;
    draw->vertexBufferOffset = buffer->base;
  } else {
    // No vertices at all (e.g. vertices generated in the shader): bind the
    // default buffer so the binding is still valid.
    draw->vertexBuffer = state.defaultBuffer->gpu;
    draw->vertexBufferOffset = state.defaultBuffer->base;
  }

  if (!info->index.buffer && info->index.count > 0) {
    // Immediate-mode indices are always 16-bit (2 bytes each).
    BufferView view = lovrPassGetBuffer(pass, info->index.count * 2, 2);
    *info->index.pointer = view.pointer;
    draw->indexBuffer = view.buffer;
    draw->start = view.offset / 2;
  } else if (info->index.buffer) {
    trackBuffer(pass, info->index.buffer, GPU_PHASE_INPUT_INDEX, GPU_CACHE_INDEX);
    draw->indexBuffer = info->index.buffer->gpu;
    // 4-byte stride means 32-bit indices; anything else is treated as 16-bit.
    draw->flags |= info->index.buffer->info.format->stride == 4 ? DRAW_INDEX32 : 0;
    // Fold the buffer's base byte offset into the first-index offset
    // (draw->start was already set from info->start by lovrPassDraw).
    draw->start += info->index.buffer->base / info->index.buffer->info.format->stride;
  } else {
    draw->indexBuffer = NULL;
  }

  // Populate (or overwrite) the cache slot for hashed shapes.
  if (info->hash) {
    cached->hash = info->hash;
    cached->vertexBuffer = draw->vertexBuffer;
    cached->indexBuffer = draw->indexBuffer;
    cached->start = draw->start;
    cached->baseVertex = draw->baseVertex;
    cached->vertexBufferOffset = draw->vertexBufferOffset;
  }
}
2022-05-30 22:36:31 +00:00
2023-04-30 06:02:37 +00:00
// Builds the gpu_bundle_info describing the shader's resource bindings for a
// draw.  Returns NULL when the shader binds nothing, and reuses the previous
// draw's bundle when the Pass's bindings haven't changed since then.
static gpu_bundle_info* lovrPassResolveBindings(Pass* pass, Shader* shader, gpu_bundle_info* previous) {
  uint32_t count = shader->resourceCount;

  // No resources: no bundle needed at all
  if (count == 0) return NULL;

  // Bindings untouched since the last draw: share its bundle
  if (!(pass->flags & DIRTY_BINDINGS)) return previous;

  gpu_bundle_info* info = lovrPassAllocate(pass, sizeof(gpu_bundle_info));
  info->bindings = lovrPassAllocate(pass, count * sizeof(gpu_binding));
  info->layout = state.layouts.data[shader->layout].gpu;
  info->count = count;

  // Copy the Pass's current binding for each resource slot the shader uses,
  // stamping in the shader's descriptor type and slot number.
  for (uint32_t i = 0; i < count; i++) {
    uint32_t slot = shader->resources[i].binding;
    gpu_binding* binding = &info->bindings[i];
    *binding = pass->bindings[slot];
    binding->type = shader->resources[i].type;
    binding->number = slot;
    binding->count = 0;
  }

  pass->flags &= ~DIRTY_BINDINGS;
  return info;
}
2024-02-20 23:07:30 +00:00
// Uploads the Pass's staged uniform data into fresh scratch memory and returns
// the GPU buffer and byte offset where the copy lives.
static void lovrPassResolveUniforms(Pass* pass, Shader* shader, gpu_buffer** buffer, uint32_t* offset) {
  uint32_t size = shader->uniformSize;
  BufferView scratch = lovrPassGetBuffer(pass, size, state.limits.uniformBufferAlign);
  memcpy(scratch.pointer, pass->uniforms, size);
  *buffer = scratch.buffer;
  *offset = scratch.offset;
}
2023-04-30 06:02:37 +00:00
// Records a draw on the Pass.  Grows the draw list as needed, resolves the
// shader/material/pipeline/vertex/binding/uniform state into the Draw record,
// and snapshots the current transform and color.  Much of the state is
// inherited from the previous draw when unchanged, so the resolve order below
// matters.
void lovrPassDraw(Pass* pass, DrawInfo* info) {
  // Grow the draw array geometrically; pass-lifetime allocator means the old
  // array is just abandoned after the copy.
  if (pass->drawCount >= pass->drawCapacity) {
    lovrAssert(pass->drawCount < 1 << 16, "Pass has too many draws!");
    pass->drawCapacity = pass->drawCapacity > 0 ? pass->drawCapacity << 1 : 1;
    Draw* draws = lovrPassAllocate(pass, pass->drawCapacity * sizeof(Draw));
    if (pass->draws) memcpy(draws, pass->draws, pass->drawCount * sizeof(Draw));
    pass->draws = draws;
  }

  Draw* previous = pass->drawCount > 0 ? &pass->draws[pass->drawCount - 1] : NULL;
  Draw* draw = &pass->draws[pass->drawCount++];

  draw->flags = 0;
  // ~0u marks "no tally" when occlusion/tally queries aren't active.
  draw->tally = pass->tally.active ? pass->tally.count : ~0u;
  draw->camera = pass->cameraCount - 1;
  pass->flags &= ~DIRTY_CAMERA;

  // Fall back to a built-in shader keyed by the draw type when none is set.
  draw->shader = pass->pipeline->shader ? pass->pipeline->shader : lovrGraphicsGetDefaultShader(info->shader);
  lovrCheck(draw->shader->info.type == SHADER_GRAPHICS, "Tried to draw while a compute shader is active");
  lovrRetain(draw->shader);

  // Material precedence: per-draw, then pipeline, then the default.
  draw->material = info->material;
  if (!draw->material) draw->material = pass->pipeline->material;
  if (!draw->material) draw->material = state.defaultMaterial;
  trackMaterial(pass, draw->material);
  draw->start = info->start;
  // Explicit count wins; otherwise indexed draws use the index count and
  // non-indexed draws use the vertex count.
  draw->count = info->count > 0 ? info->count : (info->index.buffer || info->index.count > 0 ? info->index.count : info->vertex.count);
  draw->instances = MAX(info->instances, 1);
  draw->baseVertex = info->baseVertex;

  lovrPassResolvePipeline(pass, info, draw, previous);
  lovrPassResolveVertices(pass, info, draw);
  draw->bundleInfo = lovrPassResolveBindings(pass, draw->shader, previous ? previous->bundleInfo : NULL);

  // Upload staged uniforms only when they changed; otherwise inherit the
  // previous draw's uniform buffer/offset.
  if (draw->shader->uniformCount > 0 && pass->flags & DIRTY_UNIFORMS) {
    lovrPassResolveUniforms(pass, draw->shader, &draw->uniformBuffer, &draw->uniformOffset);
    pass->flags &= ~DIRTY_UNIFORMS;
  } else {
    draw->uniformBuffer = previous ? previous->uniformBuffer : NULL;
    draw->uniformOffset = previous ? previous->uniformOffset : 0;
  }

  // Bounds are only kept when view culling is on and the shape provides them.
  if (pass->pipeline->viewCull && info->bounds) {
    memcpy(draw->bounds, info->bounds, sizeof(draw->bounds));
    draw->flags |= DRAW_HAS_BOUNDS;
    pass->flags |= NEEDS_VIEW_CULL;
  }

  // Snapshot transform (pass transform composed with the draw's own) and color.
  mat4_init(draw->transform, pass->transform);
  if (info->transform) mat4_mul(draw->transform, info->transform);
  memcpy(draw->color, pass->pipeline->color, 4 * sizeof(float));
}
2022-06-04 08:33:50 +00:00
// Records a point-cloud draw; *points receives scratch memory for the caller
// to fill with `count` point vertices.
void lovrPassPoints(Pass* pass, uint32_t count, float** points) {
  DrawInfo info = { 0 };
  info.mode = DRAW_POINTS;
  info.vertex.format = VERTEX_POINT;
  info.vertex.pointer = (void**) points;
  info.vertex.count = count;
  lovrPassDraw(pass, &info);
}
2022-06-04 08:33:50 +00:00
// Records a line-strip draw as a list of line segments; *points receives
// scratch memory for the caller to fill with `count` point vertices.
void lovrPassLine(Pass* pass, uint32_t count, float** points) {
  lovrCheck(count >= 2, "Need at least 2 points to make a line");

  uint16_t* indices;

  DrawInfo info = { 0 };
  info.mode = DRAW_LINES;
  info.vertex.format = VERTEX_POINT;
  info.vertex.pointer = (void**) points;
  info.vertex.count = count;
  info.index.pointer = (void**) &indices;
  info.index.count = 2 * (count - 1);
  lovrPassDraw(pass, &info);

  // Join consecutive points into individual segments: (0,1), (1,2), ...
  for (uint32_t segment = 0; segment + 1 < count; segment++) {
    *indices++ = (uint16_t) segment;
    *indices++ = (uint16_t) (segment + 1);
  }
}
2022-06-22 07:05:26 +00:00
// Draws a unit plane on the XY plane, subdivided into a cols x rows grid.
// STYLE_LINE draws the grid lines; STYLE_FILL draws the triangulated surface.
// Geometry is cached by (shape, style, cols, rows).
void lovrPassPlane(Pass* pass, float* transform, DrawStyle style, uint32_t cols, uint32_t rows) {
  uint32_t key[] = { SHAPE_PLANE, style, cols, rows };
  ShapeVertex* vertices;
  uint16_t* indices;

  bool wireframe = style == STYLE_LINE;
  uint32_t vertexCount = (cols + 1) * (rows + 1);
  // Wireframe: one line per grid row and per grid column; fill: 2 tris/cell.
  uint32_t indexCount = wireframe ? 2 * (rows + 1) + 2 * (cols + 1) : 6 * cols * rows;

  lovrPassDraw(pass, &(DrawInfo) {
    .hash = hash64(key, sizeof(key)),
    .mode = wireframe ? DRAW_LINES : DRAW_TRIANGLES,
    .transform = transform,
    .bounds = (float[6]) { 0.f, 0.f, 0.f, .5f, .5f, 0.f },
    .vertex.pointer = (void**) &vertices,
    .vertex.count = vertexCount,
    .index.pointer = (void**) &indices,
    .index.count = indexCount
  });

  // NULL pointers mean the shape was served from the geometry cache.
  if (!vertices) {
    return;
  }

  // Vertex grid spanning [-.5, .5], normals +Z, uvs with top-left origin.
  for (uint32_t y = 0; y <= rows; y++) {
    float v = y * (1.f / rows);
    for (uint32_t x = 0; x <= cols; x++) {
      float u = x * (1.f / cols);
      *vertices++ = (ShapeVertex) {
        .position = { u - .5f, .5f - v, 0.f },
        .normal = { 0.f, 0.f, 1.f },
        .uv = { u, v }
      };
    }
  }

  if (wireframe) {
    // Horizontal lines, one per grid row of vertices.
    for (uint32_t y = 0; y <= rows; y++) {
      uint16_t left = (uint16_t) (y * (cols + 1));
      *indices++ = left;
      *indices++ = (uint16_t) (left + cols);
    }
    // Vertical lines, one per grid column of vertices.
    for (uint32_t x = 0; x <= cols; x++) {
      *indices++ = (uint16_t) x;
      *indices++ = (uint16_t) (x + (cols + 1) * rows);
    }
  } else {
    // Two counter-clockwise triangles per grid cell.
    for (uint32_t y = 0; y < rows; y++) {
      for (uint32_t x = 0; x < cols; x++) {
        uint16_t a = (uint16_t) (y * (cols + 1) + x);
        uint16_t b = a + 1;
        uint16_t c = (uint16_t) (a + cols + 1);
        uint16_t d = (uint16_t) (a + cols + 2);
        uint16_t cell[] = { a, c, b, b, c, d };
        memcpy(indices, cell, sizeof(cell));
        indices += COUNTOF(cell);
      }
    }
  }
}
2023-01-27 05:24:37 +00:00
// Draws a rounded rectangle.  A nonzero depth (length of the transform's z
// axis) makes it a solid "thicc" slab with front, back, and stitched sides;
// otherwise it's a flat 9-slice.  Each rounded corner is a triangle fan of
// `segments` wedges around an extra corner center vertex.
void lovrPassRoundrect(Pass* pass, float* transform, float r, uint32_t segments) {
  bool thicc = vec3_length(transform + 8) > 0.f;
  float w = vec3_length(transform + 0);
  float h = vec3_length(transform + 4);
  // Clamp the radius to half the smaller extent, then normalize per-axis.
  r = MIN(MIN(r, w / 2.f), h / 2.f);
  float rx = MIN(r / w, .5f);
  float ry = MIN(r / h, .5f);
  uint32_t n = segments + 1;

  // Degenerate flat cases collapse to a plain quad.
  if (!thicc && (r <= 0.f || w == 0.f || h == 0.f)) {
    lovrPassPlane(pass, transform, STYLE_FILL, 1, 1);
    return;
  }

  uint32_t vertexCount;
  uint32_t indexCount;

  if (thicc) {
    // 2 faces * (4 corner centers + 4 arcs) plus duplicated arc rings with
    // outward normals for the sides.
    vertexCount = 8 + (segments + 1) * 16;
    indexCount = 3 * 8 * segments + 6 * 4 * (segments + 1) + 60;
  } else {
    vertexCount = 4 + (segments + 1) * 4;
    indexCount = 3 * 4 * segments + 30;
  }

  ShapeVertex* vertices;
  uint16_t* indices;

  // No .hash: roundrects aren't cached, so the pointers are always valid.
  lovrPassDraw(pass, &(DrawInfo) {
    .mode = DRAW_TRIANGLES,
    .transform = transform,
    .bounds = (float[6]) { 0.f, 0.f, 0.f, .5f, .5f, .5f },
    .vertex.pointer = (void**) &vertices,
    .vertex.count = vertexCount,
    .index.pointer = (void**) &indices,
    .index.count = indexCount
  });

  // c is the index of the first corner-center vertex (they live at the end).
  uint32_t c = vertexCount - (thicc ? 8 : 4);
  ShapeVertex* corner = vertices + c;

  float angle = 0.f;
  float step = (float) M_PI / 2.f / segments;
  float x = .5f - rx;  // corner center x
  float y = .5f - ry;  // corner center y
  float z = .5f;
  float nz = 1.f;

  // If the rounded rectangle is thick, loop twice (front and back), otherwise do only a single side
  for (uint32_t side = 0; side <= (uint32_t) thicc; side++, z *= -1.f, nz *= -1.f, angle = 0.f) {
    // Four quarter-circle arcs per face, one per corner, swept simultaneously.
    for (uint32_t i = 0; i < n; i++, angle += step) {
      float c = cosf(angle);
      float s = sinf(angle);
      vertices[n * 0 + i] = (ShapeVertex) { { x + c * rx, y + s * ry, z }, { 0.f, 0.f, nz }, { .5f + x + c * rx, .5f - y - s * ry } };
      vertices[n * 1 + i] = (ShapeVertex) { { -x - s * rx, y + c * ry, z }, { 0.f, 0.f, nz }, { .5f - x - s * rx, .5f - y - c * ry } };
      vertices[n * 2 + i] = (ShapeVertex) { { -x - c * rx, -y - s * ry, z }, { 0.f, 0.f, nz }, { .5f - x - c * rx, .5f + y + s * ry } };
      vertices[n * 3 + i] = (ShapeVertex) { { x + s * rx, -y - c * ry, z }, { 0.f, 0.f, nz }, { .5f + x + s * rx, .5f + y + c * ry } };
      if (thicc) {
        // Duplicate arcs with radial normals, used by the side walls.
        vertices[n * 8 + i] = (ShapeVertex) { { x + c * rx, y + s * ry, z }, { c, s, 0.f }, { .5f + x + c * rx, .5f - y - s * ry } };
        vertices[n * 9 + i] = (ShapeVertex) { { -x - s * rx, y + c * ry, z }, { c, s, 0.f }, { .5f - x - s * rx, .5f - y - c * ry } };
        vertices[n * 10 + i] = (ShapeVertex) { { -x - c * rx, -y - s * ry, z }, { c, s, 0.f }, { .5f - x - c * rx, .5f + y + s * ry } };
        vertices[n * 11 + i] = (ShapeVertex) { { x + s * rx, -y - c * ry, z }, { c, s, 0.f }, { .5f + x + s * rx, .5f + y + c * ry } };
      }
    }
    vertices += 4 * n;
    // 4 extra corner vertices per-side, used for the triangle fans and 9-slice quads
    *corner++ = (ShapeVertex) { { x, y, z }, { 0.f, 0.f, nz }, { .5f + x, .5f - y } };
    *corner++ = (ShapeVertex) { { -x, y, z }, { 0.f, 0.f, nz }, { .5f - x, .5f - y } };
    *corner++ = (ShapeVertex) { { -x, -y, z }, { 0.f, 0.f, nz }, { .5f - x, .5f + y } };
    *corner++ = (ShapeVertex) { { x, -y, z }, { 0.f, 0.f, nz }, { .5f + x, .5f + y } };
  }

  // 9-slice quads for the front face: edges and center, between arc endpoints
  // (n*k is the first vertex of arc k; n*k + m is its last).
  uint32_t m = segments;
  uint16_t front[] = {
    n * 0 + m, n * 1, c + 0, c + 0, n * 1, c + 1, // top
    c + 1, n * 1 + m, c + 2, c + 2, n * 1 + m, n * 2, // left
    n * 0, c + 0, n * 3 + m, n * 3 + m, c + 0, c + 3, // right
    c + 3, c + 2, n * 3, n * 3, c + 2, 2 * n + m, // bot
    c + 0, c + 1, c + 3, c + 3, c + 1, c + 2 // center
  };
  memcpy(indices, front, sizeof(front));
  indices += COUNTOF(front);
  // Corner triangle fans for the front face.
  for (uint32_t i = 0; i < 4; i++) {
    for (uint32_t j = 0; j < segments; j++) {
      memcpy(indices, (uint16_t[]) { c + i, n * i + j, n * i + j + 1 }, 3 * sizeof(uint16_t));
      indices += 3;
    }
  }
  if (thicc) {
    // Back face: same 9-slice layout with reversed winding.
    uint16_t back[] = {
      n * 4 + m, c + 4, n * 5, n * 5, c + 4, c + 5, // top
      c + 5, c + 6, n * 5 + m, n * 5 + m, c + 6, n * 6, // left
      n * 4, n * 7 + m, c + 4, c + 4, n * 7 + m, c + 7, // right
      c + 7, n * 7, c + 6, c + 6, n * 7, 6 * n + m, // bot
      c + 4, c + 7, c + 5, c + 5, c + 7, c + 6 // center
    };
    memcpy(indices, back, sizeof(back));
    indices += COUNTOF(back);
    // Corner fans for the back face (reversed winding).
    for (uint32_t i = 4; i < 8; i++) {
      for (uint32_t j = 0; j < segments; j++) {
        memcpy(indices, (uint16_t[]) { n * i + j, c + i, n * i + j + 1 }, 3 * sizeof(uint16_t));
        indices += 3;
      }
    }
    // Stitch sides together
    for (uint32_t i = 0; i < 4 * n - 1; i++) {
      uint16_t a = 8 * n + i;   // front radial ring
      uint16_t b = 12 * n + i;  // back radial ring
      memcpy(indices, (uint16_t[]) { a, b, b + 1, a, b + 1, a + 1 }, 6 * sizeof(uint16_t));
      indices += 6;
    }
    // Handle discontinuity
    uint16_t a = 11 * n + m;
    uint16_t b = 15 * n + m;
    uint16_t c = 12 * n;
    uint16_t d = 8 * n;
    memcpy(indices, (uint16_t[]) { a, b, c, a, c, d }, 6 * sizeof(uint16_t));
    indices += 6;
  }
}
2022-06-22 07:05:26 +00:00
// Draws a unit box centered at the origin.  STYLE_LINE draws the 12 edges;
// STYLE_FILL draws the 6 faces (4 unique vertices each, flat normals).
// Geometry is cached by (shape, style).
void lovrPassBox(Pass* pass, float* transform, DrawStyle style) {
  // Wireframe geometry: 8 corners, edges as line segments
  static ShapeVertex lineVertices[] = {
    { { -.5f, .5f, -.5f }, { 0.f, 0.f, 0.f }, { 0.f, 0.f } }, // Front
    { { .5f, .5f, -.5f }, { 0.f, 0.f, 0.f }, { 0.f, 0.f } },
    { { .5f, -.5f, -.5f }, { 0.f, 0.f, 0.f }, { 0.f, 0.f } },
    { { -.5f, -.5f, -.5f }, { 0.f, 0.f, 0.f }, { 0.f, 0.f } },
    { { -.5f, .5f, .5f }, { 0.f, 0.f, 0.f }, { 0.f, 0.f } }, // Back
    { { .5f, .5f, .5f }, { 0.f, 0.f, 0.f }, { 0.f, 0.f } },
    { { .5f, -.5f, .5f }, { 0.f, 0.f, 0.f }, { 0.f, 0.f } },
    { { -.5f, -.5f, .5f }, { 0.f, 0.f, 0.f }, { 0.f, 0.f } }
  };

  static uint16_t lineIndices[] = {
    0, 1, 1, 2, 2, 3, 3, 0, // Front
    4, 5, 5, 6, 6, 7, 7, 4, // Back
    0, 4, 1, 5, 2, 6, 3, 7 // Connections
  };

  // Filled geometry: 4 vertices per face so each face gets flat normals/uvs
  static ShapeVertex fillVertices[] = {
    { { -.5f, -.5f, -.5f }, { 0.f, 0.f, -1.f }, { 0.f, 0.f } }, // Front
    { { -.5f, .5f, -.5f }, { 0.f, 0.f, -1.f }, { 0.f, 1.f } },
    { { .5f, -.5f, -.5f }, { 0.f, 0.f, -1.f }, { 1.f, 0.f } },
    { { .5f, .5f, -.5f }, { 0.f, 0.f, -1.f }, { 1.f, 1.f } },
    { { .5f, .5f, -.5f }, { 1.f, 0.f, 0.f }, { 0.f, 1.f } }, // Right
    { { .5f, .5f, .5f }, { 1.f, 0.f, 0.f }, { 1.f, 1.f } },
    { { .5f, -.5f, -.5f }, { 1.f, 0.f, 0.f }, { 0.f, 0.f } },
    { { .5f, -.5f, .5f }, { 1.f, 0.f, 0.f }, { 1.f, 0.f } },
    { { .5f, -.5f, .5f }, { 0.f, 0.f, 1.f }, { 0.f, 0.f } }, // Back
    { { .5f, .5f, .5f }, { 0.f, 0.f, 1.f }, { 0.f, 1.f } },
    { { -.5f, -.5f, .5f }, { 0.f, 0.f, 1.f }, { 1.f, 0.f } },
    { { -.5f, .5f, .5f }, { 0.f, 0.f, 1.f }, { 1.f, 1.f } },
    { { -.5f, .5f, .5f }, { -1.f, 0.f, 0.f }, { 0.f, 1.f } }, // Left
    { { -.5f, .5f, -.5f }, { -1.f, 0.f, 0.f }, { 1.f, 1.f } },
    { { -.5f, -.5f, .5f }, { -1.f, 0.f, 0.f }, { 0.f, 0.f } },
    { { -.5f, -.5f, -.5f }, { -1.f, 0.f, 0.f }, { 1.f, 0.f } },
    { { -.5f, -.5f, -.5f }, { 0.f, -1.f, 0.f }, { 0.f, 0.f } }, // Bottom
    { { .5f, -.5f, -.5f }, { 0.f, -1.f, 0.f }, { 1.f, 0.f } },
    { { -.5f, -.5f, .5f }, { 0.f, -1.f, 0.f }, { 0.f, 1.f } },
    { { .5f, -.5f, .5f }, { 0.f, -1.f, 0.f }, { 1.f, 1.f } },
    { { -.5f, .5f, -.5f }, { 0.f, 1.f, 0.f }, { 0.f, 1.f } }, // Top
    { { -.5f, .5f, .5f }, { 0.f, 1.f, 0.f }, { 0.f, 0.f } },
    { { .5f, .5f, -.5f }, { 0.f, 1.f, 0.f }, { 1.f, 1.f } },
    { { .5f, .5f, .5f }, { 0.f, 1.f, 0.f }, { 1.f, 0.f } }
  };

  static uint16_t fillIndices[] = {
    0, 1, 2, 2, 1, 3,
    4, 5, 6, 6, 5, 7,
    8, 9, 10, 10, 9, 11,
    12, 13, 14, 14, 13, 15,
    16, 17, 18, 18, 17, 19,
    20, 21, 22, 22, 21, 23
  };

  bool wireframe = style == STYLE_LINE;
  ShapeVertex* vertexData = wireframe ? lineVertices : fillVertices;
  uint16_t* indexData = wireframe ? lineIndices : fillIndices;
  uint32_t vertexDataSize = wireframe ? sizeof(lineVertices) : sizeof(fillVertices);
  uint32_t indexDataSize = wireframe ? sizeof(lineIndices) : sizeof(fillIndices);

  uint32_t key[] = { SHAPE_BOX, style };
  ShapeVertex* vertices;
  uint16_t* indices;

  lovrPassDraw(pass, &(DrawInfo) {
    .hash = hash64(key, sizeof(key)),
    .mode = wireframe ? DRAW_LINES : DRAW_TRIANGLES,
    .transform = transform,
    .bounds = (float[6]) { 0.f, 0.f, 0.f, .5f, .5f, .5f },
    .vertex.pointer = (void**) &vertices,
    .vertex.count = vertexDataSize / sizeof(ShapeVertex),
    .index.pointer = (void**) &indices,
    .index.count = indexDataSize / sizeof(uint16_t)
  });

  // NULL pointers mean the shape was served from the geometry cache.
  if (vertices) {
    memcpy(vertices, vertexData, vertexDataSize);
    memcpy(indices, indexData, indexDataSize);
  }
}
2022-06-22 07:05:26 +00:00
// Draws a unit-radius circle (or arc from angle1 to angle2) on the XY plane.
// STYLE_LINE draws the arc outline; STYLE_FILL draws a triangle fan around a
// center vertex.  Geometry is cached by (shape, style, angles, segments).
void lovrPassCircle(Pass* pass, float* transform, DrawStyle style, float angle1, float angle2, uint32_t segments) {
  // A full (or larger) sweep is normalized to exactly one revolution.
  if (fabsf(angle1 - angle2) >= 2.f * (float) M_PI) {
    angle1 = 0.f;
    angle2 = 2.f * (float) M_PI;
  }

  uint32_t key[] = { SHAPE_CIRCLE, style, FLOAT_BITS(angle1), FLOAT_BITS(angle2), segments };

  ShapeVertex* vertices;
  uint16_t* indices;

  bool wireframe = style == STYLE_LINE;
  // Fill adds one extra center vertex and uses 3 indices per wedge.
  uint32_t vertexCount = segments + (wireframe ? 1 : 2);
  uint32_t indexCount = segments * (wireframe ? 2 : 3);

  lovrPassDraw(pass, &(DrawInfo) {
    .hash = hash64(key, sizeof(key)),
    .mode = wireframe ? DRAW_LINES : DRAW_TRIANGLES,
    .transform = transform,
    .bounds = (float[6]) { 0.f, 0.f, 0.f, 1.f, 1.f, 0.f },
    .vertex.pointer = (void**) &vertices,
    .vertex.count = vertexCount,
    .index.pointer = (void**) &indices,
    .index.count = indexCount
  });

  // NULL pointers mean the shape was served from the geometry cache.
  if (!vertices) {
    return;
  }

  if (!wireframe) {
    // Center vertex for the triangle fan.
    *vertices++ = (ShapeVertex) { { 0.f, 0.f, 0.f }, { 0.f, 0.f, 1.f }, { .5f, .5f } };
  }

  // Arc ring: segments + 1 vertices from angle1 to angle2.
  float angleShift = (angle2 - angle1) / segments;
  for (uint32_t i = 0; i <= segments; i++) {
    float theta = angle1 + i * angleShift;
    float x = cosf(theta);
    float y = sinf(theta);
    *vertices++ = (ShapeVertex) { { x, y, 0.f }, { 0.f, 0.f, 1.f }, { x + .5f, .5f - y } };
  }

  if (wireframe) {
    // Consecutive ring vertices joined into line segments.
    for (uint32_t i = 0; i < segments; i++) {
      *indices++ = (uint16_t) i;
      *indices++ = (uint16_t) (i + 1);
    }
  } else {
    // Fan wedges: center (0) plus two consecutive ring vertices.
    for (uint32_t i = 0; i < segments; i++) {
      *indices++ = 0;
      *indices++ = (uint16_t) (i + 1);
      *indices++ = (uint16_t) (i + 2);
    }
  }
}
2022-06-24 04:23:16 +00:00
// Draws a unit-radius UV sphere with segmentsH longitude slices and segmentsV
// latitude subdivisions.  The poles are single vertices joined to the first
// and last latitude ring by triangle fans; interior rings are joined by quads.
// Geometry is cached by (shape, segmentsH, segmentsV).
void lovrPassSphere(Pass* pass, float* transform, uint32_t segmentsH, uint32_t segmentsV) {
  // segmentsH/segmentsV are unsigned: without this check, segmentsV - 1 and
  // segmentsV - 2 below wrap around for values < 2, requesting absurd
  // vertex/index counts, and segmentsH == 0 divides by zero in the uv math.
  lovrCheck(segmentsH >= 2 && segmentsV >= 2, "Sphere segment count must be >= 2");
  uint32_t vertexCount = 2 + (segmentsH + 1) * (segmentsV - 1);
  uint32_t indexCount = 2 * 3 * segmentsH + segmentsH * (segmentsV - 2) * 6;
  ShapeVertex* vertices;
  uint16_t* indices;

  uint32_t key[] = { SHAPE_SPHERE, segmentsH, segmentsV };
  lovrPassDraw(pass, &(DrawInfo) {
    .hash = hash64(key, sizeof(key)),
    .mode = DRAW_TRIANGLES,
    .transform = transform,
    .bounds = (float[6]) { 0.f, 0.f, 0.f, 1.f, 1.f, 1.f },
    .vertex.pointer = (void**) &vertices,
    .vertex.count = vertexCount,
    .index.pointer = (void**) &indices,
    .index.count = indexCount
  });

  // NULL pointers mean the shape was served from the geometry cache.
  if (!vertices) {
    return;
  }

  // Top pole
  *vertices++ = (ShapeVertex) { { 0.f, 1.f, 0.f }, { 0.f, 1.f, 0.f }, { .5f, 0.f } };
  // Latitude rings; position doubles as the normal on a unit sphere.
  for (uint32_t i = 1; i < segmentsV; i++) {
    float v = i / (float) segmentsV;
    float phi = v * (float) M_PI;
    float sinphi = sinf(phi);
    float cosphi = cosf(phi);
    for (uint32_t j = 0; j <= segmentsH; j++) {
      float u = j / (float) segmentsH;
      float theta = u * 2.f * (float) M_PI;
      float sintheta = sinf(theta);
      float costheta = cosf(theta);
      float x = sintheta * sinphi;
      float y = cosphi;
      float z = -costheta * sinphi;
      *vertices++ = (ShapeVertex) { { x, y, z }, { x, y, z }, { u, v } };
    }
  }
  // Bottom pole
  *vertices++ = (ShapeVertex) { { 0.f, -1.f, 0.f }, { 0.f, -1.f, 0.f }, { .5f, 1.f } };

  // Top fan
  for (uint32_t i = 0; i < segmentsH; i++) {
    uint16_t wedge[] = { 0, i + 2, i + 1 };
    memcpy(indices, wedge, sizeof(wedge));
    indices += COUNTOF(wedge);
  }
  // Quads between adjacent latitude rings
  for (uint32_t i = 0; i < segmentsV - 2; i++) {
    for (uint32_t j = 0; j < segmentsH; j++) {
      uint16_t a = 1 + i * (segmentsH + 1) + 0 + j;
      uint16_t b = 1 + i * (segmentsH + 1) + 1 + j;
      uint16_t c = 1 + i * (segmentsH + 1) + 0 + segmentsH + 1 + j;
      uint16_t d = 1 + i * (segmentsH + 1) + 1 + segmentsH + 1 + j;
      uint16_t quad[] = { a, b, c, c, b, d };
      memcpy(indices, quad, sizeof(quad));
      indices += COUNTOF(quad);
    }
  }
  // Bottom fan
  for (uint32_t i = 0; i < segmentsH; i++) {
    uint16_t wedge[] = { vertexCount - 1, vertexCount - 1 - (i + 2), vertexCount - 1 - (i + 1) };
    memcpy(indices, wedge, sizeof(wedge));
    indices += COUNTOF(wedge);
  }
}
2022-06-25 06:01:22 +00:00
// Draws a cylinder of unit radius along the z axis, sweeping from angle1 to
// angle2, optionally with flat end caps.
void lovrPassCylinder(Pass* pass, float* transform, bool capped, float angle1, float angle2, uint32_t segments) {
  // A sweep of a full turn or more collapses to a complete circle
  if (fabsf(angle1 - angle2) >= 2.f * (float) M_PI) {
    angle1 = 0.f;
    angle2 = 2.f * (float) M_PI;
  }

  uint32_t key[] = { SHAPE_CYLINDER, capped, FLOAT_BITS(angle1), FLOAT_BITS(angle2), segments };

  uint32_t vertexCount = 2 * (segments + 1);
  uint32_t indexCount = 6 * segments;
  ShapeVertex* vertices;
  uint16_t* indices;

  if (capped) {
    vertexCount *= 2;  // Cap rims get separate vertices so normals stay flat
    vertexCount += 2;  // Plus one center vertex per cap
    indexCount += 3 * segments * 2;
  }

  lovrPassDraw(pass, &(DrawInfo) {
    .hash = hash64(key, sizeof(key)),
    .mode = DRAW_TRIANGLES,
    .transform = transform,
    .bounds = (float[6]) { 0.f, 0.f, 0.f, 1.f, 1.f, .5f },
    .vertex.pointer = (void**) &vertices,
    .vertex.count = vertexCount,
    .index.pointer = (void**) &indices,
    .index.count = indexCount
  });

  // NULL means cached geometry was reused
  if (!vertices) {
    return;
  }

  float angleShift = (angle2 - angle1) / segments;

  // Tube vertices: a (back, front) pair per column
  for (uint32_t col = 0; col <= segments; col++) {
    float theta = angle1 + col * angleShift;
    float x = cosf(theta);
    float y = sinf(theta);
    *vertices++ = (ShapeVertex) { { x, y, -.5f }, { x, y, 0.f }, { x + .5f, .5f - y } };
    *vertices++ = (ShapeVertex) { { x, y, .5f }, { x, y, 0.f }, { x + .5f, .5f - y } };
  }

  // Tube quads
  for (uint32_t col = 0; col < segments; col++) {
    uint16_t a = col * 2 + 0;
    uint16_t b = col * 2 + 1;
    uint16_t c = col * 2 + 2;
    uint16_t d = col * 2 + 3;
    uint16_t quad[] = { a, c, b, b, c, d };
    memcpy(indices, quad, sizeof(quad));
    indices += COUNTOF(quad);
  }

  if (capped) {
    // Cap centers
    *vertices++ = (ShapeVertex) { { 0.f, 0.f, -.5f }, { 0.f, 0.f, -1.f }, { .5f, .5f } };
    *vertices++ = (ShapeVertex) { { 0.f, 0.f, .5f }, { 0.f, 0.f, 1.f }, { .5f, .5f } };

    // Cap rim vertices with flat z-facing normals
    for (uint32_t col = 0; col <= segments; col++) {
      float theta = angle1 + col * angleShift;
      float x = cosf(theta);
      float y = sinf(theta);
      *vertices++ = (ShapeVertex) { { x, y, -.5f }, { 0.f, 0.f, -1.f }, { x + .5f, y - .5f } };
      *vertices++ = (ShapeVertex) { { x, y, .5f }, { 0.f, 0.f, 1.f }, { x + .5f, y - .5f } };
    }

    // Cap wedges: fans around each center (winding differs per side)
    uint16_t base = 2 * (segments + 1);
    for (uint32_t col = 0; col < segments; col++) {
      uint16_t a = base + 0;
      uint16_t b = base + (col + 1) * 2;
      uint16_t c = base + (col + 2) * 2;
      uint16_t wedge1[] = { a + 0, c + 0, b + 0 };
      uint16_t wedge2[] = { a + 1, b + 1, c + 1 };
      memcpy(indices + 0, wedge1, sizeof(wedge1));
      memcpy(indices + 3, wedge2, sizeof(wedge2));
      indices += 6;
    }
  }
}
2022-07-17 23:38:00 +00:00
// Draws a cone with a unit-radius base in the xy plane and its apex at
// z = -1.  Rim vertices are duplicated (flat base normal + slanted side
// normal) so the crease stays sharp.
void lovrPassCone(Pass* pass, float* transform, uint32_t segments) {
  uint32_t key[] = { SHAPE_CONE, segments };
  uint32_t vertexCount = 2 * segments + 1;
  uint32_t indexCount = 3 * (segments - 2) + 3 * segments;
  ShapeVertex* vertices;
  uint16_t* indices;

  lovrPassDraw(pass, &(DrawInfo) {
    .hash = hash64(key, sizeof(key)),
    .mode = DRAW_TRIANGLES,
    .transform = transform,
    .bounds = (float[6]) { 0.0f, 0.f, -.5f, 1.f, 1.f, .5f },
    .vertex.pointer = (void**) &vertices,
    .vertex.count = vertexCount,
    .index.pointer = (void**) &indices,
    .index.count = indexCount
  });

  // NULL means cached geometry was reused
  if (!vertices) {
    return;
  }

  // First `segments` vertices: base disk; next `segments`: side rim; last: apex
  for (uint32_t i = 0; i < segments; i++) {
    float theta = i * 2.f * (float) M_PI / segments;
    float x = cosf(theta);
    float y = sinf(theta);
    float rsqrt3 = .57735f;  // 1/sqrt(3): side-normal factor for this cone
    float nx = cosf(theta) * rsqrt3;
    float ny = sinf(theta) * rsqrt3;
    float nz = -rsqrt3;
    float u = x + .5f;
    float v = .5f - y;
    vertices[i] = (ShapeVertex) { { x, y, 0.f }, { 0.f, 0.f, 1.f }, { u, v } };
    vertices[segments + i] = (ShapeVertex) { { x, y, 0.f }, { nx, ny, nz }, { u, v } };
  }

  // Apex (degenerate normal; sides carry the shading)
  vertices[2 * segments] = (ShapeVertex) { { 0.f, 0.f, -1.f }, { 0.f, 0.f, 0.f }, { .5f, .5f } };

  // Base: triangle fan over the disk vertices
  for (uint32_t i = 0; i < segments - 2; i++) {
    uint16_t tri[] = { 0, i + 1, i + 2 };
    memcpy(indices, tri, sizeof(tri));
    indices += COUNTOF(tri);
  }

  // Sides: one triangle per rim edge up to the apex
  for (uint32_t i = 0; i < segments; i++) {
    uint16_t tri[] = { segments + (i + 1) % segments, segments + i, vertexCount - 1 };
    memcpy(indices, tri, sizeof(tri));
    indices += COUNTOF(tri);
  }
}
2022-07-09 23:38:25 +00:00
// Draws a capsule along the z axis.  The radius comes from the transform's x
// scale and the half-length from its z scale; both are stripped from the
// matrix and baked into the vertices instead.
void lovrPassCapsule(Pass* pass, float* transform, uint32_t segments) {
  float sx = vec3_length(transform + 0);
  float sy = vec3_length(transform + 4);
  float sz = vec3_length(transform + 8);

  float length = sz * .5f;
  float radius = sx;

  // A zero-length capsule degenerates into a sphere: rebuild the z basis,
  // rotate so the sphere poles align with the capsule axis, and delegate
  if (length == 0.f) {
    float rotation[4];
    vec3_cross(vec3_init(transform + 8, transform + 0), transform + 4);
    vec3_scale(transform + 8, 1.f / radius);
    mat4_rotateQuat(transform, quat_fromAngleAxis(rotation, (float) M_PI / 2.f, 1.f, 0.f, 0.f));
    lovrPassSphere(pass, transform, segments, segments);
    return;
  }

  vec3_scale(transform + 0, 1.f / sx);
  vec3_scale(transform + 4, 1.f / sy);
  vec3_scale(transform + 8, 1.f / sz);

  uint32_t key[] = { SHAPE_CAPSULE, FLOAT_BITS(radius), FLOAT_BITS(length), segments };

  uint32_t rings = segments / 2;
  uint32_t vertexCount = 2 * (1 + rings * (segments + 1));
  uint32_t indexCount = 2 * (3 * segments + 6 * segments * (rings - 1)) + 6 * segments;
  ShapeVertex* vertices;
  uint16_t* indices;

  lovrPassDraw(pass, &(DrawInfo) {
    .hash = hash64(key, sizeof(key)),
    .mode = DRAW_TRIANGLES,
    .transform = transform,
    .bounds = (float[6]) { 0.f, 0.f, 0.f, radius, radius, length + radius },
    .vertex.pointer = (void**) &vertices,
    .vertex.count = vertexCount,
    .index.pointer = (void**) &indices,
    .index.count = indexCount
  });

  // NULL means cached geometry was reused
  if (!vertices) {
    return;
  }

  float tip = length + radius;
  uint32_t h = vertexCount / 2;  // First half: -z hemisphere; second half: +z

  // Hemisphere poles
  vertices[0] = (ShapeVertex) { { 0.f, 0.f, -tip }, { 0.f, 0.f, -1.f }, { .5f, 0.f } };
  vertices[h] = (ShapeVertex) { { 0.f, 0.f, tip }, { 0.f, 0.f, 1.f }, { .5f, 1.f } };
  vertices++;

  // Hemisphere rings, written to both halves in lockstep
  for (uint32_t ring = 1; ring <= rings; ring++) {
    float v = ring / (float) rings;
    float phi = v * (float) M_PI / 2.f;
    float sinphi = sinf(phi);
    float cosphi = cosf(phi);
    for (uint32_t col = 0; col <= segments; col++) {
      float u = col / (float) segments;
      float theta = u * (float) M_PI * 2.f;
      float sintheta = sinf(theta);
      float costheta = cosf(theta);
      float x = costheta * sinphi;
      float y = sintheta * sinphi;
      float z = cosphi;
      vertices[0] = (ShapeVertex) { { x * radius, y * radius, -(length + z * radius) }, { x, y, -z }, { u, v } };
      vertices[h] = (ShapeVertex) { { x * radius, y * radius, (length + z * radius) }, { x, y, z }, { u, 1.f - v } };
      vertices++;
    }
  }

  // i1 fills indices for the -z hemisphere, i2 for the +z half plus the tube
  uint16_t* i1 = indices;
  uint16_t* i2 = indices + (indexCount - 6 * segments) / 2;

  // Pole fans
  for (uint32_t i = 0; i < segments; i++) {
    uint16_t wedge1[] = { 0, 0 + i + 2, 0 + i + 1 };
    uint16_t wedge2[] = { h, h + i + 1, h + i + 2 };
    memcpy(i1, wedge1, sizeof(wedge1));
    memcpy(i2, wedge2, sizeof(wedge2));
    i1 += COUNTOF(wedge1);
    i2 += COUNTOF(wedge2);
  }

  // Quads between hemisphere rings
  for (uint32_t ring = 0; ring < rings - 1; ring++) {
    for (uint32_t col = 0; col < segments; col++) {
      uint16_t a = 1 + ring * (segments + 1) + 0 + col;
      uint16_t b = 1 + ring * (segments + 1) + 1 + col;
      uint16_t c = 1 + ring * (segments + 1) + 0 + segments + 1 + col;
      uint16_t d = 1 + ring * (segments + 1) + 1 + segments + 1 + col;
      uint16_t quad1[] = { a, b, c, c, b, d };
      uint16_t quad2[] = { h + a, h + c, h + b, h + b, h + c, h + d };
      memcpy(i1, quad1, sizeof(quad1));
      memcpy(i2, quad2, sizeof(quad2));
      i1 += COUNTOF(quad1);
      i2 += COUNTOF(quad2);
    }
  }

  // Tube joining the equator ring of each hemisphere
  for (uint32_t i = 0; i < segments; i++) {
    uint16_t a = h - segments - 1 + i;
    uint16_t b = h - segments - 1 + i + 1;
    uint16_t c = vertexCount - segments - 1 + i;
    uint16_t d = vertexCount - segments - 1 + i + 1;
    uint16_t quad[] = { a, b, c, c, b, d };
    memcpy(i2, quad, sizeof(quad));
    i2 += COUNTOF(quad);
  }
}
2022-06-24 02:52:37 +00:00
// Draws a torus in the xy plane.  The major radius comes from the x scale of
// the transform and the tube thickness from its z scale; both are stripped
// from the matrix and baked into the vertices.
void lovrPassTorus(Pass* pass, float* transform, uint32_t segmentsT, uint32_t segmentsP) {
  float sx = vec3_length(transform + 0);
  float sy = vec3_length(transform + 4);
  float sz = vec3_length(transform + 8);
  vec3_scale(transform + 0, 1.f / sx);
  vec3_scale(transform + 4, 1.f / sy);
  vec3_scale(transform + 8, 1.f / sz);

  float radius = sx * .5f;
  float thickness = sz * .5f;

  uint32_t key[] = { SHAPE_TORUS, FLOAT_BITS(radius), FLOAT_BITS(thickness), segmentsT, segmentsP };
  uint32_t vertexCount = segmentsT * segmentsP;
  uint32_t indexCount = segmentsT * segmentsP * 6;
  ShapeVertex* vertices;
  uint16_t* indices;

  lovrPassDraw(pass, &(DrawInfo) {
    .hash = hash64(key, sizeof(key)),
    .mode = DRAW_TRIANGLES,
    .transform = transform,
    .bounds = (float[6]) { 0.f, 0.f, 0.f, radius + thickness, radius + thickness, thickness },
    .vertex.pointer = (void**) &vertices,
    .vertex.count = vertexCount,
    .index.pointer = (void**) &indices,
    .index.count = indexCount
  });

  // NULL means cached geometry was reused
  if (!vertices) {
    return;
  }

  // T and P stand for toroidal and poloidal, or theta and phi
  float dt = (2.f * (float) M_PI) / segmentsT;
  float dp = (2.f * (float) M_PI) / segmentsP;
  for (uint32_t t = 0; t < segmentsT; t++) {
    float theta = t * dt;
    float tx = cosf(theta);
    float ty = sinf(theta);
    for (uint32_t p = 0; p < segmentsP; p++) {
      float phi = p * dp;
      float cosphi = cosf(phi);
      float nx = cosphi * tx;
      float ny = cosphi * ty;
      float nz = sinf(phi);

      *vertices++ = (ShapeVertex) {
        .position = { tx * radius + nx * thickness, ty * radius + ny * thickness, nz * thickness },
        .normal = { nx, ny, nz }
      };

      // Quad between this vertex, its toroidal neighbor, and their poloidal
      // successors (wrapping both directions)
      uint16_t a = (t + 0) * segmentsP + p;
      uint16_t b = (t + 1) % segmentsT * segmentsP + p;
      uint16_t c = (t + 0) * segmentsP + (p + 1) % segmentsP;
      uint16_t d = (t + 1) % segmentsT * segmentsP + (p + 1) % segmentsP;
      uint16_t quad[] = { a, b, c, c, b, d };
      memcpy(indices, quad, sizeof(quad));
      indices += COUNTOF(quad);
    }
  }
}
2022-07-18 02:53:31 +00:00
// Draws one or more colored strings using the pipeline's font (or the default
// font).  Glyph quads are generated into temp-allocator scratch memory, then
// copied into the draw's vertex buffer along with generated quad indices.
void lovrPassText(Pass* pass, ColoredString* strings, uint32_t count, float* transform, float wrap, HorizontalAlign halign, VerticalAlign valign) {
  Font* font = pass->pipeline->font ? pass->pipeline->font : lovrGraphicsGetDefaultFont();

  // Total codepoint count across all colored spans
  size_t totalLength = 0;
  for (uint32_t i = 0; i < count; i++) {
    totalLength += strings[i].length;
  }

  // Scratch glyph vertices live in the temp allocator until the end of the call
  size_t stack = tempPush(&state.allocator);
  GlyphVertex* scratch = tempAlloc(&state.allocator, totalLength * 4 * sizeof(GlyphVertex));

  uint32_t glyphCount;
  uint32_t lineCount;
  float leading = lovrRasterizerGetLeading(font->info.rasterizer) * font->lineSpacing;
  float ascent = lovrRasterizerGetAscent(font->info.rasterizer);
  float scale = 1.f / font->pixelDensity;
  wrap /= scale;

  Material* material;
  // Whether the active camera's projection flips y (projection[5] > 0)
  bool flip = pass->cameras[(pass->cameraCount - 1) * pass->canvas.views].projection[5] > 0.f;
  lovrFontGetVertices(font, strings, count, wrap, halign, valign, scratch, &glyphCount, &lineCount, &material, flip);

  // Scale from pixel space to world space and apply vertical alignment
  mat4_scale(transform, scale, scale, scale);
  float offset = -ascent + valign / 2.f * (leading * lineCount);
  mat4_translate(transform, 0.f, flip ? -offset : offset, 0.f);

  GlyphVertex* vertices;
  uint16_t* indices;

  lovrPassDraw(pass, &(DrawInfo) {
    .mode = DRAW_TRIANGLES,
    .shader = SHADER_FONT,
    .material = font->material,
    .transform = transform,
    .vertex.format = VERTEX_GLYPH,
    .vertex.pointer = (void**) &vertices,
    .vertex.count = glyphCount * 4,
    .index.pointer = (void**) &indices,
    .index.count = glyphCount * 6
  });

  memcpy(vertices, scratch, glyphCount * 4 * sizeof(GlyphVertex));

  // Two triangles per glyph quad
  for (uint32_t i = 0; i < glyphCount * 4; i += 4) {
    uint16_t quad[] = { i + 0, i + 2, i + 1, i + 1, i + 2, i + 3 };
    memcpy(indices, quad, sizeof(quad));
    indices += COUNTOF(quad);
  }

  tempPop(&state.allocator, stack);
}
2022-07-04 07:18:38 +00:00
// Draws a skybox covering the whole view.  2D textures (and no texture) use
// the equirectangular shader; cubemaps use the cubemap shader.  The vertex
// shader generates all geometry, so no vertex data is needed.
void lovrPassSkybox(Pass* pass, Texture* texture) {
  bool equirect = !texture || texture->info.type == TEXTURE_2D;

  lovrPassDraw(pass, &(DrawInfo) {
    .mode = DRAW_TRIANGLES,
    .shader = equirect ? SHADER_EQUIRECT : SHADER_CUBEMAP,
    .material = texture ? lovrTextureToMaterial(texture) : NULL,
    .vertex.format = VERTEX_EMPTY,
    .count = 6
  });
}
2022-06-25 02:59:48 +00:00
// Draws a fullscreen triangle sampling the given texture (array textures get
// their own shader variant).  The vertex shader synthesizes the triangle, so
// no vertex data is needed.
void lovrPassFill(Pass* pass, Texture* texture) {
  bool array = texture && texture->info.type == TEXTURE_ARRAY;

  lovrPassDraw(pass, &(DrawInfo) {
    .mode = DRAW_TRIANGLES,
    .shader = array ? SHADER_FILL_ARRAY : SHADER_FILL_2D,
    .material = texture ? lovrTextureToMaterial(texture) : NULL,
    .vertex.format = VERTEX_EMPTY,
    .count = 3
  });
}
2022-06-22 03:05:57 +00:00
// Draws the embedded Suzanne mesh from monkey.h, expanding its packed u8
// vertex data into ShapeVertex on first use.
void lovrPassMonkey(Pass* pass, float* transform) {
  uint32_t key[] = { SHAPE_MONKEY };
  uint32_t vertexCount = COUNTOF(monkey_vertices) / 6;
  ShapeVertex* vertices;
  uint16_t* indices;

  lovrPassDraw(pass, &(DrawInfo) {
    .hash = hash64(key, sizeof(key)),
    .mode = DRAW_TRIANGLES,
    .vertex.pointer = (void**) &vertices,
    .vertex.count = vertexCount,
    .index.pointer = (void**) &indices,
    .index.count = COUNTOF(monkey_indices),
    .transform = transform,
    .bounds = monkey_bounds
  });

  // NULL means cached geometry was reused
  if (!vertices) {
    return;
  }

  // Manual vertex format conversion to avoid another format (and sn8x3 isn't always supported)
  for (uint32_t i = 0; i < vertexCount; i++) {
    vertices[i] = (ShapeVertex) {
      .position.x = monkey_vertices[6 * i + 0] / 255.f * monkey_bounds[3] * 2.f + monkey_offset[0],
      .position.y = monkey_vertices[6 * i + 1] / 255.f * monkey_bounds[4] * 2.f + monkey_offset[1],
      .position.z = monkey_vertices[6 * i + 2] / 255.f * monkey_bounds[5] * 2.f + monkey_offset[2],
      .normal.x = monkey_vertices[6 * i + 3] / 255.f * 2.f - 1.f,
      .normal.y = monkey_vertices[6 * i + 4] / 255.f * 2.f - 1.f,
      .normal.z = monkey_vertices[6 * i + 5] / 255.f * 2.f - 1.f,
    };
  }

  memcpy(indices, monkey_indices, sizeof(monkey_indices));
}
2023-06-11 05:06:29 +00:00
// Draws a Mesh object, clamping its configured draw range to the available
// index/vertex data before submitting.
void lovrPassDrawMesh(Pass* pass, Mesh* mesh, float* transform, uint32_t instances) {
  // Addressable element count: index count when indexed, otherwise vertex count
  uint32_t limit = mesh->indexCount > 0 ? mesh->indexCount : mesh->vertexBuffer->info.format->length;
  uint32_t start = MIN(mesh->drawStart, limit - 1);
  uint32_t count = mesh->drawCount > 0 ? MIN(mesh->drawCount, limit - start) : limit - start;

  // Push any pending CPU-side vertex edits to the GPU buffers
  lovrMeshFlush(mesh);

  lovrPassDraw(pass, &(DrawInfo) {
    .mode = mesh->mode,
    .transform = transform,
    .bounds = mesh->hasBounds ? mesh->bounds : NULL,
    .material = mesh->material,
    .vertex.buffer = mesh->vertexBuffer,
    .index.buffer = mesh->indexBuffer,
    .start = start,
    .count = count,
    .instances = instances
  });
}
// Recursively draws a model node: submits each of the node's primitives, then
// recurses into its children.
static void drawNode(Pass* pass, Model* model, uint32_t index, uint32_t instances) {
  ModelNode* node = &model->info.data->nodes[index];
  mat4 globalTransform = model->globalTransforms + 16 * index;

  for (uint32_t p = 0; p < node->primitiveCount; p++) {
    DrawInfo draw = model->draws[node->primitiveIndex + p];
    // Skinned nodes (skin != ~0u) keep their prebuilt transform; static nodes
    // use the node's global transform
    if (node->skin == ~0u) draw.transform = globalTransform;
    draw.instances = instances;
    lovrPassDraw(pass, &draw);
  }

  for (uint32_t c = 0; c < node->childCount; c++) {
    drawNode(pass, model, node->children[c], instances);
  }
}
2023-06-11 05:06:29 +00:00
// Draws a Model: re-skins animated vertices, refreshes stale node transforms,
// then walks the node hierarchy under the given transform.
void lovrPassDrawModel(Pass* pass, Model* model, float* transform, uint32_t instances) {
  lovrModelAnimateVertices(model);

  if (model->transformsDirty) {
    updateModelTransforms(model, model->info.data->rootNode, (float[]) MAT4_IDENTITY);
    model->transformsDirty = false;
  }

  lovrPassPush(pass, STACK_TRANSFORM);
  lovrPassTransform(pass, transform);
  drawNode(pass, model, model->info.data->rootNode, instances);
  lovrPassPop(pass, STACK_TRANSFORM);
}
2023-06-10 03:15:22 +00:00
// Draws a textured unit quad, scaling the transform's y basis vector so the
// quad matches the texture's aspect ratio.
void lovrPassDrawTexture(Pass* pass, Texture* texture, float* transform) {
  uint32_t key[] = { SHAPE_PLANE, STYLE_FILL, 1, 1 };
  ShapeVertex* vertices;
  uint16_t* indices;

  float aspect = (float) texture->info.height / texture->info.width;
  for (uint32_t i = 4; i < 8; i++) {
    transform[i] *= aspect;
  }

  uint32_t vertexCount = 4;
  uint32_t indexCount = 6;

  lovrPassDraw(pass, &(DrawInfo) {
    .hash = hash64(key, sizeof(key)),
    .mode = DRAW_TRIANGLES,
    .transform = transform,
    .bounds = (float[6]) { 0.f, 0.f, 0.f, .5f, .5f, 0.f },
    .material = lovrTextureToMaterial(texture),
    .vertex.pointer = (void**) &vertices,
    .vertex.count = vertexCount,
    .index.pointer = (void**) &indices,
    .index.count = indexCount
  });

  // Only fill in the data when the geometry wasn't already cached
  if (vertices) {
    ShapeVertex corners[] = {
      { { -.5f, .5f, 0.f }, { 0.f, 0.f, 1.f }, { 0.f, 0.f } },
      { { .5f, .5f, 0.f }, { 0.f, 0.f, 1.f }, { 1.f, 0.f } },
      { { -.5f, -.5f, 0.f }, { 0.f, 0.f, 1.f }, { 0.f, 1.f } },
      { { .5f, -.5f, 0.f }, { 0.f, 0.f, 1.f }, { 1.f, 1.f } }
    };
    uint16_t order[] = { 0, 2, 1, 1, 2, 3 };
    memcpy(vertices, corners, sizeof(corners));
    memcpy(indices, order, sizeof(order));
  }
}
2024-01-18 00:05:37 +00:00
// Draws raw vertex/index buffers with the pipeline's current draw mode.
// A count of ~0u means "everything after start"; explicit counts are
// validated against the source buffer's length.
void lovrPassMesh(Pass* pass, Buffer* vertices, Buffer* indices, float* transform, uint32_t start, uint32_t count, uint32_t instances, uint32_t baseVertex) {
  lovrCheck(!indices || indices->info.format, "Buffer must have been created with a format to use it as a%s buffer", "n index");
  lovrCheck(!vertices || vertices->info.format, "Buffer must have been created with a format to use it as a%s buffer", " vertex");
  lovrCheck(!vertices || !vertices->complexFormat, "Vertex buffers must use a simple format without nested types or arrays");

  if (count == ~0u) {
    // Default: draw everything after `start` in the governing buffer
    Buffer* buffer = indices ? indices : vertices;
    count = buffer ? buffer->info.format->length - start : 0;
  } else if (indices) {
    lovrCheck(count <= indices->info.format->length - start, "Mesh draw range exceeds index buffer size");
  } else if (vertices) {
    lovrCheck(count <= vertices->info.format->length - start, "Mesh draw range exceeds vertex buffer size");
  }

  lovrPassDraw(pass, &(DrawInfo) {
    .mode = pass->pipeline->mode,
    .vertex.buffer = vertices,
    .index.buffer = indices,
    .transform = transform,
    .start = start,
    .count = count,
    .instances = instances,
    .baseVertex = baseVertex
  });
}
2022-08-06 05:11:06 +00:00
// Records an indirect draw whose parameters are sourced from `draws` at
// runtime.  `stride` defaults to the indirect command size (20 bytes for
// indexed draws, 16 otherwise).  Requires a custom shader to be bound.
void lovrPassMeshIndirect(Pass* pass, Buffer* vertices, Buffer* indices, Buffer* draws, uint32_t count, uint32_t offset, uint32_t stride) {
  stride = stride ? stride : (indices ? 20 : 16);
  Shader* shader = pass->pipeline->shader;
  lovrCheck(shader, "A custom Shader must be bound to source draws from a Buffer");
  lovrCheck(offset % 4 == 0, "Draw Buffer offset must be a multiple of 4");
  // 64-bit math so count * stride can't wrap, and <= so a buffer that is
  // exactly big enough is accepted (the previous < rejected it)
  lovrCheck((uint64_t) offset + (uint64_t) count * stride <= draws->info.size, "Draw buffer range exceeds the size of the buffer");

  DrawInfo info = {
    .mode = pass->pipeline->mode,
    .vertex.buffer = vertices,
    .index.buffer = indices
  };

  // Grow the draw array geometrically when full
  if (pass->drawCount >= pass->drawCapacity) {
    lovrAssert(pass->drawCount < 1 << 16, "Pass has too many draws!");
    pass->drawCapacity = pass->drawCapacity > 0 ? pass->drawCapacity << 1 : 1;
    Draw* newDraws = lovrPassAllocate(pass, pass->drawCapacity * sizeof(Draw));
    memcpy(newDraws, pass->draws, pass->drawCount * sizeof(Draw));
    pass->draws = newDraws;
  }

  Draw* previous = pass->drawCount > 0 ? &pass->draws[pass->drawCount - 1] : NULL;
  Draw* draw = &pass->draws[pass->drawCount++];

  draw->flags = DRAW_INDIRECT;
  draw->tally = pass->tally.active ? pass->tally.count : ~0u;  // ~0u: no tally
  draw->camera = pass->cameraCount - 1;
  pass->flags &= ~DIRTY_CAMERA;

  draw->shader = shader;
  lovrRetain(shader);

  draw->material = pass->pipeline->material;
  if (!draw->material) draw->material = state.defaultMaterial;
  trackMaterial(pass, draw->material);

  draw->indirect.buffer = draws->gpu;
  draw->indirect.offset = draws->base + offset;
  draw->indirect.count = count;
  draw->indirect.stride = stride;

  lovrPassResolvePipeline(pass, &info, draw, previous);
  lovrPassResolveVertices(pass, &info, draw);
  draw->bundleInfo = lovrPassResolveBindings(pass, shader, previous ? previous->bundleInfo : NULL);

  // Uniforms only need re-uploading when they changed since the last draw
  if (shader->uniformCount > 0 && pass->flags & DIRTY_UNIFORMS) {
    lovrPassResolveUniforms(pass, shader, &draw->uniformBuffer, &draw->uniformOffset);
    pass->flags &= ~DIRTY_UNIFORMS;
  } else {
    draw->uniformBuffer = previous ? previous->uniformBuffer : NULL;
    draw->uniformOffset = previous ? previous->uniformOffset : 0;
  }

  mat4_init(draw->transform, pass->transform);
  memcpy(draw->color, pass->pipeline->color, 4 * sizeof(float));

  trackBuffer(pass, draws, GPU_PHASE_INDIRECT, GPU_CACHE_INDIRECT);
}
2023-06-01 01:56:09 +00:00
// Begins a new tally (GPU query) and returns its index within the pass.
// Only one tally may be active at a time.
uint32_t lovrPassBeginTally(Pass* pass) {
  lovrCheck(pass->tally.count < MAX_TALLIES, "Pass has too many tallies!");
  lovrCheck(!pass->tally.active, "Trying to start a tally, but the previous tally wasn't finished");
  pass->tally.active = true;
  return pass->tally.count;
}
2023-06-01 01:56:09 +00:00
// Finishes the active tally and returns its index, bumping the tally count.
uint32_t lovrPassFinishTally(Pass* pass) {
  lovrCheck(pass->tally.active, "Trying to finish a tally, but no tally was started");
  pass->tally.active = false;
  return pass->tally.count++;
}
// Returns the buffer tally results are copied into (may be NULL), writing
// the configured byte offset to *offset.
Buffer* lovrPassGetTallyBuffer(Pass* pass, uint32_t* offset) {
  *offset = pass->tally.bufferOffset;
  return pass->tally.buffer;
}
// Sets the buffer (and byte offset) that tally results get copied to.
// Takes a reference on the new buffer and drops the reference on the old one.
void lovrPassSetTallyBuffer(Pass* pass, Buffer* buffer, uint32_t offset) {
  lovrCheck(offset % 4 == 0, "Tally buffer offset must be a multiple of 4");
  // Retain before releasing: if the caller passes the buffer that is already
  // set and the pass holds the only reference, release-first would destroy it
  lovrRetain(buffer);
  lovrRelease(pass->tally.buffer, lovrBufferDestroy);
  pass->tally.buffer = buffer;
  pass->tally.bufferOffset = offset;
}
2022-06-04 18:54:05 +00:00
// Records a compute dispatch, either with explicit workgroup counts (x, y, z)
// or sourced indirectly from `indirect` at the given byte offset.
void lovrPassCompute(Pass* pass, uint32_t x, uint32_t y, uint32_t z, Buffer* indirect, uint32_t offset) {
  // Grow the compute array whenever the count reaches a power of two
  if ((pass->computeCount & (pass->computeCount - 1)) == 0) {
    Compute* computes = lovrPassAllocate(pass, MAX(pass->computeCount << 1, 1) * sizeof(Compute));
    memcpy(computes, pass->computes, pass->computeCount * sizeof(Compute));
    pass->computes = computes;
  }

  Compute* previous = pass->computeCount > 0 ? &pass->computes[pass->computeCount - 1] : NULL;
  Compute* compute = &pass->computes[pass->computeCount++];
  Shader* shader = pass->pipeline->shader;

  // Check `shader` for NULL before dereferencing it: no shader may be bound
  // at all (lovrPassMeshIndirect performs the same explicit NULL check)
  lovrCheck(shader && shader->info.type == SHADER_COMPUTE, "To run a compute shader, a compute shader must be active");
  lovrCheck(x <= state.limits.workgroupCount[0], "Compute %s count exceeds workgroupCount limit", "x");
  lovrCheck(y <= state.limits.workgroupCount[1], "Compute %s count exceeds workgroupCount limit", "y");
  lovrCheck(z <= state.limits.workgroupCount[2], "Compute %s count exceeds workgroupCount limit", "z");

  compute->flags = 0;
  compute->shader = shader;
  lovrRetain(shader);

  compute->bundleInfo = lovrPassResolveBindings(pass, shader, previous ? previous->bundleInfo : NULL);

  // Uniforms only need re-uploading when they changed since the last dispatch
  if (shader->uniformCount > 0 && pass->flags & DIRTY_UNIFORMS) {
    lovrPassResolveUniforms(pass, shader, &compute->uniformBuffer, &compute->uniformOffset);
    pass->flags &= ~DIRTY_UNIFORMS;
  } else {
    compute->uniformBuffer = previous ? previous->uniformBuffer : NULL;
    compute->uniformOffset = previous ? previous->uniformOffset : 0;
  }

  if (indirect) {
    compute->flags |= COMPUTE_INDIRECT;
    compute->indirect.buffer = indirect->gpu;
    compute->indirect.offset = indirect->base + offset;
    trackBuffer(pass, indirect, GPU_PHASE_INDIRECT, GPU_CACHE_INDIRECT);
  } else {
    compute->x = x;
    compute->y = y;
    compute->z = z;
  }
}
2023-05-03 23:45:01 +00:00
// Marks the most recent compute dispatch as a barrier point, so later
// computes in the pass observe its results.  No-op when nothing has been
// dispatched yet.
void lovrPassBarrier(Pass* pass) {
  if (pass->computeCount > 0) {
    pass->computes[pass->computeCount - 1].flags |= COMPUTE_BARRIER;
  }
}
2022-04-21 07:27:13 +00:00
// Helpers
2023-04-30 01:33:58 +00:00
// Bump-allocates `size` bytes (8-byte aligned) from the temp allocator,
// committing more virtual memory (doubling) as needed.  Returns NULL for
// zero-sized requests.  Freed wholesale via tempPush/tempPop.
static void* tempAlloc(Allocator* allocator, size_t size) {
  if (size == 0) {
    return NULL;
  }

  // Double the committed region until the request fits, up to the limit
  while (allocator->cursor + size > allocator->length) {
    lovrAssert(allocator->length << 1 <= allocator->limit, "Out of memory");
    os_vm_commit(allocator->memory + allocator->length, allocator->length);
    allocator->length <<= 1;
  }

  // The cursor is a size_t offset (see tempPush); keeping the aligned copy in
  // size_t avoids truncating offsets past 4GB (previously uint32_t)
  size_t cursor = ALIGN(allocator->cursor, 8);
  allocator->cursor = cursor + size;
  return allocator->memory + cursor;
}
2023-04-30 01:33:58 +00:00
// Records the current temp-allocator offset so tempPop can rewind to it.
static size_t tempPush(Allocator* allocator) {
  return allocator->cursor;
}
2023-04-30 01:33:58 +00:00
// Rewinds the temp allocator to a mark returned by tempPush, releasing
// everything allocated since that mark.
static void tempPop(Allocator* allocator, size_t stack) {
  allocator->cursor = stack;
}
2023-11-30 08:14:06 +00:00
// Indexes into the pipeline arena.  gpu_pipeline's size is only known at
// runtime (gpu_sizeof_pipeline), so the arithmetic is done on a char*.
static gpu_pipeline* getPipeline(uint32_t index) {
  char* base = (char*) state.pipelines;
  return (gpu_pipeline*) (base + index * gpu_sizeof_pipeline());
}
2023-12-30 20:39:50 +00:00
// Returns a buffer block of at least `size` bytes for the given buffer type.
// The head of the freelist is recycled when it is big enough and the GPU has
// finished using it; otherwise a fresh block (minimum 4MB) is allocated.
static BufferBlock* getBlock(gpu_buffer_type type, uint32_t size) {
  BufferBlock* recycled = state.bufferAllocators[type].freelist;

  if (recycled && recycled->size >= size && gpu_is_complete(recycled->tick)) {
    state.bufferAllocators[type].freelist = recycled->next;
    recycled->next = NULL;
    return recycled;
  }

  // The gpu_buffer handle lives in the same allocation, right after the block
  BufferBlock* block = lovrMalloc(sizeof(BufferBlock) + gpu_sizeof_buffer());
  block->handle = (gpu_buffer*) (block + 1);
  block->size = MAX(size, 1 << 22);
  block->next = NULL;
  block->ref = 0;

  gpu_buffer_info info = {
    .type = type,
    .size = block->size,
    .pointer = &block->pointer,
    .label = "Buffer Block"
  };

  gpu_buffer_init(block->handle, &info);
  return block;
}
// Appends a block to the tail of the allocator's freelist, so the oldest
// blocks (most likely finished on the GPU) are recycled first.
static void freeBlock(BufferAllocator* allocator, BufferBlock* block) {
  block->next = NULL;

  BufferBlock** tail = &allocator->freelist;
  while (*tail != NULL) {
    tail = (BufferBlock**) &(*tail)->next;
  }

  *tail = block;
}
// Sub-allocates `size` bytes from the allocator's current block, aligned to
// `align`.  When the current block is exhausted it is retired (non-static
// blocks go on the freelist, stamped with the current tick so getBlock knows
// when the GPU is done with them) and a new block is fetched.
static BufferView allocateBuffer(BufferAllocator* allocator, gpu_buffer_type type, uint32_t size, size_t align) {
  // Round the cursor up to the requested alignment
  uint32_t cursor = (uint32_t) ((allocator->cursor + (align - 1)) / align * align);
  BufferBlock* block = allocator->current;

  if (!block || cursor + size > block->size) {
    // Static blocks are never recycled here; others return to the freelist
    if (block && type != GPU_BUFFER_STATIC) {
      block->tick = state.tick;
      freeBlock(allocator, block);
    }
    block = getBlock(type, size);
    allocator->current = block;
    cursor = 0;
  }

  allocator->cursor = cursor + size;

  return (BufferView) {
    .block = block,
    .buffer = block->handle,
    .offset = cursor,
    .extent = size,
    // Mapped CPU pointer; NULL when the block's memory is not host-visible
    .pointer = block->pointer ? (char*) block->pointer + cursor : NULL
  };
}
2023-04-28 02:48:12 +00:00
2023-12-30 20:39:50 +00:00
// Convenience wrapper: sub-allocates from the global allocator for `type`.
static BufferView getBuffer(gpu_buffer_type type, uint32_t size, size_t align) {
  BufferAllocator* allocator = &state.bufferAllocators[type];
  return allocateBuffer(allocator, type, size, align);
}
2022-07-04 00:26:31 +00:00
// qsort-style comparator for uint64_t keys; returns -1, 0, or 1.
static int u64cmp(const void* a, const void* b) {
  const uint64_t* x = a;
  const uint64_t* y = b;
  if (*x < *y) return -1;
  if (*x > *y) return 1;
  return 0;
}
2023-12-30 22:17:20 +00:00
// Greatest common divisor (Euclid's algorithm, iterative form).
static uint32_t gcd(uint32_t a, uint32_t b) {
  while (b != 0) {
    uint32_t r = a % b;
    a = b;
    b = r;
  }
  return a;
}
// Least common multiple.  Divides by the gcd before multiplying to reduce
// overflow risk; the gcd is computed inline so the function is self-contained.
static uint32_t lcm(uint32_t a, uint32_t b) {
  uint32_t x = a, y = b;
  while (y != 0) {
    uint32_t r = x % y;
    x = y;
    y = r;
  }
  return (a / x) * b;
}
2022-04-29 05:30:31 +00:00
// Starts a new GPU frame if one isn't already in progress: acquires the next
// tick, opens the internal transfer stream, clears the frame-scoped barriers,
// resets the scratch allocator, and drains any completed readbacks.
static void beginFrame(void) {
  if (state.active) {
    return;
  }

  state.active = true;
  state.tick = gpu_begin();
  state.stream = gpu_stream_begin("Internal");
  memset(&state.barrier, 0, sizeof(gpu_barrier));
  memset(&state.transferBarrier, 0, sizeof(gpu_barrier));
  state.allocator.cursor = 0;
  processReadbacks();
}
2024-01-17 23:41:27 +00:00
// When a Texture is garbage collected, if it has any transfer operations recorded to state.stream,
// those transfers need to be submitted before it gets destroyed. The allocator offset is saved and
// restored, which is pretty gross, but we don't want to invalidate temp memory (currently this is
// only a problem for Font: when the font's atlas gets destroyed, it could invalidate the temp
// memory used by Font:getLines and Pass:text).
2024-03-23 20:12:26 +00:00
static void flushTransfers ( void ) {
2023-12-03 01:27:31 +00:00
if ( state . active ) {
2023-12-03 01:11:22 +00:00
size_t cursor = state . allocator . cursor ;
Fix morgue overflow;
The morgue is a fixed-size queue for GPU resources that are waiting to
be destroyed. There's been an annoying issue with it for a while where
destroying too many objects at once will trigger a "Morgue overflow!"
error. Even innocuous projects that create more than 1024 textures will
see this during a normal quit.
One way to solve this problem is to make the queue unbounded instead of
bounded. However, this can hide problems and lead to more catastrophic
failure modes.
A better solution is to add "backpressure", where we avoid putting
things in the queue if it's full, or find some way to deal with them.
In this case it means finding a way to destroy stuff in the morgue when
it's full, to make space for more victims.
We weren't able to add backpressure reliably before, because command
buffers could have commands that reference the condemned resources.
This was mostly a problem for texture transfers -- if you create
thousands of textures in a loop, we'd have a giant command buffer with
commands to transfer pixels to the textures. If these textures were
destroyed before submitting anything, the morgue would fill up, and we
wouldn't have any way to clear space because there was still a pending
command buffer that needs to act on the textures!
A simple change is to flush all pending transfers whenever a buffer or
texture is destroyed. This lets us add backpressure to the morgue
because we can guarantee that there are no pending command buffers that
refer to an object in the morgue.
For backpressure, we try to destroy the oldest object in the morgue if
the GPU is done using it. If that doesn't work, we'll wait on the fence
for its tick and destroy it. This *should* always work, although in an
extreme case you could vkDeviceWaitIdle and clear out the entire morgue.
It should also be noted that in general command buffers need to be
flushed when destroying objects that they refer to. However, for our
particular usage patterns, we only need to flush state.stream when a
buffer or texture is destroyed. Pass objects already refcount their
buffers and textures and their commands are software command buffers, so
they don't require any special handling. Other objects like shaders,
pipelines, descriptor set layouts, etc. all survive until shutdown, so
those don't impact anything either.
2023-11-30 13:45:13 +00:00
lovrGraphicsSubmit ( NULL , 0 ) ;
2023-12-03 01:11:22 +00:00
beginFrame ( ) ;
state . allocator . cursor = cursor ;
Fix morgue overflow;
The morgue is a fixed-size queue for GPU resources that are waiting to
be destroyed. There's been an annoying issue with it for a while where
destroying too many objects at once will trigger a "Morgue overflow!"
error. Even innocuous projects that create more than 1024 textures will
see this during a normal quit.
One way to solve this problem is to make the queue unbounded instead of
bounded. However, this can hide problems and lead to more catastrophic
failure modes.
A better solution is to add "backpressure", where we avoid putting
things in the queue if it's full, or find some way to deal with them.
In this case it means finding a way to destroy stuff in the morgue when
it's full, to make space for more victims.
We weren't able to add backpressure reliably before, because command
buffers could have commands that reference the condemned resources.
This was mostly a problem for texture transfers -- if you create
thousands of textures in a loop, we'd have a giant command buffer with
commands to transfer pixels to the textures. If these textures were
destroyed before submitting anything, the morgue would fill up, and we
wouldn't have any way to clear space because there was still a pending
command buffer that needs to act on the textures!
A simple change is to flush all pending transfers whenever a buffer or
texture is destroyed. This lets us add backpressure to the morgue
because we can guarantee that there are no pending command buffers that
refer to an object in the morgue.
For backpressure, we try to destroy the oldest object in the morgue if
the GPU is done using it. If that doesn't work, we'll wait on the fence
for its tick and destroy it. This *should* always work, although in an
extreme case you could vkDeviceWaitIdle and clear out the entire morgue.
It should also be noted that in general command buffers need to be
flushed when destroying objects that they refer to. However, for our
particular usage patterns, we only need to flush state.stream when a
buffer or texture is destroyed. Pass objects already refcount their
buffers and textures and their commands are software command buffers, so
they don't require any special handling. Other objects like shaders,
pipelines, descriptor set layouts, etc. all survive until shutdown, so
those don't impact anything either.
2023-11-30 13:45:13 +00:00
}
}
2022-07-14 07:05:58 +00:00
// Completes readbacks whose GPU work has finished, oldest-first.  Copies the
// mapped staging memory into the destination Blob or Image, or converts
// timestamp pairs into per-pass GPU durations, then releases the Readback.
static void processReadbacks(void) {
  while (state.oldestReadback && gpu_is_complete(state.oldestReadback->tick)) {
    Readback* readback = state.oldestReadback;

    switch (readback->type) {
      case READBACK_BUFFER:
        memcpy(readback->blob->data, readback->view.pointer, readback->view.extent);
        break;
      case READBACK_TEXTURE:;
        size_t size = lovrImageGetLayerSize(readback->image, 0);
        void* data = lovrImageGetLayerData(readback->image, 0, 0);
        memcpy(data, readback->view.pointer, size);
        break;
      case READBACK_TIMESTAMP:;
        uint32_t* timestamps = readback->view.pointer;
        for (uint32_t i = 0; i < readback->count; i++) {
          Pass* pass = readback->times[i].pass;
          pass->stats.submitTime = readback->times[i].cpuTime;
          // Timestamps come in begin/end pairs; scale ticks to seconds
          pass->stats.gpuTime = (timestamps[2 * i + 1] - timestamps[2 * i + 0]) * state.limits.timestampPeriod / 1e9;
        }
        break;
      default: break;
    }

    Readback* next = readback->next;
    lovrRelease(readback, lovrReadbackDestroy);
    state.oldestReadback = next;
  }

  // If the list drained completely, there is no newest readback either
  if (!state.oldestReadback) {
    state.newestReadback = NULL;
  }
}
2024-01-12 01:22:58 +00:00
// Returns a gpu_pass matching the Canvas's attachment formats, sample count,
// view count, and load/resolve configuration.  Passes are cached by hashing
// the fully-populated gpu_pass_info.
static gpu_pass* getPass(Canvas* canvas) {
  gpu_pass_info info = { 0 };

  for (uint32_t i = 0; i < canvas->count; i++) {
    Texture* texture = canvas->color[i].texture;
    info.color[i].format = (gpu_texture_format) texture->info.format;
    info.color[i].srgb = texture->info.srgb;
    // Multisample resolve always clears the (scratch) MSAA attachment
    info.color[i].load = canvas->resolve ? GPU_LOAD_OP_CLEAR : (gpu_load_op) canvas->color[i].load;
  }

  DepthAttachment* depth = &canvas->depth;

  if (depth->texture || depth->format) {
    info.depth.format = (gpu_texture_format) (depth->texture ? depth->texture->info.format : depth->format);
    info.depth.load = canvas->resolve ? GPU_LOAD_OP_CLEAR : (gpu_load_op) depth->load;
    // Depth contents are only kept when the user supplied a depth texture
    info.depth.save = depth->texture ? GPU_SAVE_OP_KEEP : GPU_SAVE_OP_DISCARD;
    info.depth.stencilLoad = info.depth.load;
    info.depth.stencilSave = info.depth.save;
  }

  info.colorCount = canvas->count;
  info.samples = canvas->samples;
  info.views = canvas->views;
  info.resolveColor = canvas->resolve;
  info.resolveDepth = canvas->resolve && !!depth->texture;
  info.surface = canvas->count > 0 && canvas->color[0].texture == state.window;

  uint64_t hash = hash64(&info, sizeof(info));
  uint64_t entry = map_get(&state.passLookup, hash);

  if (entry != MAP_NIL) {
    return (gpu_pass*) (uintptr_t) entry;
  }

  gpu_pass* pass = lovrMalloc(gpu_sizeof_pass());
  gpu_pass_init(pass, &info);
  map_set(&state.passLookup, hash, (uint64_t) (uintptr_t) pass);
  return pass;
}
2022-08-07 01:05:30 +00:00
// Returns the index of a cached descriptor set layout matching the given
// slots, creating and caching a new one on first use.  (The previous version
// declared an outer `index` that was immediately shadowed by the loop
// variable; the dead declaration is removed.)
static size_t getLayout(gpu_slot* slots, uint32_t count) {
  uint64_t hash = hash64(slots, count * sizeof(gpu_slot));

  // Reuse an existing layout with identical slots
  for (size_t i = 0; i < state.layouts.length; i++) {
    if (state.layouts.data[i].hash == hash) {
      return i;
    }
  }

  gpu_layout_info info = {
    .slots = slots,
    .count = count
  };

  gpu_layout* handle = lovrMalloc(gpu_sizeof_layout());
  gpu_layout_init(handle, &info);

  Layout layout = {
    .hash = hash,
    .gpu = handle
  };

  size_t index = state.layouts.length;
  arr_push(&state.layouts, layout);
  return index;
}
2023-10-02 16:07:50 +00:00
// Grabs a descriptor set (bundle) for the given layout and writes `bindings`
// into it.  Each layout keeps a list of fixed-size pools; exhausted pools are
// rotated to the tail and recycled once the GPU has finished with them.
static gpu_bundle* getBundle(size_t layoutIndex, gpu_binding* bindings, uint32_t count) {
  Layout* layout = &state.layouts.data[layoutIndex];
  BundlePool* pool = layout->head;
  const uint32_t POOL_SIZE = 512;
  gpu_bundle* bundle = NULL;

  if (pool) {
    // Fast path: the head pool still has unused bundles
    if (pool->cursor < POOL_SIZE) {
      bundle = (gpu_bundle*) ((char*) pool->bundles + gpu_sizeof_bundle() * pool->cursor++);
      goto write;
    }

    // If the pool's closed, move it to the end of the list and try to use the next pool
    layout->tail->next = pool;
    layout->tail = pool;
    layout->head = pool->next;
    pool->next = NULL;
    pool->tick = state.tick; // Pool is reusable once this tick completes
    pool = layout->head;

    if (pool && gpu_is_complete(pool->tick)) {
      bundle = pool->bundles;
      pool->cursor = 1; // Bundle 0 is the one being handed out
      goto write;
    }
  }

  // If no pool was available, make a new one
  pool = lovrMalloc(sizeof(BundlePool));
  gpu_bundle_pool* gpu = lovrMalloc(gpu_sizeof_bundle_pool());
  gpu_bundle* bundles = lovrMalloc(POOL_SIZE * gpu_sizeof_bundle());
  pool->gpu = gpu;
  pool->bundles = bundles;
  pool->cursor = 1; // Bundle 0 is the one being handed out
  pool->next = layout->head;

  gpu_bundle_pool_info info = {
    .bundles = pool->bundles,
    .layout = layout->gpu,
    .count = POOL_SIZE
  };

  gpu_bundle_pool_init(pool->gpu, &info);
  layout->head = pool;
  if (!layout->tail) layout->tail = pool;
  bundle = pool->bundles;

write:
  gpu_bundle_write(&bundle, &(gpu_bundle_info) { layout->gpu, bindings, count }, 1);
  return bundle;
}
2024-03-02 01:13:06 +00:00
// Returns a temporary render-target texture matching the canvas dimensions
// and the requested format/srgb.  Scratch textures are cached by a hash of
// their parameters and reused across frames; entries untouched for more than
// 16 ticks are evicted and their slot reused.
static gpu_texture* getScratchTexture(gpu_stream* stream, Canvas* canvas, TextureFormat format, bool srgb) {
  uint16_t key[] = { canvas->width, canvas->height, canvas->views, format, srgb, canvas->samples };
  uint32_t hash = (uint32_t) hash64(key, sizeof(key));

  // Find a matching scratch texture that hasn't been used this frame
  for (uint32_t i = 0; i < state.scratchTextures.length; i++) {
    if (state.scratchTextures.data[i].hash == hash && state.scratchTextures.data[i].tick != state.tick) {
      return state.scratchTextures.data[i].texture;
    }
  }

  // Find something to evict
  ScratchTexture* scratch = NULL;
  for (uint32_t i = 0; i < state.scratchTextures.length; i++) {
    if (state.tick - state.scratchTextures.data[i].tick > 16) {
      scratch = &state.scratchTextures.data[i];
      break;
    }
  }

  if (scratch) {
    gpu_texture_destroy(scratch->texture);
  } else {
    // Nothing evictable; append a new entry instead
    arr_expand(&state.scratchTextures, 1);
    scratch = &state.scratchTextures.data[state.scratchTextures.length++];
    scratch->texture = lovrCalloc(gpu_sizeof_texture());
  }

  gpu_texture_info info = {
    .type = GPU_TEXTURE_ARRAY,
    .format = (gpu_texture_format) format,
    .srgb = srgb,
    .size = { canvas->width, canvas->height, canvas->views },
    .mipmaps = 1,
    .samples = canvas->samples,
    .usage = GPU_TEXTURE_RENDER,
    .upload.stream = stream
  };

  gpu_texture_init(scratch->texture, &info);
  scratch->hash = hash;
  scratch->tick = state.tick;
  return scratch->texture;
}
2022-12-10 19:20:56 +00:00
// Whether the format has a depth (and possibly stencil) aspect.
static bool isDepthFormat(TextureFormat format) {
  switch (format) {
    case FORMAT_D16:
    case FORMAT_D32F:
    case FORMAT_D24S8:
    case FORMAT_D32FS8:
      return true;
    default:
      return false;
  }
}
2023-11-02 20:26:49 +00:00
// Whether an sRGB variant of the format exists (the 8-bit color formats plus
// the BC/ASTC compressed color formats listed below).
static bool supportsSRGB(TextureFormat format) {
  switch (format) {
    case FORMAT_R8:
    case FORMAT_RG8:
    case FORMAT_RGBA8:
    case FORMAT_BC1:
    case FORMAT_BC2:
    case FORMAT_BC3:
    case FORMAT_BC7:
    case FORMAT_ASTC_4x4:
    case FORMAT_ASTC_5x4:
    case FORMAT_ASTC_5x5:
    case FORMAT_ASTC_6x5:
    case FORMAT_ASTC_6x6:
    case FORMAT_ASTC_8x5:
    case FORMAT_ASTC_8x6:
    case FORMAT_ASTC_8x8:
    case FORMAT_ASTC_10x5:
    case FORMAT_ASTC_10x6:
    case FORMAT_ASTC_10x8:
    case FORMAT_ASTC_10x10:
    case FORMAT_ASTC_12x10:
    case FORMAT_ASTC_12x12:
      return true;
    default:
      return false;
  }
}
2022-04-30 03:38:34 +00:00
// Returns number of bytes of a 3D texture region of a given format.
// Uncompressed formats are w*h*d*bytesPerTexel; block-compressed formats
// (BC: 4x4 blocks, ASTC: AxB blocks per the format name) round each
// dimension up to a whole number of blocks.
static uint32_t measureTexture(TextureFormat format, uint32_t w, uint32_t h, uint32_t d) {
  switch (format) {
    // 1 byte per texel
    case FORMAT_R8: return w * h * d;
    // 2 bytes per texel
    case FORMAT_RG8:
    case FORMAT_R16:
    case FORMAT_R16F:
    case FORMAT_RGB565:
    case FORMAT_RGB5A1:
    case FORMAT_D16: return w * h * d * 2;
    // 4 bytes per texel
    case FORMAT_RGBA8:
    case FORMAT_RG16:
    case FORMAT_RG16F:
    case FORMAT_R32F:
    case FORMAT_RG11B10F:
    case FORMAT_RGB10A2:
    case FORMAT_D24S8:
    case FORMAT_D32F: return w * h * d * 4;
    // 32-bit depth plus 8-bit stencil
    case FORMAT_D32FS8: return w * h * d * 5;
    // 8 bytes per texel
    case FORMAT_RGBA16:
    case FORMAT_RGBA16F:
    case FORMAT_RG32F: return w * h * d * 8;
    case FORMAT_RGBA32F: return w * h * d * 16;
    // BC formats: 4x4 blocks of 8 or 16 bytes
    case FORMAT_BC1: return ((w + 3) / 4) * ((h + 3) / 4) * d * 8;
    case FORMAT_BC2: return ((w + 3) / 4) * ((h + 3) / 4) * d * 16;
    case FORMAT_BC3: return ((w + 3) / 4) * ((h + 3) / 4) * d * 16;
    case FORMAT_BC4U: return ((w + 3) / 4) * ((h + 3) / 4) * d * 8;
    case FORMAT_BC4S: return ((w + 3) / 4) * ((h + 3) / 4) * d * 8;
    case FORMAT_BC5U: return ((w + 3) / 4) * ((h + 3) / 4) * d * 16;
    case FORMAT_BC5S: return ((w + 3) / 4) * ((h + 3) / 4) * d * 16;
    case FORMAT_BC6UF: return ((w + 3) / 4) * ((h + 3) / 4) * d * 16;
    case FORMAT_BC6SF: return ((w + 3) / 4) * ((h + 3) / 4) * d * 16;
    case FORMAT_BC7: return ((w + 3) / 4) * ((h + 3) / 4) * d * 16;
    // ASTC formats: AxB blocks of 16 bytes
    case FORMAT_ASTC_4x4: return ((w + 3) / 4) * ((h + 3) / 4) * d * 16;
    case FORMAT_ASTC_5x4: return ((w + 4) / 5) * ((h + 3) / 4) * d * 16;
    case FORMAT_ASTC_5x5: return ((w + 4) / 5) * ((h + 4) / 5) * d * 16;
    case FORMAT_ASTC_6x5: return ((w + 5) / 6) * ((h + 4) / 5) * d * 16;
    case FORMAT_ASTC_6x6: return ((w + 5) / 6) * ((h + 5) / 6) * d * 16;
    case FORMAT_ASTC_8x5: return ((w + 7) / 8) * ((h + 4) / 5) * d * 16;
    case FORMAT_ASTC_8x6: return ((w + 7) / 8) * ((h + 5) / 6) * d * 16;
    case FORMAT_ASTC_8x8: return ((w + 7) / 8) * ((h + 7) / 8) * d * 16;
    case FORMAT_ASTC_10x5: return ((w + 9) / 10) * ((h + 4) / 5) * d * 16;
    case FORMAT_ASTC_10x6: return ((w + 9) / 10) * ((h + 5) / 6) * d * 16;
    case FORMAT_ASTC_10x8: return ((w + 9) / 10) * ((h + 7) / 8) * d * 16;
    case FORMAT_ASTC_10x10: return ((w + 9) / 10) * ((h + 9) / 10) * d * 16;
    case FORMAT_ASTC_12x10: return ((w + 11) / 12) * ((h + 9) / 10) * d * 16;
    case FORMAT_ASTC_12x12: return ((w + 11) / 12) * ((h + 11) / 12) * d * 16;
    default: lovrUnreachable();
  }
}
2022-05-26 06:52:24 +00:00
// Errors if a 3D texture region exceeds the texture's bounds.
// offset is { x, y, layer, mipmap }; extent is { width, height, layers }.
static void checkTextureBounds(const TextureInfo* info, uint32_t offset[4], uint32_t extent[3]) {
  // Width/height shrink by half (min 1) at each mip level
  uint32_t maxWidth = MAX(info->width >> offset[3], 1);
  uint32_t maxHeight = MAX(info->height >> offset[3], 1);
  // Only 3D textures lose depth at lower mips; array layer counts stay fixed
  uint32_t maxLayers = info->type == TEXTURE_3D ? MAX(info->layers >> offset[3], 1) : info->layers;
  lovrCheck(offset[0] + extent[0] <= maxWidth, "Texture x range [%d,%d] exceeds width (%d)", offset[0], offset[0] + extent[0], maxWidth);
  lovrCheck(offset[1] + extent[1] <= maxHeight, "Texture y range [%d,%d] exceeds height (%d)", offset[1], offset[1] + extent[1], maxHeight);
  lovrCheck(offset[2] + extent[2] <= maxLayers, "Texture layer range [%d,%d] exceeds layer count (%d)", offset[2], offset[2] + extent[2], maxLayers);
  lovrCheck(offset[3] < info->mipmaps, "Texture mipmap %d exceeds its mipmap count (%d)", offset[3] + 1, info->mipmaps);
}
2022-06-23 02:05:36 +00:00
// Generates mipmaps by blitting each level into the next, starting after
// `base`.  count == ~0u means "all remaining levels below base".
static void mipmapTexture(gpu_stream* stream, Texture* texture, uint32_t base, uint32_t count) {
  if (count == ~0u) count = texture->info.mipmaps - (base + 1);
  bool volumetric = texture->info.type == TEXTURE_3D;
  for (uint32_t i = 0; i < count; i++) {
    uint32_t level = base + i + 1;
    // offsets are { x, y, layer, mipmap }
    uint32_t srcOffset[4] = { 0, 0, 0, level - 1 };
    uint32_t dstOffset[4] = { 0, 0, 0, level };
    uint32_t srcExtent[3] = {
      MAX(texture->info.width >> (level - 1), 1),
      MAX(texture->info.height >> (level - 1), 1),
      // Only 3D textures shrink in the 3rd dimension across mip levels
      volumetric ? MAX(texture->info.layers >> (level - 1), 1) : 1
    };
    uint32_t dstExtent[3] = {
      MAX(texture->info.width >> level, 1),
      MAX(texture->info.height >> level, 1),
      volumetric ? MAX(texture->info.layers >> level, 1) : 1
    };
    gpu_blit(stream, texture->root->gpu, texture->root->gpu, srcOffset, dstOffset, srcExtent, dstExtent, GPU_FILTER_LINEAR);
    // Each level reads the previous level's blit, so a transfer barrier is
    // needed between iterations (but not after the final level)
    if (i != count - 1) {
      gpu_sync(stream, &(gpu_barrier) {
        .prev = GPU_PHASE_BLIT,
        .next = GPU_PHASE_BLIT,
        .flush = GPU_CACHE_TRANSFER_WRITE,
        .clear = GPU_CACHE_TRANSFER_READ
      }, 1);
    }
  }
}
2024-02-05 23:03:28 +00:00
// Finds a shader resource by name, comparing precomputed name hashes.
// Throws when the shader declares no resource with that name.
static ShaderResource* findShaderResource(Shader* shader, const char* name, size_t length) {
  uint32_t hash = (uint32_t) hash64(name, length);
  ShaderResource* resource = shader->resources;
  for (uint32_t i = 0; i < shader->resourceCount; i++, resource++) {
    if (resource->hash == hash) {
      return resource;
    }
  }
  lovrThrow("Shader has no variable named '%s'", name);
}
2023-06-10 04:33:33 +00:00
// Reserves the next Access entry in the pass's access list for the given
// access type (compute or render), chaining on a new block when the current
// one is full.
static Access* getNextAccess(Pass* pass, int type, bool texture) {
  AccessBlock* block = pass->access[type];

  if (block == NULL || block->count >= COUNTOF(block->list)) {
    AccessBlock* fresh = lovrPassAllocate(pass, sizeof(AccessBlock));
    fresh->next = block;
    fresh->count = 0;
    fresh->textureMask = 0;
    pass->access[type] = fresh;
    block = fresh;
  }

  // Record whether this entry refers to a texture (one bit per list slot)
  block->textureMask |= (uint64_t) texture << block->count;
  return &block->list[block->count++];
}
// Records a buffer access on the pass for later synchronization and retains
// the buffer so it outlives the pass.  NULL buffers are ignored.
static void trackBuffer(Pass* pass, Buffer* buffer, gpu_phase phase, gpu_cache cache) {
  if (buffer == NULL) {
    return;
  }

  int type = phase == GPU_PHASE_SHADER_COMPUTE ? ACCESS_COMPUTE : ACCESS_RENDER;
  Access* access = getNextAccess(pass, type, false);
  access->sync = &buffer->sync;
  access->object = buffer;
  access->phase = phase;
  access->cache = cache;
  lovrRetain(buffer);
}
// Records a texture access on the pass for later synchronization and retains
// the texture.  NULL textures are ignored.
static void trackTexture(Pass* pass, Texture* texture, gpu_phase phase, gpu_cache cache) {
  if (texture == NULL) {
    return;
  }

  // Sample-only textures can skip sync, but still need to be refcounted
  if (texture->root->info.usage == TEXTURE_SAMPLE) {
    phase = 0;
    cache = 0;
  }

  int type = phase == GPU_PHASE_SHADER_COMPUTE ? ACCESS_COMPUTE : ACCESS_RENDER;
  Access* access = getNextAccess(pass, type, true);
  access->sync = &texture->root->sync;
  access->object = texture;
  access->phase = phase;
  access->cache = cache;
  lovrRetain(texture);
}
2023-04-30 06:02:37 +00:00
// Retains a material for the lifetime of the pass.  If any of the material's
// textures are writable, each texture is also tracked for synchronization.
static void trackMaterial(Pass* pass, Material* material) {
  lovrRetain(material);

  if (!material->hasWritableTexture) {
    return;
  }

  gpu_phase phase = GPU_PHASE_SHADER_VERTEX | GPU_PHASE_SHADER_FRAGMENT;
  gpu_cache cache = GPU_CACHE_TEXTURE;

  Texture* textures[] = {
    material->info.texture,
    material->info.glowTexture,
    material->info.metalnessTexture,
    material->info.roughnessTexture,
    material->info.clearcoatTexture,
    material->info.occlusionTexture,
    material->info.normalTexture
  };

  for (size_t i = 0; i < COUNTOF(textures); i++) {
    trackTexture(pass, textures[i], phase, cache);
  }
}
2023-04-30 01:25:58 +00:00
// Applies one recorded access to a resource's Sync state, accumulating any
// required execution/cache dependencies into `barrier`.  Returns true if the
// access performed a write.
static bool syncResource(Access* access, gpu_barrier* barrier) {
  // There are 4 types of access patterns:
  // - read after read:
  //   - no hazard, no barrier necessary
  // - read after write:
  //   - needs execution dependency to ensure the read happens after the write
  //   - needs to flush the writes from the cache
  //   - needs to clear the cache for the read so it gets the new data
  //   - only needs to happen once for each type of read after a write (tracked by pendingReads)
  //     - if a second read happens, the first read would have already synchronized (transitive)
  // - write after write:
  //   - needs execution dependency to ensure writes don't overlap
  //   - needs to flush and clear the cache
  //   - clears pendingReads
  // - write after read:
  //   - needs execution dependency to ensure write starts after read is finished
  //   - does not need to flush any caches
  //   - does clear the write cache
  //   - clears pendingReads

  Sync* sync = access->sync;
  uint32_t read = access->cache & GPU_CACHE_READ_MASK;
  uint32_t write = access->cache & GPU_CACHE_WRITE_MASK;
  uint32_t newReads = read & ~sync->pendingReads;
  bool hasNewReads = newReads || (access->phase & ~sync->readPhase);
  bool readAfterWrite = read && sync->pendingWrite && hasNewReads;
  bool writeAfterWrite = write && sync->pendingWrite && !sync->pendingReads;
  bool writeAfterRead = write && sync->pendingReads;

  if (readAfterWrite) {
    barrier->prev |= sync->writePhase;
    barrier->next |= access->phase;
    barrier->flush |= sync->pendingWrite;
    barrier->clear |= newReads;
    sync->readPhase |= access->phase;
    sync->pendingReads |= read;
  }

  if (writeAfterWrite) {
    barrier->prev |= sync->writePhase;
    barrier->next |= access->phase;
    barrier->flush |= sync->pendingWrite;
    barrier->clear |= write;
  }

  if (writeAfterRead) {
    barrier->prev |= sync->readPhase;
    barrier->next |= access->phase;
    sync->readPhase = 0;
    sync->pendingReads = 0;
  }

  // A write becomes the resource's new pending write
  if (write) {
    sync->writePhase = access->phase;
    sync->pendingWrite = write;
  }

  return write;
}
2024-01-14 22:51:23 +00:00
// Synchronizes a transfer access to a resource.  If the resource was already
// written (or, for writes, read) by a transfer this frame, the dependency
// must be issued "just in time" and is returned to the caller; otherwise it
// is folded into the frame-wide transfer barrier and the returned barrier is
// empty.
static gpu_barrier syncTransfer(Sync* sync, gpu_phase phase, gpu_cache cache) {
  gpu_barrier localBarrier = { 0 };

  bool writing = (cache & GPU_CACHE_WRITE_MASK) != 0;
  bool justInTime = sync->lastTransferWrite == state.tick || (sync->lastTransferRead == state.tick && writing);

  gpu_barrier* barrier = justInTime ? &localBarrier : &state.transferBarrier;
  syncResource(&(Access) { sync, NULL, phase, cache }, barrier);

  if (cache & GPU_CACHE_READ_MASK) sync->lastTransferRead = state.tick;
  if (writing) sync->lastTransferWrite = state.tick;

  return localBarrier;
}
2022-07-04 00:26:31 +00:00
// Recursively recomputes global node transforms from local TRS components,
// starting at nodeIndex with the given parent world matrix.
static void updateModelTransforms(Model* model, uint32_t nodeIndex, float* parent) {
  NodeTransform* local = &model->localTransforms[nodeIndex];
  mat4 world = model->globalTransforms + 16 * nodeIndex;

  // world = parent * T * R * S
  mat4_init(world, parent);
  mat4_translate(world, local->position[0], local->position[1], local->position[2]);
  mat4_rotateQuat(world, local->rotation);
  mat4_scale(world, local->scale[0], local->scale[1], local->scale[2]);

  ModelNode* node = &model->info.data->nodes[nodeIndex];
  for (uint32_t i = 0; i < node->childCount; i++) {
    updateModelTransforms(model, node->children[i], world);
  }
}
2022-05-22 22:09:09 +00:00
// Only an explicit set of SPIR-V capabilities are allowed
// Some capabilities require a GPU feature to be supported
// Some common unsupported capabilities are checked directly, to provide better error messages
// The case numbers are SPIR-V "Capability" enum values (see the SPIR-V specification).
// NOTE(review): lovrThrow cases intentionally have no break — this assumes lovrThrow
// does not return (lovrCheck cases, which can pass, do break). Confirm lovrThrow is noreturn.
static void checkShaderFeatures(uint32_t* features, uint32_t count) {
  for (uint32_t i = 0; i < count; i++) {
    switch (features[i]) {
      case 0: break; // Matrix
      case 1: break; // Shader
      case 2: lovrThrow("Shader uses unsupported feature #%d: %s", features[i], "geometry shading");
      case 3: lovrThrow("Shader uses unsupported feature #%d: %s", features[i], "tessellation shading");
      case 5: lovrThrow("Shader uses unsupported feature #%d: %s", features[i], "linkage");
      case 9: lovrThrow("Shader uses unsupported feature #%d: %s", features[i], "half floats");
      // Optional features: allowed only when the GPU reports support
      case 10: lovrCheck(state.features.float64, "GPU does not support shader feature #%d: %s", features[i], "64 bit floats"); break;
      case 11: lovrCheck(state.features.int64, "GPU does not support shader feature #%d: %s", features[i], "64 bit integers"); break;
      case 12: lovrThrow("Shader uses unsupported feature #%d: %s", features[i], "64 bit atomics");
      case 22: lovrCheck(state.features.int16, "GPU does not support shader feature #%d: %s", features[i], "16 bit integers"); break;
      case 23: lovrThrow("Shader uses unsupported feature #%d: %s", features[i], "tessellation shading");
      case 24: lovrThrow("Shader uses unsupported feature #%d: %s", features[i], "geometry shading");
      case 25: lovrThrow("Shader uses unsupported feature #%d: %s", features[i], "extended image gather");
      case 27: lovrThrow("Shader uses unsupported feature #%d: %s", features[i], "multisample storage textures");
      case 32: lovrCheck(state.limits.clipDistances > 0, "GPU does not support shader feature #%d: %s", features[i], "clip distance"); break;
      case 33: lovrCheck(state.limits.cullDistances > 0, "GPU does not support shader feature #%d: %s", features[i], "cull distance"); break;
      case 34: lovrThrow("Shader uses unsupported feature #%d: %s", features[i], "cubemap array textures");
      case 35: lovrThrow("Shader uses unsupported feature #%d: %s", features[i], "sample rate shading");
      case 36: lovrThrow("Shader uses unsupported feature #%d: %s", features[i], "rectangle textures");
      case 37: lovrThrow("Shader uses unsupported feature #%d: %s", features[i], "rectangle textures");
      case 39: lovrThrow("Shader uses unsupported feature #%d: %s", features[i], "8 bit integers");
      case 40: lovrThrow("Shader uses unsupported feature #%d: %s", features[i], "input attachments");
      case 41: lovrThrow("Shader uses unsupported feature #%d: %s", features[i], "sparse residency");
      case 42: lovrThrow("Shader uses unsupported feature #%d: %s", features[i], "min LOD");
      case 43: lovrThrow("Shader uses unsupported feature #%d: %s", features[i], "1D textures");
      case 44: lovrThrow("Shader uses unsupported feature #%d: %s", features[i], "1D textures");
      case 45: break; // Cubemap arrays
      case 46: lovrThrow("Shader uses unsupported feature #%d: %s", features[i], "texel buffers");
      case 47: lovrThrow("Shader uses unsupported feature #%d: %s", features[i], "texel buffers");
      case 48: lovrThrow("Shader uses unsupported feature #%d: %s", features[i], "multisampled storage textures");
      case 49: break; // StorageImageExtendedFormats (?)
      case 50: break; // ImageQuery
      case 51: break; // DerivativeControl
      case 52: lovrThrow("Shader uses unsupported feature #%d: %s", features[i], "sample rate shading");
      case 53: lovrThrow("Shader uses unsupported feature #%d: %s", features[i], "transform feedback");
      case 54: lovrThrow("Shader uses unsupported feature #%d: %s", features[i], "geometry shading");
      case 55: lovrThrow("Shader uses unsupported feature #%d: %s", features[i], "autoformat storage textures");
      case 56: lovrThrow("Shader uses unsupported feature #%d: %s", features[i], "autoformat storage textures");
      case 57: lovrThrow("Shader uses unsupported feature #%d: %s", features[i], "multiviewport");
      case 69: lovrThrow("Shader uses unsupported feature #%d: %s", features[i], "layered rendering");
      case 70: lovrThrow("Shader uses unsupported feature #%d: %s", features[i], "multiviewport");
      case 4427: break; // ShaderDrawParameters
      case 4437: lovrThrow("Shader uses unsupported feature #%d: %s", features[i], "multigpu");
      // Multiview is supported when rendering can target more than one array layer
      case 4439: lovrCheck(state.limits.renderSize[2] > 1, "GPU does not support shader feature #%d: %s", features[i], "multiview"); break;
      case 5301: lovrThrow("Shader uses unsupported feature #%d: %s", features[i], "non-uniform indexing");
      case 5306: lovrThrow("Shader uses unsupported feature #%d: %s", features[i], "non-uniform indexing");
      case 5307: lovrThrow("Shader uses unsupported feature #%d: %s", features[i], "non-uniform indexing");
      case 5308: lovrThrow("Shader uses unsupported feature #%d: %s", features[i], "non-uniform indexing");
      case 5309: lovrThrow("Shader uses unsupported feature #%d: %s", features[i], "non-uniform indexing");
      default: lovrThrow("Shader uses unknown feature #%d", features[i]);
    }
  }
}
2022-11-08 06:45:10 +00:00
// Window resize callback: converts the reported size from logical units to
// pixels, resizes the swapchain surface, and forwards a resize event.
static void onResize(uint32_t width, uint32_t height) {
  // The OS reports logical units; scale by pixel density to get pixel dimensions
  float density = os_window_get_pixel_density();
  uint32_t pixelWidth = (uint32_t) (width * density);
  uint32_t pixelHeight = (uint32_t) (height * density);

  state.window->info.width = pixelWidth;
  state.window->info.height = pixelHeight;
  gpu_surface_resize(pixelWidth, pixelHeight);

  Event event = { .type = EVENT_RESIZE };
  event.data.resize.width = pixelWidth;
  event.data.resize.height = pixelHeight;
  lovrEventPush(event);
}
2022-04-21 07:27:13 +00:00
// GPU message callback. Severe messages abort (with a message box on Windows
// when the failure happens before initialization finished); everything else is
// logged at debug level.
//
// Fix: the message text comes from the GPU backend/driver and must never be
// used as a printf format string — pass it through "%s" instead (format-string
// bug if the message contains '%').
static void onMessage(void* context, const char* message, bool severe) {
  if (severe) {
#ifdef _WIN32
    if (!state.defaultTexture) { // Hacky way to determine if initialization has completed
      const char* format = "This program requires a graphics card with support for Vulkan 1.1, but no device was found or it failed to initialize properly. The error message was:\n\n%s";
      size_t size = snprintf(NULL, 0, format, message) + 1;
      char* string = lovrMalloc(size);
      snprintf(string, size, format, message);
      os_window_message_box(string);
      lovrFree(string);
      exit(1);
    }
#endif
    lovrThrow("GPU error: %s", message);
  } else {
    // Use "%s" so a '%' in the driver message can't be interpreted as a format specifier
    lovrLog(LOG_DEBUG, "GPU", "%s", message);
  }
}