/**************************************************************************/
/* mesh_storage.cpp */
/**************************************************************************/
/* This file is part of: */
/* GODOT ENGINE */
/* https://godotengine.org */
/**************************************************************************/
/* Copyright (c) 2014-present Godot Engine contributors (see AUTHORS.md). */
/* Copyright (c) 2007-2014 Juan Linietsky, Ariel Manzur. */
/* */
/* Permission is hereby granted, free of charge, to any person obtaining */
/* a copy of this software and associated documentation files (the */
/* "Software"), to deal in the Software without restriction, including */
/* without limitation the rights to use, copy, modify, merge, publish, */
/* distribute, sublicense, and/or sell copies of the Software, and to */
/* permit persons to whom the Software is furnished to do so, subject to */
/* the following conditions: */
/* */
/* The above copyright notice and this permission notice shall be */
/* included in all copies or substantial portions of the Software. */
/* */
/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */
/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */
/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. */
/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */
/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */
/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */
/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
/**************************************************************************/

#ifdef GLES3_ENABLED

#include "mesh_storage.h"

#include "config.h"
#include "material_storage.h"
#include "utilities.h"

using namespace GLES3;
MeshStorage *MeshStorage::singleton = nullptr;

MeshStorage *MeshStorage::get_singleton() {
	return singleton;
}

MeshStorage::MeshStorage() {
	singleton = this;

	{
		skeleton_shader.shader.initialize();
		skeleton_shader.shader_version = skeleton_shader.shader.version_create();
	}
}

MeshStorage::~MeshStorage() {
	singleton = nullptr;
	skeleton_shader.shader.version_free(skeleton_shader.shader_version);
}

/* MESH API */

RID MeshStorage::mesh_allocate() {
	return mesh_owner.allocate_rid();
}

void MeshStorage::mesh_initialize(RID p_rid) {
	mesh_owner.initialize_rid(p_rid, Mesh());
}

void MeshStorage::mesh_free(RID p_rid) {
	mesh_clear(p_rid);
	mesh_set_shadow_mesh(p_rid, RID());
	Mesh *mesh = mesh_owner.get_or_null(p_rid);
	ERR_FAIL_NULL(mesh);

	mesh->dependency.deleted_notify(p_rid);
	if (mesh->instances.size()) {
		ERR_PRINT("deleting mesh with active instances");
	}
	if (mesh->shadow_owners.size()) {
		for (Mesh *E : mesh->shadow_owners) {
			Mesh *shadow_owner = E;
			shadow_owner->shadow_mesh = RID();
			shadow_owner->dependency.changed_notify(Dependency::DEPENDENCY_CHANGED_MESH);
		}
	}
	mesh_owner.free(p_rid);
}

void MeshStorage::mesh_set_blend_shape_count(RID p_mesh, int p_blend_shape_count) {
	ERR_FAIL_COND(p_blend_shape_count < 0);

	Mesh *mesh = mesh_owner.get_or_null(p_mesh);
	ERR_FAIL_NULL(mesh);

	ERR_FAIL_COND(mesh->surface_count > 0); //surfaces already exist
	mesh->blend_shape_count = p_blend_shape_count;
}

bool MeshStorage::mesh_needs_instance(RID p_mesh, bool p_has_skeleton) {
	Mesh *mesh = mesh_owner.get_or_null(p_mesh);
	ERR_FAIL_NULL_V(mesh, false);

	return mesh->blend_shape_count > 0 || (mesh->has_bone_weights && p_has_skeleton);
}
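// Uploads one surface worth of geometry into GL buffer objects (vertex, attribute,
// skin and index buffers). In debug builds the expected per-vertex strides are
// recomputed first so the sizes of the incoming arrays can be validated.
// Worked example (illustrative numbers, not from the engine docs): an uncompressed
// 3D surface with positions and normals has a vertex stride of
// sizeof(float) * 3 + sizeof(uint16_t) * 2 = 16 bytes, so vertex_data must be
// exactly 16 * vertex_count bytes.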
void MeshStorage::mesh_add_surface(RID p_mesh, const RS::SurfaceData &p_surface) {
	Mesh *mesh = mesh_owner.get_or_null(p_mesh);
	ERR_FAIL_NULL(mesh);

	ERR_FAIL_COND(mesh->surface_count == RS::MAX_MESH_SURFACES);

#ifdef DEBUG_ENABLED
	//do a validation, to catch errors first
	{
		uint32_t stride = 0;
		uint32_t attrib_stride = 0;
		uint32_t skin_stride = 0;

		for (int i = 0; i < RS::ARRAY_WEIGHTS; i++) {
			if ((p_surface.format & (1ULL << i))) {
				switch (i) {
					case RS::ARRAY_VERTEX: {
						if ((p_surface.format & RS::ARRAY_FLAG_USE_2D_VERTICES) || (p_surface.format & RS::ARRAY_FLAG_COMPRESS_ATTRIBUTES)) {
							stride += sizeof(float) * 2;
						} else {
							stride += sizeof(float) * 3;
						}
					} break;
					case RS::ARRAY_NORMAL: {
						stride += sizeof(uint16_t) * 2;
					} break;
					case RS::ARRAY_TANGENT: {
						if (!(p_surface.format & RS::ARRAY_FLAG_COMPRESS_ATTRIBUTES)) {
							stride += sizeof(uint16_t) * 2;
						}
					} break;
					case RS::ARRAY_COLOR: {
						attrib_stride += sizeof(uint32_t);
					} break;
					case RS::ARRAY_TEX_UV: {
						if (p_surface.format & RS::ARRAY_FLAG_COMPRESS_ATTRIBUTES) {
							attrib_stride += sizeof(uint16_t) * 2;
						} else {
							attrib_stride += sizeof(float) * 2;
						}
					} break;
					case RS::ARRAY_TEX_UV2: {
						if (p_surface.format & RS::ARRAY_FLAG_COMPRESS_ATTRIBUTES) {
							attrib_stride += sizeof(uint16_t) * 2;
						} else {
							attrib_stride += sizeof(float) * 2;
						}
					} break;
					case RS::ARRAY_CUSTOM0:
					case RS::ARRAY_CUSTOM1:
					case RS::ARRAY_CUSTOM2:
					case RS::ARRAY_CUSTOM3: {
						int idx = i - RS::ARRAY_CUSTOM0;
						uint32_t fmt_shift[RS::ARRAY_CUSTOM_COUNT] = { RS::ARRAY_FORMAT_CUSTOM0_SHIFT, RS::ARRAY_FORMAT_CUSTOM1_SHIFT, RS::ARRAY_FORMAT_CUSTOM2_SHIFT, RS::ARRAY_FORMAT_CUSTOM3_SHIFT };
						uint32_t fmt = (p_surface.format >> fmt_shift[idx]) & RS::ARRAY_FORMAT_CUSTOM_MASK;
						uint32_t fmtsize[RS::ARRAY_CUSTOM_MAX] = { 4, 4, 4, 8, 4, 8, 12, 16 };
						attrib_stride += fmtsize[fmt];
					} break;
					case RS::ARRAY_WEIGHTS:
					case RS::ARRAY_BONES: {
						//uses a separate array
						bool use_8 = p_surface.format & RS::ARRAY_FLAG_USE_8_BONE_WEIGHTS;
						skin_stride += sizeof(int16_t) * (use_8 ? 16 : 8);
					} break;
				}
			}
		}

		int expected_size = stride * p_surface.vertex_count;
		ERR_FAIL_COND_MSG(expected_size != p_surface.vertex_data.size(), "Size of vertex data provided (" + itos(p_surface.vertex_data.size()) + ") does not match expected (" + itos(expected_size) + ")");

		int bs_expected_size = expected_size * mesh->blend_shape_count;
		ERR_FAIL_COND_MSG(bs_expected_size != p_surface.blend_shape_data.size(), "Size of blend shape data provided (" + itos(p_surface.blend_shape_data.size()) + ") does not match expected (" + itos(bs_expected_size) + ")");

		int expected_attrib_size = attrib_stride * p_surface.vertex_count;
		ERR_FAIL_COND_MSG(expected_attrib_size != p_surface.attribute_data.size(), "Size of attribute data provided (" + itos(p_surface.attribute_data.size()) + ") does not match expected (" + itos(expected_attrib_size) + ")");

		if ((p_surface.format & RS::ARRAY_FORMAT_WEIGHTS) && (p_surface.format & RS::ARRAY_FORMAT_BONES)) {
			expected_size = skin_stride * p_surface.vertex_count;
			ERR_FAIL_COND_MSG(expected_size != p_surface.skin_data.size(), "Size of skin data provided (" + itos(p_surface.skin_data.size()) + ") does not match expected (" + itos(expected_size) + ")");
		}
	}
#endif
	uint64_t surface_version = p_surface.format & (uint64_t(RS::ARRAY_FLAG_FORMAT_VERSION_MASK) << RS::ARRAY_FLAG_FORMAT_VERSION_SHIFT);
	RS::SurfaceData new_surface = p_surface;
#ifdef DISABLE_DEPRECATED
	ERR_FAIL_COND_MSG(surface_version != RS::ARRAY_FLAG_FORMAT_CURRENT_VERSION, "Surface version provided (" + itos(int(surface_version >> RS::ARRAY_FLAG_FORMAT_VERSION_SHIFT)) + ") does not match current version (" + itos(RS::ARRAY_FLAG_FORMAT_CURRENT_VERSION >> RS::ARRAY_FLAG_FORMAT_VERSION_SHIFT) + ")");
#else
	if (surface_version != uint64_t(RS::ARRAY_FLAG_FORMAT_CURRENT_VERSION)) {
		RS::get_singleton()->fix_surface_compatibility(new_surface);
		surface_version = new_surface.format & (uint64_t(RS::ARRAY_FLAG_FORMAT_VERSION_MASK) << RS::ARRAY_FLAG_FORMAT_VERSION_SHIFT);
		ERR_FAIL_COND_MSG(surface_version != RS::ARRAY_FLAG_FORMAT_CURRENT_VERSION,
				vformat("Surface version provided (%d) does not match current version (%d).",
						(surface_version >> RS::ARRAY_FLAG_FORMAT_VERSION_SHIFT) & RS::ARRAY_FLAG_FORMAT_VERSION_MASK,
						(RS::ARRAY_FLAG_FORMAT_CURRENT_VERSION >> RS::ARRAY_FLAG_FORMAT_VERSION_SHIFT) & RS::ARRAY_FLAG_FORMAT_VERSION_MASK));
	}
#endif

	Mesh::Surface *s = memnew(Mesh::Surface);

	s->format = new_surface.format;
	s->primitive = new_surface.primitive;

	if (new_surface.vertex_data.size()) {
		glGenBuffers(1, &s->vertex_buffer);
		glBindBuffer(GL_ARRAY_BUFFER, s->vertex_buffer);

		// If we have an uncompressed surface that contains normals, but not tangents, we need to differentiate the array
		// from a compressed array in the shader. To do so, we allow the normal to read 4 components out of the buffer,
		// but only give it 2 components per normal. So essentially, each vertex reads the next normal in normal.zw.
		// This allows us to avoid adding a shader permutation, and avoid passing dummy tangents. Since the stride is kept small,
		// this should still be a net win for bandwidth.
		// If we do this, then the last normal will read past the end of the array, so we need to pad the array with dummy data.
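		// Concretely (illustrative numbers): the padding added below is sizeof(uint16_t) * 2 = 4
		// zero bytes, which is exactly one extra packed normal, so the final vertex's normal.zw
		// fetch stays inside the buffer.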
		if (!(new_surface.format & RS::ARRAY_FLAG_COMPRESS_ATTRIBUTES) && (new_surface.format & RS::ARRAY_FORMAT_NORMAL) && !(new_surface.format & RS::ARRAY_FORMAT_TANGENT)) {
			// Unfortunately, we need to copy the buffer, which is fine as doing a resize triggers a CoW anyway.
			Vector<uint8_t> new_vertex_data;
			new_vertex_data.resize_zeroed(new_surface.vertex_data.size() + sizeof(uint16_t) * 2);
			memcpy(new_vertex_data.ptrw(), new_surface.vertex_data.ptr(), new_surface.vertex_data.size());
			GLES3::Utilities::get_singleton()->buffer_allocate_data(GL_ARRAY_BUFFER, s->vertex_buffer, new_vertex_data.size(), new_vertex_data.ptr(), (s->format & RS::ARRAY_FLAG_USE_DYNAMIC_UPDATE) ? GL_DYNAMIC_DRAW : GL_STATIC_DRAW, "Mesh vertex buffer");
			s->vertex_buffer_size = new_vertex_data.size();
		} else {
			GLES3::Utilities::get_singleton()->buffer_allocate_data(GL_ARRAY_BUFFER, s->vertex_buffer, new_surface.vertex_data.size(), new_surface.vertex_data.ptr(), (s->format & RS::ARRAY_FLAG_USE_DYNAMIC_UPDATE) ? GL_DYNAMIC_DRAW : GL_STATIC_DRAW, "Mesh vertex buffer");
			s->vertex_buffer_size = new_surface.vertex_data.size();
		}
	}

	if (new_surface.attribute_data.size()) {
		glGenBuffers(1, &s->attribute_buffer);
		glBindBuffer(GL_ARRAY_BUFFER, s->attribute_buffer);
		GLES3::Utilities::get_singleton()->buffer_allocate_data(GL_ARRAY_BUFFER, s->attribute_buffer, new_surface.attribute_data.size(), new_surface.attribute_data.ptr(), (s->format & RS::ARRAY_FLAG_USE_DYNAMIC_UPDATE) ? GL_DYNAMIC_DRAW : GL_STATIC_DRAW, "Mesh attribute buffer");
		s->attribute_buffer_size = new_surface.attribute_data.size();
	}
	if (new_surface.skin_data.size()) {
		glGenBuffers(1, &s->skin_buffer);
		glBindBuffer(GL_ARRAY_BUFFER, s->skin_buffer);
		GLES3::Utilities::get_singleton()->buffer_allocate_data(GL_ARRAY_BUFFER, s->skin_buffer, new_surface.skin_data.size(), new_surface.skin_data.ptr(), (s->format & RS::ARRAY_FLAG_USE_DYNAMIC_UPDATE) ? GL_DYNAMIC_DRAW : GL_STATIC_DRAW, "Mesh skin buffer");
		s->skin_buffer_size = new_surface.skin_data.size();
	}
	glBindBuffer(GL_ARRAY_BUFFER, 0);

	s->vertex_count = new_surface.vertex_count;

	if (new_surface.format & RS::ARRAY_FORMAT_BONES) {
		mesh->has_bone_weights = true;
	}

	if (new_surface.index_count) {
		bool is_index_16 = new_surface.vertex_count <= 65536 && new_surface.vertex_count > 0;
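		// is_index_16 assumes that surfaces with at most 65536 vertices were packed with
		// 16-bit indices (2 bytes each) and larger ones with 32-bit indices (4 bytes each);
		// it is used below to turn LOD index byte sizes into index counts.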
		glGenBuffers(1, &s->index_buffer);
		glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, s->index_buffer);
		GLES3::Utilities::get_singleton()->buffer_allocate_data(GL_ELEMENT_ARRAY_BUFFER, s->index_buffer, new_surface.index_data.size(), new_surface.index_data.ptr(), GL_STATIC_DRAW, "Mesh index buffer");
		glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0); //unbind
		s->index_count = new_surface.index_count;
		s->index_buffer_size = new_surface.index_data.size();

		if (new_surface.lods.size()) {
			s->lods = memnew_arr(Mesh::Surface::LOD, new_surface.lods.size());
			s->lod_count = new_surface.lods.size();

			for (int i = 0; i < new_surface.lods.size(); i++) {
				glGenBuffers(1, &s->lods[i].index_buffer);
				glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, s->lods[i].index_buffer);
				GLES3::Utilities::get_singleton()->buffer_allocate_data(GL_ELEMENT_ARRAY_BUFFER, s->lods[i].index_buffer, new_surface.lods[i].index_data.size(), new_surface.lods[i].index_data.ptr(), GL_STATIC_DRAW, "Mesh index buffer LOD[" + itos(i) + "]");
				glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0); //unbind
				s->lods[i].edge_length = new_surface.lods[i].edge_length;
				s->lods[i].index_count = new_surface.lods[i].index_data.size() / (is_index_16 ? 2 : 4);
				s->lods[i].index_buffer_size = new_surface.lods[i].index_data.size();
			}
		}
	}

	ERR_FAIL_COND_MSG(!new_surface.index_count && !new_surface.vertex_count, "Meshes must contain a vertex array, an index array, or both");

	if (GLES3::Config::get_singleton()->generate_wireframes && s->primitive == RS::PRIMITIVE_TRIANGLES) {
		// Generate wireframes. This is mostly used by the editor.
		s->wireframe = memnew(Mesh::Surface::Wireframe);
		Vector<uint32_t> wf_indices;
		uint32_t &wf_index_count = s->wireframe->index_count;
		uint32_t *wr = nullptr;

		if (new_surface.format & RS::ARRAY_FORMAT_INDEX) {
			wf_index_count = s->index_count * 2;
			wf_indices.resize(wf_index_count);

			Vector<uint8_t> ir = new_surface.index_data;
			wr = wf_indices.ptrw();

			if (new_surface.vertex_count < (1 << 16)) {
				// Read 16 bit indices.
				const uint16_t *src_idx = (const uint16_t *)ir.ptr();
				for (uint32_t i = 0; i + 5 < wf_index_count; i += 6) {
					// We use GL_LINES instead of GL_TRIANGLES for drawing these primitives later,
					// so we need double the indices for each triangle.
					wr[i + 0] = src_idx[i / 2];
					wr[i + 1] = src_idx[i / 2 + 1];
					wr[i + 2] = src_idx[i / 2 + 1];
					wr[i + 3] = src_idx[i / 2 + 2];
					wr[i + 4] = src_idx[i / 2 + 2];
					wr[i + 5] = src_idx[i / 2];
				}
			} else {
				// Read 32 bit indices.
				const uint32_t *src_idx = (const uint32_t *)ir.ptr();
				for (uint32_t i = 0; i + 5 < wf_index_count; i += 6) {
					wr[i + 0] = src_idx[i / 2];
					wr[i + 1] = src_idx[i / 2 + 1];
					wr[i + 2] = src_idx[i / 2 + 1];
					wr[i + 3] = src_idx[i / 2 + 2];
					wr[i + 4] = src_idx[i / 2 + 2];
					wr[i + 5] = src_idx[i / 2];
				}
			}
		} else {
			// Not using indices.
			wf_index_count = s->vertex_count * 2;
			wf_indices.resize(wf_index_count);
			wr = wf_indices.ptrw();

			for (uint32_t i = 0; i + 5 < wf_index_count; i += 6) {
				wr[i + 0] = i / 2;
				wr[i + 1] = i / 2 + 1;
				wr[i + 2] = i / 2 + 1;
				wr[i + 3] = i / 2 + 2;
				wr[i + 4] = i / 2 + 2;
				wr[i + 5] = i / 2;
			}
		}

		s->wireframe->index_buffer_size = wf_index_count * sizeof(uint32_t);
		glGenBuffers(1, &s->wireframe->index_buffer);
		glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, s->wireframe->index_buffer);
		GLES3::Utilities::get_singleton()->buffer_allocate_data(GL_ELEMENT_ARRAY_BUFFER, s->wireframe->index_buffer, s->wireframe->index_buffer_size, wr, GL_STATIC_DRAW, "Mesh wireframe index buffer");
		glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0); // unbind
	}
	s->aabb = new_surface.aabb;
	s->bone_aabbs = new_surface.bone_aabbs; //only really useful for returning them.

	s->mesh_to_skeleton_xform = p_surface.mesh_to_skeleton_xform;

	s->uv_scale = new_surface.uv_scale;

	if (new_surface.skin_data.size() || mesh->blend_shape_count > 0) {
		// Size must match the size of the vertex array.
		int size = new_surface.vertex_data.size();
		int vertex_size = 0;
		int position_stride = 0;
		int normal_tangent_stride = 0;
		int normal_offset = 0;
		int tangent_offset = 0;
		if ((new_surface.format & (1ULL << RS::ARRAY_VERTEX))) {
			if (new_surface.format & RS::ARRAY_FLAG_USE_2D_VERTICES) {
				vertex_size = 2;
				position_stride = sizeof(float) * vertex_size;
			} else {
				if (new_surface.format & RS::ARRAY_FLAG_COMPRESS_ATTRIBUTES) {
					vertex_size = 4;
					position_stride = sizeof(uint16_t) * vertex_size;
				} else {
					vertex_size = 3;
					position_stride = sizeof(float) * vertex_size;
				}
			}
		}
		if ((new_surface.format & (1ULL << RS::ARRAY_NORMAL))) {
			normal_offset = position_stride * s->vertex_count;
			normal_tangent_stride += sizeof(uint16_t) * 2;
		}
		if ((new_surface.format & (1ULL << RS::ARRAY_TANGENT))) {
			tangent_offset = normal_offset + normal_tangent_stride;
			normal_tangent_stride += sizeof(uint16_t) * 2;
		}
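		// Layout assumed by the offsets above: all positions are stored first as one
		// contiguous block (position_stride * vertex_count bytes), and the packed
		// normal/tangent pairs follow as a second interleaved block starting at
		// normal_offset.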
		if (mesh->blend_shape_count > 0) {
			// Blend shapes are passed as one large array; for OpenGL, we need to split each of them into its own buffer.
			s->blend_shapes = memnew_arr(Mesh::Surface::BlendShape, mesh->blend_shape_count);

			for (uint32_t i = 0; i < mesh->blend_shape_count; i++) {
				glGenVertexArrays(1, &s->blend_shapes[i].vertex_array);
				glBindVertexArray(s->blend_shapes[i].vertex_array);
				glGenBuffers(1, &s->blend_shapes[i].vertex_buffer);
				glBindBuffer(GL_ARRAY_BUFFER, s->blend_shapes[i].vertex_buffer);

				GLES3::Utilities::get_singleton()->buffer_allocate_data(GL_ARRAY_BUFFER, s->blend_shapes[i].vertex_buffer, size, new_surface.blend_shape_data.ptr() + i * size, (s->format & RS::ARRAY_FLAG_USE_DYNAMIC_UPDATE) ? GL_DYNAMIC_DRAW : GL_STATIC_DRAW, "Mesh blend shape buffer");
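				// The blend shape attributes below are bound at locations offset by 3
				// (position at ARRAY_VERTEX + 3, normal at ARRAY_NORMAL + 3, tangent at
				// ARRAY_TANGENT + 3), presumably to keep them distinct from the base
				// surface attributes at the unshifted locations.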
				if ((new_surface.format & (1ULL << RS::ARRAY_VERTEX))) {
					glEnableVertexAttribArray(RS::ARRAY_VERTEX + 3);
					glVertexAttribPointer(RS::ARRAY_VERTEX + 3, vertex_size, GL_FLOAT, GL_FALSE, position_stride, CAST_INT_TO_UCHAR_PTR(0));
				}
				if ((new_surface.format & (1ULL << RS::ARRAY_NORMAL))) {
					// Normal and tangent are packed into the same attribute.
					glEnableVertexAttribArray(RS::ARRAY_NORMAL + 3);
					glVertexAttribPointer(RS::ARRAY_NORMAL + 3, 2, GL_UNSIGNED_SHORT, GL_TRUE, normal_tangent_stride, CAST_INT_TO_UCHAR_PTR(normal_offset));
				}
				if ((p_surface.format & (1ULL << RS::ARRAY_TANGENT))) {
					glEnableVertexAttribArray(RS::ARRAY_TANGENT + 3);
					glVertexAttribPointer(RS::ARRAY_TANGENT + 3, 2, GL_UNSIGNED_SHORT, GL_TRUE, normal_tangent_stride, CAST_INT_TO_UCHAR_PTR(tangent_offset));
				}
			}
			glBindVertexArray(0);
			glBindBuffer(GL_ARRAY_BUFFER, 0);
		}

		glBindVertexArray(0);
		glBindBuffer(GL_ARRAY_BUFFER, 0);
	}

	if (mesh->surface_count == 0) {
		mesh->aabb = new_surface.aabb;
	} else {
		mesh->aabb.merge_with(new_surface.aabb);
	}
	mesh->skeleton_aabb_version = 0;

	s->material = new_surface.material;

	mesh->surfaces = (Mesh::Surface **)memrealloc(mesh->surfaces, sizeof(Mesh::Surface *) * (mesh->surface_count + 1));
	mesh->surfaces[mesh->surface_count] = s;
	mesh->surface_count++;

	for (MeshInstance *mi : mesh->instances) {
		_mesh_instance_add_surface(mi, mesh, mesh->surface_count - 1);
	}

	mesh->dependency.changed_notify(Dependency::DEPENDENCY_CHANGED_MESH);

	for (Mesh *E : mesh->shadow_owners) {
		Mesh *shadow_owner = E;
		shadow_owner->shadow_mesh = RID();
		shadow_owner->dependency.changed_notify(Dependency::DEPENDENCY_CHANGED_MESH);
	}

	mesh->material_cache.clear();
}
int MeshStorage::mesh_get_blend_shape_count(RID p_mesh) const {
	const Mesh *mesh = mesh_owner.get_or_null(p_mesh);
	ERR_FAIL_NULL_V(mesh, -1);
	return mesh->blend_shape_count;
}

void MeshStorage::mesh_set_blend_shape_mode(RID p_mesh, RS::BlendShapeMode p_mode) {
	Mesh *mesh = mesh_owner.get_or_null(p_mesh);
	ERR_FAIL_NULL(mesh);
	ERR_FAIL_INDEX((int)p_mode, 2);

	mesh->blend_shape_mode = p_mode;
}

RS::BlendShapeMode MeshStorage::mesh_get_blend_shape_mode(RID p_mesh) const {
	Mesh *mesh = mesh_owner.get_or_null(p_mesh);
	ERR_FAIL_NULL_V(mesh, RS::BLEND_SHAPE_MODE_NORMALIZED);
	return mesh->blend_shape_mode;
}

void MeshStorage::mesh_surface_update_vertex_region(RID p_mesh, int p_surface, int p_offset, const Vector<uint8_t> &p_data) {
	Mesh *mesh = mesh_owner.get_or_null(p_mesh);
	ERR_FAIL_NULL(mesh);
	ERR_FAIL_UNSIGNED_INDEX((uint32_t)p_surface, mesh->surface_count);
	ERR_FAIL_COND(p_data.size() == 0);

	uint64_t data_size = p_data.size();
	ERR_FAIL_COND(p_offset + data_size > mesh->surfaces[p_surface]->vertex_buffer_size);
	const uint8_t *r = p_data.ptr();

	glBindBuffer(GL_ARRAY_BUFFER, mesh->surfaces[p_surface]->vertex_buffer);
	glBufferSubData(GL_ARRAY_BUFFER, p_offset, data_size, r);
	glBindBuffer(GL_ARRAY_BUFFER, 0);
}

void MeshStorage::mesh_surface_update_attribute_region(RID p_mesh, int p_surface, int p_offset, const Vector<uint8_t> &p_data) {
	Mesh *mesh = mesh_owner.get_or_null(p_mesh);
	ERR_FAIL_NULL(mesh);
	ERR_FAIL_UNSIGNED_INDEX((uint32_t)p_surface, mesh->surface_count);
	ERR_FAIL_COND(p_data.size() == 0);

	uint64_t data_size = p_data.size();
	ERR_FAIL_COND(p_offset + data_size > mesh->surfaces[p_surface]->attribute_buffer_size);
	const uint8_t *r = p_data.ptr();

	glBindBuffer(GL_ARRAY_BUFFER, mesh->surfaces[p_surface]->attribute_buffer);
	glBufferSubData(GL_ARRAY_BUFFER, p_offset, data_size, r);
	glBindBuffer(GL_ARRAY_BUFFER, 0);
}

void MeshStorage::mesh_surface_update_skin_region(RID p_mesh, int p_surface, int p_offset, const Vector<uint8_t> &p_data) {
	Mesh *mesh = mesh_owner.get_or_null(p_mesh);
	ERR_FAIL_NULL(mesh);
	ERR_FAIL_UNSIGNED_INDEX((uint32_t)p_surface, mesh->surface_count);
	ERR_FAIL_COND(p_data.size() == 0);

	uint64_t data_size = p_data.size();
	ERR_FAIL_COND(p_offset + data_size > mesh->surfaces[p_surface]->skin_buffer_size);
	const uint8_t *r = p_data.ptr();

	glBindBuffer(GL_ARRAY_BUFFER, mesh->surfaces[p_surface]->skin_buffer);
	glBufferSubData(GL_ARRAY_BUFFER, p_offset, data_size, r);
	glBindBuffer(GL_ARRAY_BUFFER, 0);
}

void MeshStorage::mesh_surface_set_material(RID p_mesh, int p_surface, RID p_material) {
	Mesh *mesh = mesh_owner.get_or_null(p_mesh);
	ERR_FAIL_NULL(mesh);
	ERR_FAIL_UNSIGNED_INDEX((uint32_t)p_surface, mesh->surface_count);
	mesh->surfaces[p_surface]->material = p_material;

	mesh->dependency.changed_notify(Dependency::DEPENDENCY_CHANGED_MATERIAL);
	mesh->material_cache.clear();
}

RID MeshStorage::mesh_surface_get_material(RID p_mesh, int p_surface) const {
	Mesh *mesh = mesh_owner.get_or_null(p_mesh);
	ERR_FAIL_NULL_V(mesh, RID());
	ERR_FAIL_UNSIGNED_INDEX_V((uint32_t)p_surface, mesh->surface_count, RID());

	return mesh->surfaces[p_surface]->material;
}

RS::SurfaceData MeshStorage::mesh_get_surface(RID p_mesh, int p_surface) const {
	Mesh *mesh = mesh_owner.get_or_null(p_mesh);
	ERR_FAIL_NULL_V(mesh, RS::SurfaceData());
	ERR_FAIL_UNSIGNED_INDEX_V((uint32_t)p_surface, mesh->surface_count, RS::SurfaceData());

	Mesh::Surface &s = *mesh->surfaces[p_surface];

	RS::SurfaceData sd;
	sd.format = s.format;
	if (s.vertex_buffer != 0) {
		sd.vertex_data = Utilities::buffer_get_data(GL_ARRAY_BUFFER, s.vertex_buffer, s.vertex_buffer_size);

		// When using an uncompressed buffer with normals, but without tangents, we have to trim the padding.
		if (!(s.format & RS::ARRAY_FLAG_COMPRESS_ATTRIBUTES) && (s.format & RS::ARRAY_FORMAT_NORMAL) && !(s.format & RS::ARRAY_FORMAT_TANGENT)) {
			sd.vertex_data.resize(sd.vertex_data.size() - sizeof(uint16_t) * 2);
		}
	}
	if (s.attribute_buffer != 0) {
		sd.attribute_data = Utilities::buffer_get_data(GL_ARRAY_BUFFER, s.attribute_buffer, s.attribute_buffer_size);
	}
	if (s.skin_buffer != 0) {
		sd.skin_data = Utilities::buffer_get_data(GL_ARRAY_BUFFER, s.skin_buffer, s.skin_buffer_size);
	}

	sd.vertex_count = s.vertex_count;
	sd.index_count = s.index_count;
	sd.primitive = s.primitive;

	if (sd.index_count) {
		sd.index_data = Utilities::buffer_get_data(GL_ELEMENT_ARRAY_BUFFER, s.index_buffer, s.index_buffer_size);
	}

	sd.aabb = s.aabb;
	for (uint32_t i = 0; i < s.lod_count; i++) {
		RS::SurfaceData::LOD lod;
		lod.edge_length = s.lods[i].edge_length;
		lod.index_data = Utilities::buffer_get_data(GL_ELEMENT_ARRAY_BUFFER, s.lods[i].index_buffer, s.lods[i].index_buffer_size);
		sd.lods.push_back(lod);
	}

	sd.bone_aabbs = s.bone_aabbs;
	sd.mesh_to_skeleton_xform = s.mesh_to_skeleton_xform;

	if (mesh->blend_shape_count) {
		sd.blend_shape_data = Vector<uint8_t>();
		for (uint32_t i = 0; i < mesh->blend_shape_count; i++) {
			sd.blend_shape_data.append_array(Utilities::buffer_get_data(GL_ARRAY_BUFFER, s.blend_shapes[i].vertex_buffer, s.vertex_buffer_size));
		}
	}

	sd.uv_scale = s.uv_scale;

	return sd;
}

int MeshStorage::mesh_get_surface_count(RID p_mesh) const {
	Mesh *mesh = mesh_owner.get_or_null(p_mesh);
	ERR_FAIL_NULL_V(mesh, 0);
	return mesh->surface_count;
}

void MeshStorage::mesh_set_custom_aabb(RID p_mesh, const AABB &p_aabb) {
	Mesh *mesh = mesh_owner.get_or_null(p_mesh);
	ERR_FAIL_NULL(mesh);
	mesh->custom_aabb = p_aabb;

	mesh->dependency.changed_notify(Dependency::DEPENDENCY_CHANGED_AABB);
}

AABB MeshStorage::mesh_get_custom_aabb(RID p_mesh) const {
	Mesh *mesh = mesh_owner.get_or_null(p_mesh);
	ERR_FAIL_NULL_V(mesh, AABB());
	return mesh->custom_aabb;
}

AABB MeshStorage::mesh_get_aabb(RID p_mesh, RID p_skeleton) {
	Mesh *mesh = mesh_owner.get_or_null(p_mesh);
	ERR_FAIL_NULL_V(mesh, AABB());

	if (mesh->custom_aabb != AABB()) {
		return mesh->custom_aabb;
	}

	Skeleton *skeleton = skeleton_owner.get_or_null(p_skeleton);

	if (!skeleton || skeleton->size == 0 || mesh->skeleton_aabb_version == skeleton->version) {
		return mesh->aabb;
	}

	// Calculate AABB based on Skeleton
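	// Per-bone animation data is read straight out of the skeleton's float buffer:
	// 2D skeletons store 8 floats per bone (two rows of four floats forming a 2x4
	// transform), 3D skeletons store 12 floats per bone (a 3x4 transform).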
	AABB aabb;
	for (uint32_t i = 0; i < mesh->surface_count; i++) {
		AABB laabb;
		const Mesh::Surface &surface = *mesh->surfaces[i];
		if ((surface.format & RS::ARRAY_FORMAT_BONES) && surface.bone_aabbs.size()) {
			int bs = surface.bone_aabbs.size();
			const AABB *skbones = surface.bone_aabbs.ptr();

			int sbs = skeleton->size;
			ERR_CONTINUE(bs > sbs);
			const float *baseptr = skeleton->data.ptr();

			bool found_bone_aabb = false;

			if (skeleton->use_2d) {
				for (int j = 0; j < bs; j++) {
					if (skbones[j].size == Vector3(-1, -1, -1)) {
						continue; //bone is unused
					}

					const float *dataptr = baseptr + j * 8;

					Transform3D mtx;

					mtx.basis.rows[0][0] = dataptr[0];
					mtx.basis.rows[0][1] = dataptr[1];
					mtx.origin.x = dataptr[3];

					mtx.basis.rows[1][0] = dataptr[4];
					mtx.basis.rows[1][1] = dataptr[5];
					mtx.origin.y = dataptr[7];

					// Transform bounds to skeleton's space before applying animation data.
					AABB baabb = surface.mesh_to_skeleton_xform.xform(skbones[j]);
					baabb = mtx.xform(baabb);

					if (!found_bone_aabb) {
						laabb = baabb;
						found_bone_aabb = true;
					} else {
						laabb.merge_with(baabb);
					}
				}
			} else {
				for (int j = 0; j < bs; j++) {
					if (skbones[j].size == Vector3(-1, -1, -1)) {
						continue; //bone is unused
					}

					const float *dataptr = baseptr + j * 12;

					Transform3D mtx;

					mtx.basis.rows[0][0] = dataptr[0];
					mtx.basis.rows[0][1] = dataptr[1];
					mtx.basis.rows[0][2] = dataptr[2];
					mtx.origin.x = dataptr[3];

					mtx.basis.rows[1][0] = dataptr[4];
					mtx.basis.rows[1][1] = dataptr[5];
					mtx.basis.rows[1][2] = dataptr[6];
					mtx.origin.y = dataptr[7];

					mtx.basis.rows[2][0] = dataptr[8];
					mtx.basis.rows[2][1] = dataptr[9];
					mtx.basis.rows[2][2] = dataptr[10];
					mtx.origin.z = dataptr[11];

					// Transform bounds to skeleton's space before applying animation data.
					AABB baabb = surface.mesh_to_skeleton_xform.xform(skbones[j]);
					baabb = mtx.xform(baabb);

					if (!found_bone_aabb) {
						laabb = baabb;
						found_bone_aabb = true;
					} else {
						laabb.merge_with(baabb);
					}
				}
			}

			if (found_bone_aabb) {
				// Transform skeleton bounds back to mesh's space if any animated AABB applied.
				laabb = surface.mesh_to_skeleton_xform.affine_inverse().xform(laabb);
			}

			if (laabb.size == Vector3()) {
				laabb = surface.aabb;
			}
		} else {
			laabb = surface.aabb;
		}

		if (i == 0) {
			aabb = laabb;
		} else {
			aabb.merge_with(laabb);
		}
	}

	mesh->aabb = aabb;

	mesh->skeleton_aabb_version = skeleton->version;
	return aabb;
}
void MeshStorage::mesh_set_path(RID p_mesh, const String &p_path) {
	Mesh *mesh = mesh_owner.get_or_null(p_mesh);
	ERR_FAIL_NULL(mesh);

	mesh->path = p_path;
}

String MeshStorage::mesh_get_path(RID p_mesh) const {
	Mesh *mesh = mesh_owner.get_or_null(p_mesh);
	ERR_FAIL_NULL_V(mesh, String());

	return mesh->path;
}

void MeshStorage::mesh_set_shadow_mesh(RID p_mesh, RID p_shadow_mesh) {
	Mesh *mesh = mesh_owner.get_or_null(p_mesh);
	ERR_FAIL_NULL(mesh);

	Mesh *shadow_mesh = mesh_owner.get_or_null(mesh->shadow_mesh);
	if (shadow_mesh) {
		shadow_mesh->shadow_owners.erase(mesh);
	}

	mesh->shadow_mesh = p_shadow_mesh;

	shadow_mesh = mesh_owner.get_or_null(mesh->shadow_mesh);
	if (shadow_mesh) {
		shadow_mesh->shadow_owners.insert(mesh);
	}

	mesh->dependency.changed_notify(Dependency::DEPENDENCY_CHANGED_MESH);
}

void MeshStorage::mesh_clear(RID p_mesh) {
	Mesh *mesh = mesh_owner.get_or_null(p_mesh);
	ERR_FAIL_NULL(mesh);

	// Clear instance data before mesh data.
	for (MeshInstance *mi : mesh->instances) {
		_mesh_instance_clear(mi);
	}

	for (uint32_t i = 0; i < mesh->surface_count; i++) {
		Mesh::Surface &s = *mesh->surfaces[i];

		if (s.vertex_buffer != 0) {
			GLES3::Utilities::get_singleton()->buffer_free_data(s.vertex_buffer);
			s.vertex_buffer = 0;
		}

		if (s.version_count != 0) {
			for (uint32_t j = 0; j < s.version_count; j++) {
				glDeleteVertexArrays(1, &s.versions[j].vertex_array);
				s.versions[j].vertex_array = 0;
			}
		}

		if (s.attribute_buffer != 0) {
			GLES3::Utilities::get_singleton()->buffer_free_data(s.attribute_buffer);
			s.attribute_buffer = 0;
		}

		if (s.skin_buffer != 0) {
			GLES3::Utilities::get_singleton()->buffer_free_data(s.skin_buffer);
			s.skin_buffer = 0;
		}

		if (s.index_buffer != 0) {
			GLES3::Utilities::get_singleton()->buffer_free_data(s.index_buffer);
			s.index_buffer = 0;
		}

		if (s.versions) {
			memfree(s.versions); //reallocs, so free with memfree.
		}

		if (s.wireframe) {
			GLES3::Utilities::get_singleton()->buffer_free_data(s.wireframe->index_buffer);
			memdelete(s.wireframe);
		}

		if (s.lod_count) {
			for (uint32_t j = 0; j < s.lod_count; j++) {
				if (s.lods[j].index_buffer != 0) {
					GLES3::Utilities::get_singleton()->buffer_free_data(s.lods[j].index_buffer);
					s.lods[j].index_buffer = 0;
				}
			}
			memdelete_arr(s.lods);
		}

		if (mesh->blend_shape_count) {
			for (uint32_t j = 0; j < mesh->blend_shape_count; j++) {
				if (s.blend_shapes[j].vertex_buffer != 0) {
					GLES3::Utilities::get_singleton()->buffer_free_data(s.blend_shapes[j].vertex_buffer);
					s.blend_shapes[j].vertex_buffer = 0;
				}
				if (s.blend_shapes[j].vertex_array != 0) {
					glDeleteVertexArrays(1, &s.blend_shapes[j].vertex_array);
					s.blend_shapes[j].vertex_array = 0;
				}
			}
			memdelete_arr(s.blend_shapes);
		}

		memdelete(mesh->surfaces[i]);
	}
	if (mesh->surfaces) {
		memfree(mesh->surfaces);
	}

	mesh->surfaces = nullptr;
	mesh->surface_count = 0;
	mesh->material_cache.clear();
	mesh->has_bone_weights = false;
	mesh->dependency.changed_notify(Dependency::DEPENDENCY_CHANGED_MESH);

	for (Mesh *E : mesh->shadow_owners) {
		Mesh *shadow_owner = E;
		shadow_owner->shadow_mesh = RID();
		shadow_owner->dependency.changed_notify(Dependency::DEPENDENCY_CHANGED_MESH);
	}
}
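// Builds a vertex array object (a "version") for one surface and one vertex input mask.
// Attribute offsets and strides are derived from the surface format: positions and
// packed normals/tangents come from the vertex buffer, attributes (color, UVs, custom)
// from the attribute buffer, and bones/weights from the skin buffer. When a
// MeshInstance surface (mis) is passed, the VAO instead reads the instance's
// transform-feedback output buffer, which stores fully interleaved float data.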
void MeshStorage::_mesh_surface_generate_version_for_input_mask(Mesh::Surface::Version &v, Mesh::Surface *s, uint64_t p_input_mask, MeshInstance::Surface *mis) {
	Mesh::Surface::Attrib attribs[RS::ARRAY_MAX];

	int position_stride = 0; // Vertex position only.
	int normal_tangent_stride = 0;
	int attributes_stride = 0;
	int skin_stride = 0;

	for (int i = 0; i < RS::ARRAY_INDEX; i++) {
		attribs[i].enabled = false;
		attribs[i].integer = false;
		if (!(s->format & (1ULL << i))) {
			continue;
		}

		if ((p_input_mask & (1ULL << i))) {
			// Only enable if it matches input mask.
			// Iterate over all anyway, so we can calculate stride.
			attribs[i].enabled = true;
		}

		switch (i) {
			case RS::ARRAY_VERTEX: {
				attribs[i].offset = 0;
				attribs[i].type = GL_FLOAT;
				attribs[i].normalized = GL_FALSE;
				if (s->format & RS::ARRAY_FLAG_USE_2D_VERTICES) {
					attribs[i].size = 2;
					position_stride = attribs[i].size * sizeof(float);
				} else {
					if (!mis && (s->format & RS::ARRAY_FLAG_COMPRESS_ATTRIBUTES)) {
						attribs[i].size = 4;
						position_stride = attribs[i].size * sizeof(uint16_t);
						attribs[i].type = GL_UNSIGNED_SHORT;
						attribs[i].normalized = GL_TRUE;
					} else {
						attribs[i].size = 3;
						position_stride = attribs[i].size * sizeof(float);
					}
				}
			} break;
			case RS::ARRAY_NORMAL: {
				if (!mis && (s->format & RS::ARRAY_FLAG_COMPRESS_ATTRIBUTES)) {
					attribs[i].size = 2;
					normal_tangent_stride += 2 * attribs[i].size;
				} else {
					attribs[i].size = 4;
					// A small trick here: if we are uncompressed and have normals but no tangents, we need
					// the shader to think there are 4 components to "axis_tangent_attrib". So we give a size of 4,
					// but a stride based on only having 2 elements.
					if (!(s->format & RS::ARRAY_FORMAT_TANGENT)) {
						normal_tangent_stride += (mis ? sizeof(float) : sizeof(uint16_t)) * 2;
					} else {
						normal_tangent_stride += (mis ? sizeof(float) : sizeof(uint16_t)) * 4;
					}
				}
				if (mis) {
					// Transform feedback has to interleave all of the attributes or none of them; it can't mix interleaving.
					attribs[i].offset = position_stride;
					normal_tangent_stride += position_stride;
					position_stride = normal_tangent_stride;
				} else {
					attribs[i].offset = position_stride * s->vertex_count;
				}
				attribs[i].type = (mis ? GL_FLOAT : GL_UNSIGNED_SHORT);
				attribs[i].normalized = GL_TRUE;
			} break;
			case RS::ARRAY_TANGENT: {
				// We never use the tangent attribute. It is always packed in ARRAY_NORMAL, or ARRAY_VERTEX.
				attribs[i].enabled = false;
				attribs[i].integer = false;
			} break;
			case RS::ARRAY_COLOR: {
				attribs[i].offset = attributes_stride;
				attribs[i].size = 4;
				attribs[i].type = GL_UNSIGNED_BYTE;
				attributes_stride += 4;
				attribs[i].normalized = GL_TRUE;
			} break;
			case RS::ARRAY_TEX_UV: {
				attribs[i].offset = attributes_stride;
				attribs[i].size = 2;
				if (s->format & RS::ARRAY_FLAG_COMPRESS_ATTRIBUTES) {
					attribs[i].type = GL_UNSIGNED_SHORT;
					attributes_stride += 2 * sizeof(uint16_t);
					attribs[i].normalized = GL_TRUE;
				} else {
					attribs[i].type = GL_FLOAT;
					attributes_stride += 2 * sizeof(float);
					attribs[i].normalized = GL_FALSE;
				}
			} break;
			case RS::ARRAY_TEX_UV2: {
				attribs[i].offset = attributes_stride;
				attribs[i].size = 2;
				if (s->format & RS::ARRAY_FLAG_COMPRESS_ATTRIBUTES) {
					attribs[i].type = GL_UNSIGNED_SHORT;
					attributes_stride += 2 * sizeof(uint16_t);
					attribs[i].normalized = GL_TRUE;
				} else {
					attribs[i].type = GL_FLOAT;
					attributes_stride += 2 * sizeof(float);
					attribs[i].normalized = GL_FALSE;
				}
			} break;
			case RS::ARRAY_CUSTOM0:
			case RS::ARRAY_CUSTOM1:
			case RS::ARRAY_CUSTOM2:
			case RS::ARRAY_CUSTOM3: {
				attribs[i].offset = attributes_stride;

				int idx = i - RS::ARRAY_CUSTOM0;
				uint32_t fmt_shift[RS::ARRAY_CUSTOM_COUNT] = { RS::ARRAY_FORMAT_CUSTOM0_SHIFT, RS::ARRAY_FORMAT_CUSTOM1_SHIFT, RS::ARRAY_FORMAT_CUSTOM2_SHIFT, RS::ARRAY_FORMAT_CUSTOM3_SHIFT };
				uint32_t fmt = (s->format >> fmt_shift[idx]) & RS::ARRAY_FORMAT_CUSTOM_MASK;
				uint32_t fmtsize[RS::ARRAY_CUSTOM_MAX] = { 4, 4, 4, 8, 4, 8, 12, 16 };
				GLenum gl_type[RS::ARRAY_CUSTOM_MAX] = { GL_UNSIGNED_BYTE, GL_BYTE, GL_HALF_FLOAT, GL_HALF_FLOAT, GL_FLOAT, GL_FLOAT, GL_FLOAT, GL_FLOAT };
				GLboolean norm[RS::ARRAY_CUSTOM_MAX] = { GL_TRUE, GL_TRUE, GL_FALSE, GL_FALSE, GL_FALSE, GL_FALSE, GL_FALSE, GL_FALSE };

				attribs[i].type = gl_type[fmt];
				attributes_stride += fmtsize[fmt];
				attribs[i].size = fmtsize[fmt] / sizeof(float);
				attribs[i].normalized = norm[fmt];
			} break;
			case RS::ARRAY_BONES: {
				attribs[i].offset = skin_stride;
				attribs[i].size = 4;
				attribs[i].type = GL_UNSIGNED_SHORT;
				skin_stride += 4 * sizeof(uint16_t);
				attribs[i].normalized = GL_FALSE;
				attribs[i].integer = true;
			} break;
			case RS::ARRAY_WEIGHTS: {
				attribs[i].offset = skin_stride;
				attribs[i].size = 4;
				attribs[i].type = GL_UNSIGNED_SHORT;
				skin_stride += 4 * sizeof(uint16_t);
				attribs[i].normalized = GL_TRUE;
			} break;
		}
	}

	glGenVertexArrays(1, &v.vertex_array);
	glBindVertexArray(v.vertex_array);

	for (int i = 0; i < RS::ARRAY_INDEX; i++) {
		if (!attribs[i].enabled) {
			glDisableVertexAttribArray(i);
			continue;
		}
		if (i <= RS::ARRAY_TANGENT) {
			attribs[i].stride = (i == RS::ARRAY_VERTEX) ? position_stride : normal_tangent_stride;
			if (mis) {
				glBindBuffer(GL_ARRAY_BUFFER, mis->vertex_buffer);
			} else {
				glBindBuffer(GL_ARRAY_BUFFER, s->vertex_buffer);
			}
		} else if (i <= RS::ARRAY_CUSTOM3) {
			attribs[i].stride = attributes_stride;
			glBindBuffer(GL_ARRAY_BUFFER, s->attribute_buffer);
		} else {
			attribs[i].stride = skin_stride;
			glBindBuffer(GL_ARRAY_BUFFER, s->skin_buffer);
		}

		if (attribs[i].integer) {
			glVertexAttribIPointer(i, attribs[i].size, attribs[i].type, attribs[i].stride, CAST_INT_TO_UCHAR_PTR(attribs[i].offset));
		} else {
			glVertexAttribPointer(i, attribs[i].size, attribs[i].type, attribs[i].normalized, attribs[i].stride, CAST_INT_TO_UCHAR_PTR(attribs[i].offset));
		}
		glEnableVertexAttribArray(i);
	}

	// Do not bind index here as we want to switch between index buffers for LOD
	glBindVertexArray(0);
	glBindBuffer(GL_ARRAY_BUFFER, 0);

	v.input_mask = p_input_mask;
}
/* MESH INSTANCE API */

RID MeshStorage::mesh_instance_create(RID p_base) {
	Mesh *mesh = mesh_owner.get_or_null(p_base);
	ERR_FAIL_NULL_V(mesh, RID());

	RID rid = mesh_instance_owner.make_rid();
	MeshInstance *mi = mesh_instance_owner.get_or_null(rid);

	mi->mesh = mesh;

	for (uint32_t i = 0; i < mesh->surface_count; i++) {
		_mesh_instance_add_surface(mi, mesh, i);
	}

	mi->I = mesh->instances.push_back(mi);

	mi->dirty = true;

	return rid;
}

void MeshStorage::mesh_instance_free(RID p_rid) {
	MeshInstance *mi = mesh_instance_owner.get_or_null(p_rid);
	_mesh_instance_clear(mi);
	mi->mesh->instances.erase(mi->I);
	mi->I = nullptr;

	mesh_instance_owner.free(p_rid);
}

void MeshStorage::mesh_instance_set_skeleton(RID p_mesh_instance, RID p_skeleton) {
	MeshInstance *mi = mesh_instance_owner.get_or_null(p_mesh_instance);
	if (mi->skeleton == p_skeleton) {
		return;
	}
	mi->skeleton = p_skeleton;
	mi->skeleton_version = 0;
	mi->dirty = true;
}

void MeshStorage::mesh_instance_set_blend_shape_weight(RID p_mesh_instance, int p_shape, float p_weight) {
	MeshInstance *mi = mesh_instance_owner.get_or_null(p_mesh_instance);
	ERR_FAIL_NULL(mi);
	ERR_FAIL_INDEX(p_shape, (int)mi->blend_weights.size());
	mi->blend_weights[p_shape] = p_weight;
	mi->dirty = true;
}

void MeshStorage::_mesh_instance_clear(MeshInstance *mi) {
	for (uint32_t i = 0; i < mi->surfaces.size(); i++) {
		if (mi->surfaces[i].version_count != 0) {
			for (uint32_t j = 0; j < mi->surfaces[i].version_count; j++) {
				glDeleteVertexArrays(1, &mi->surfaces[i].versions[j].vertex_array);
				mi->surfaces[i].versions[j].vertex_array = 0;
			}
			memfree(mi->surfaces[i].versions);
		}
		if (mi->surfaces[i].vertex_buffers[0] != 0) {
			GLES3::Utilities::get_singleton()->buffer_free_data(mi->surfaces[i].vertex_buffers[0]);
			GLES3::Utilities::get_singleton()->buffer_free_data(mi->surfaces[i].vertex_buffers[1]);
			mi->surfaces[i].vertex_buffers[0] = 0;
			mi->surfaces[i].vertex_buffers[1] = 0;
		}
		if (mi->surfaces[i].vertex_buffer != 0) {
			GLES3::Utilities::get_singleton()->buffer_free_data(mi->surfaces[i].vertex_buffer);
			mi->surfaces[i].vertex_buffer = 0;
		}
	}
	mi->surfaces.clear();
	mi->blend_weights.clear();
	mi->skeleton_version = 0;
}
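// A MeshInstance surface only needs extra storage when it is skinned or uses blend
// shapes: it then gets its own output vertex buffer (the final, renderable result of
// skinning and blend shapes) and, when blend shapes are present, two ping-pong buffers
// used while accumulating the shapes.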
void MeshStorage::_mesh_instance_add_surface(MeshInstance *mi, Mesh *mesh, uint32_t p_surface) {
	if (mesh->blend_shape_count > 0) {
		mi->blend_weights.resize(mesh->blend_shape_count);
		for (uint32_t i = 0; i < mi->blend_weights.size(); i++) {
			mi->blend_weights[i] = 0.0;
		}
	}

	MeshInstance::Surface s;
	if ((mesh->blend_shape_count > 0 || (mesh->surfaces[p_surface]->format & RS::ARRAY_FORMAT_BONES)) && mesh->surfaces[p_surface]->vertex_buffer_size > 0) {
		// Cache surface properties
		s.format_cache = mesh->surfaces[p_surface]->format;
		if ((s.format_cache & (1ULL << RS::ARRAY_VERTEX))) {
			if (s.format_cache & RS::ARRAY_FLAG_USE_2D_VERTICES) {
				s.vertex_size_cache = 2;
			} else {
				s.vertex_size_cache = 3;
			}
			s.vertex_stride_cache = sizeof(float) * s.vertex_size_cache;
		}
		if ((s.format_cache & (1ULL << RS::ARRAY_NORMAL))) {
			s.vertex_normal_offset_cache = s.vertex_stride_cache;
			s.vertex_stride_cache += sizeof(uint32_t) * 2;
		}
		if ((s.format_cache & (1ULL << RS::ARRAY_TANGENT))) {
			s.vertex_tangent_offset_cache = s.vertex_stride_cache;
			s.vertex_stride_cache += sizeof(uint32_t) * 2;
		}
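		// Illustrative example: a 3D surface with normals and tangents ends up with a
		// 28-byte interleaved stride here: 12 bytes of float position plus 8 bytes each
		// for the packed normal and tangent (two uint32 words apiece).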
  958. int buffer_size = s.vertex_stride_cache * mesh->surfaces[p_surface]->vertex_count;
  959. // Buffer to be used for rendering. Final output of skeleton and blend shapes.
  960. glGenBuffers(1, &s.vertex_buffer);
  961. glBindBuffer(GL_ARRAY_BUFFER, s.vertex_buffer);
  962. GLES3::Utilities::get_singleton()->buffer_allocate_data(GL_ARRAY_BUFFER, s.vertex_buffer, buffer_size, nullptr, GL_DYNAMIC_DRAW, "MeshInstance vertex buffer");
  963. if (mesh->blend_shape_count > 0) {
  964. // Ping-Pong buffers for processing blendshapes.
  965. glGenBuffers(2, s.vertex_buffers);
  966. for (uint32_t i = 0; i < 2; i++) {
  967. glBindBuffer(GL_ARRAY_BUFFER, s.vertex_buffers[i]);
  968. GLES3::Utilities::get_singleton()->buffer_allocate_data(GL_ARRAY_BUFFER, s.vertex_buffers[i], buffer_size, nullptr, GL_DYNAMIC_DRAW, "MeshInstance process buffer[" + itos(i) + "]");
  969. }
  970. }
  971. glBindBuffer(GL_ARRAY_BUFFER, 0); //unbind
  972. }
  973. mi->surfaces.push_back(s);
  974. mi->dirty = true;
  975. }
  976. void MeshStorage::mesh_instance_check_for_update(RID p_mesh_instance) {
  977. MeshInstance *mi = mesh_instance_owner.get_or_null(p_mesh_instance);
  978. bool needs_update = mi->dirty;
  979. if (mi->array_update_list.in_list()) {
  980. return;
  981. }
  982. if (!needs_update && mi->skeleton.is_valid()) {
  983. Skeleton *sk = skeleton_owner.get_or_null(mi->skeleton);
  984. if (sk && sk->version != mi->skeleton_version) {
  985. needs_update = true;
  986. }
  987. }
  988. if (needs_update) {
  989. dirty_mesh_instance_arrays.add(&mi->array_update_list);
  990. }
  991. }
  992. void MeshStorage::mesh_instance_set_canvas_item_transform(RID p_mesh_instance, const Transform2D &p_transform) {
  993. MeshInstance *mi = mesh_instance_owner.get_or_null(p_mesh_instance);
  994. mi->canvas_item_transform_2d = p_transform;
  995. }
  996. void MeshStorage::_blend_shape_bind_mesh_instance_buffer(MeshInstance *p_mi, uint32_t p_surface) {
  997. glBindBuffer(GL_ARRAY_BUFFER, p_mi->surfaces[p_surface].vertex_buffers[0]);
  998. if ((p_mi->surfaces[p_surface].format_cache & (1ULL << RS::ARRAY_VERTEX))) {
  999. glEnableVertexAttribArray(RS::ARRAY_VERTEX);
  1000. glVertexAttribPointer(RS::ARRAY_VERTEX, p_mi->surfaces[p_surface].vertex_size_cache, GL_FLOAT, GL_FALSE, p_mi->surfaces[p_surface].vertex_stride_cache, CAST_INT_TO_UCHAR_PTR(0));
  1001. } else {
  1002. glDisableVertexAttribArray(RS::ARRAY_VERTEX);
  1003. }
  1004. if ((p_mi->surfaces[p_surface].format_cache & (1ULL << RS::ARRAY_NORMAL))) {
  1005. glEnableVertexAttribArray(RS::ARRAY_NORMAL);
  1006. glVertexAttribIPointer(RS::ARRAY_NORMAL, 2, GL_UNSIGNED_INT, p_mi->surfaces[p_surface].vertex_stride_cache, CAST_INT_TO_UCHAR_PTR(p_mi->surfaces[p_surface].vertex_normal_offset_cache));
  1007. } else {
  1008. glDisableVertexAttribArray(RS::ARRAY_NORMAL);
  1009. }
  1010. if ((p_mi->surfaces[p_surface].format_cache & (1ULL << RS::ARRAY_TANGENT))) {
  1011. glEnableVertexAttribArray(RS::ARRAY_TANGENT);
  1012. glVertexAttribIPointer(RS::ARRAY_TANGENT, 2, GL_UNSIGNED_INT, p_mi->surfaces[p_surface].vertex_stride_cache, CAST_INT_TO_UCHAR_PTR(p_mi->surfaces[p_surface].vertex_tangent_offset_cache));
  1013. } else {
  1014. glDisableVertexAttribArray(RS::ARRAY_TANGENT);
  1015. }
  1016. }
void MeshStorage::_compute_skeleton(MeshInstance *p_mi, Skeleton *p_sk, uint32_t p_surface) {
	// Add in the bones and weights.
	glBindBuffer(GL_ARRAY_BUFFER, p_mi->mesh->surfaces[p_surface]->skin_buffer);

	bool use_8_weights = p_mi->surfaces[p_surface].format_cache & RS::ARRAY_FLAG_USE_8_BONE_WEIGHTS;
	int skin_stride = sizeof(int16_t) * (use_8_weights ? 16 : 8);

	glEnableVertexAttribArray(RS::ARRAY_BONES);
	glVertexAttribIPointer(RS::ARRAY_BONES, 4, GL_UNSIGNED_SHORT, skin_stride, CAST_INT_TO_UCHAR_PTR(0));
	if (use_8_weights) {
		glEnableVertexAttribArray(11);
		glVertexAttribIPointer(11, 4, GL_UNSIGNED_SHORT, skin_stride, CAST_INT_TO_UCHAR_PTR(4 * sizeof(uint16_t)));
		glEnableVertexAttribArray(12);
		glVertexAttribPointer(12, 4, GL_UNSIGNED_SHORT, GL_TRUE, skin_stride, CAST_INT_TO_UCHAR_PTR(8 * sizeof(uint16_t)));
		glEnableVertexAttribArray(13);
		glVertexAttribPointer(13, 4, GL_UNSIGNED_SHORT, GL_TRUE, skin_stride, CAST_INT_TO_UCHAR_PTR(12 * sizeof(uint16_t)));
	} else {
		glEnableVertexAttribArray(RS::ARRAY_WEIGHTS);
		glVertexAttribPointer(RS::ARRAY_WEIGHTS, 4, GL_UNSIGNED_SHORT, GL_TRUE, skin_stride, CAST_INT_TO_UCHAR_PTR(4 * sizeof(uint16_t)));
	}

	glBindBufferBase(GL_TRANSFORM_FEEDBACK_BUFFER, 0, p_mi->surfaces[p_surface].vertex_buffer);

	glActiveTexture(GL_TEXTURE0);
	glBindTexture(GL_TEXTURE_2D, p_sk->transforms_texture);

	glBeginTransformFeedback(GL_POINTS);
	glDrawArrays(GL_POINTS, 0, p_mi->mesh->surfaces[p_surface]->vertex_count);
	glEndTransformFeedback();

	glDisableVertexAttribArray(RS::ARRAY_BONES);
	glDisableVertexAttribArray(RS::ARRAY_WEIGHTS);
	glDisableVertexAttribArray(RS::ARRAY_BONES + 2);
	glDisableVertexAttribArray(RS::ARRAY_WEIGHTS + 2);
	glBindVertexArray(0);
	glBindBuffer(GL_TRANSFORM_FEEDBACK_BUFFER, 0);
}

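// Processes all dirty mesh instances with the skeleton shader while rasterization is discarded:
// a base pass writes the base-weighted source vertices into the ping-pong buffers, intermediate
// blend-shape passes accumulate one weighted shape each, and a final pass (optionally combined
// with skinning) writes the result into the per-surface vertex_buffer used for rendering.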
void MeshStorage::update_mesh_instances() {
	if (dirty_mesh_instance_arrays.first() == nullptr) {
		return; //nothing to do
	}

	glEnable(GL_RASTERIZER_DISCARD);
	glBindFramebuffer(GL_FRAMEBUFFER, 0);

	// Process skeletons and blend shapes using transform feedback.
	while (dirty_mesh_instance_arrays.first()) {
		MeshInstance *mi = dirty_mesh_instance_arrays.first()->self();

		Skeleton *sk = skeleton_owner.get_or_null(mi->skeleton);

		// Precompute base weight if using blend shapes.
		float base_weight = 1.0;
		if (mi->mesh->blend_shape_count && mi->mesh->blend_shape_mode == RS::BLEND_SHAPE_MODE_NORMALIZED) {
			for (uint32_t i = 0; i < mi->mesh->blend_shape_count; i++) {
				base_weight -= mi->blend_weights[i];
			}
		}

		for (uint32_t i = 0; i < mi->surfaces.size(); i++) {
			if (mi->surfaces[i].vertex_buffer == 0) {
				continue;
			}

			bool array_is_2d = mi->surfaces[i].format_cache & RS::ARRAY_FLAG_USE_2D_VERTICES;
			bool can_use_skeleton = sk != nullptr && sk->use_2d == array_is_2d && (mi->surfaces[i].format_cache & RS::ARRAY_FORMAT_BONES);
			bool use_8_weights = mi->surfaces[i].format_cache & RS::ARRAY_FLAG_USE_8_BONE_WEIGHTS;

			// Always process blend shapes first.
			if (mi->mesh->blend_shape_count) {
				SkeletonShaderGLES3::ShaderVariant variant = SkeletonShaderGLES3::MODE_BASE_PASS;
				uint64_t specialization = 0;
				specialization |= array_is_2d ? SkeletonShaderGLES3::MODE_2D : 0;
				specialization |= SkeletonShaderGLES3::USE_BLEND_SHAPES;
				if (!array_is_2d) {
					if ((mi->surfaces[i].format_cache & (1ULL << RS::ARRAY_NORMAL))) {
						specialization |= SkeletonShaderGLES3::USE_NORMAL;
					}
					if ((mi->surfaces[i].format_cache & (1ULL << RS::ARRAY_TANGENT))) {
						specialization |= SkeletonShaderGLES3::USE_TANGENT;
					}
				}

				bool success = skeleton_shader.shader.version_bind_shader(skeleton_shader.shader_version, variant, specialization);
				if (!success) {
					continue;
				}

				skeleton_shader.shader.version_set_uniform(SkeletonShaderGLES3::BLEND_WEIGHT, base_weight, skeleton_shader.shader_version, variant, specialization);
				skeleton_shader.shader.version_set_uniform(SkeletonShaderGLES3::BLEND_SHAPE_COUNT, float(mi->mesh->blend_shape_count), skeleton_shader.shader_version, variant, specialization);

				glBindBuffer(GL_ARRAY_BUFFER, 0);
				GLuint vertex_array_gl = 0;
				uint64_t mask = RS::ARRAY_FORMAT_VERTEX | RS::ARRAY_FORMAT_NORMAL | RS::ARRAY_FORMAT_TANGENT;
				uint64_t format = mi->mesh->surfaces[i]->format & mask; // Format should only have vertex, normal, tangent (as necessary).
				mesh_surface_get_vertex_arrays_and_format(mi->mesh->surfaces[i], format, vertex_array_gl);
				glBindVertexArray(vertex_array_gl);
				glBindBufferBase(GL_TRANSFORM_FEEDBACK_BUFFER, 0, mi->surfaces[i].vertex_buffers[0]);
				glBeginTransformFeedback(GL_POINTS);
				glDrawArrays(GL_POINTS, 0, mi->mesh->surfaces[i]->vertex_count);
				glEndTransformFeedback();

				variant = SkeletonShaderGLES3::MODE_BLEND_PASS;
				success = skeleton_shader.shader.version_bind_shader(skeleton_shader.shader_version, variant, specialization);
				if (!success) {
					continue;
				}

				// Do the last blend shape separately, as it can be combined with the skeleton pass.
				for (uint32_t bs = 0; bs < mi->mesh->blend_shape_count - 1; bs++) {
					float weight = mi->blend_weights[bs];

					if (Math::is_zero_approx(weight)) {
						// Not worth processing this one.
						continue;
					}
					skeleton_shader.shader.version_set_uniform(SkeletonShaderGLES3::BLEND_WEIGHT, weight, skeleton_shader.shader_version, variant, specialization);
					skeleton_shader.shader.version_set_uniform(SkeletonShaderGLES3::BLEND_SHAPE_COUNT, float(mi->mesh->blend_shape_count), skeleton_shader.shader_version, variant, specialization);

					glBindVertexArray(mi->mesh->surfaces[i]->blend_shapes[bs].vertex_array);
					_blend_shape_bind_mesh_instance_buffer(mi, i);
					glBindBufferBase(GL_TRANSFORM_FEEDBACK_BUFFER, 0, mi->surfaces[i].vertex_buffers[1]);

					glBeginTransformFeedback(GL_POINTS);
					glDrawArrays(GL_POINTS, 0, mi->mesh->surfaces[i]->vertex_count);
					glEndTransformFeedback();

					SWAP(mi->surfaces[i].vertex_buffers[0], mi->surfaces[i].vertex_buffers[1]);
				}

				uint32_t bs = mi->mesh->blend_shape_count - 1;
				float weight = mi->blend_weights[bs];

				glBindVertexArray(mi->mesh->surfaces[i]->blend_shapes[bs].vertex_array);
				_blend_shape_bind_mesh_instance_buffer(mi, i);

				specialization |= can_use_skeleton ? SkeletonShaderGLES3::USE_SKELETON : 0;
				specialization |= (can_use_skeleton && use_8_weights) ? SkeletonShaderGLES3::USE_EIGHT_WEIGHTS : 0;
				specialization |= SkeletonShaderGLES3::FINAL_PASS;
				success = skeleton_shader.shader.version_bind_shader(skeleton_shader.shader_version, variant, specialization);
				if (!success) {
					continue;
				}

				skeleton_shader.shader.version_set_uniform(SkeletonShaderGLES3::BLEND_WEIGHT, weight, skeleton_shader.shader_version, variant, specialization);
				skeleton_shader.shader.version_set_uniform(SkeletonShaderGLES3::BLEND_SHAPE_COUNT, float(mi->mesh->blend_shape_count), skeleton_shader.shader_version, variant, specialization);

				if (can_use_skeleton) {
					Transform2D transform = mi->canvas_item_transform_2d.affine_inverse() * sk->base_transform_2d;
					skeleton_shader.shader.version_set_uniform(SkeletonShaderGLES3::SKELETON_TRANSFORM_X, transform[0], skeleton_shader.shader_version, variant, specialization);
					skeleton_shader.shader.version_set_uniform(SkeletonShaderGLES3::SKELETON_TRANSFORM_Y, transform[1], skeleton_shader.shader_version, variant, specialization);
					skeleton_shader.shader.version_set_uniform(SkeletonShaderGLES3::SKELETON_TRANSFORM_OFFSET, transform[2], skeleton_shader.shader_version, variant, specialization);

					Transform2D inverse_transform = transform.affine_inverse();
					skeleton_shader.shader.version_set_uniform(SkeletonShaderGLES3::INVERSE_TRANSFORM_X, inverse_transform[0], skeleton_shader.shader_version, variant, specialization);
					skeleton_shader.shader.version_set_uniform(SkeletonShaderGLES3::INVERSE_TRANSFORM_Y, inverse_transform[1], skeleton_shader.shader_version, variant, specialization);
					skeleton_shader.shader.version_set_uniform(SkeletonShaderGLES3::INVERSE_TRANSFORM_OFFSET, inverse_transform[2], skeleton_shader.shader_version, variant, specialization);

					// Do the last blend shape in the same pass as the skeleton.
					_compute_skeleton(mi, sk, i);
					can_use_skeleton = false;
				} else {
					// Do the last blend shape by itself and prepare vertex data for use by the renderer.
					glBindBufferBase(GL_TRANSFORM_FEEDBACK_BUFFER, 0, mi->surfaces[i].vertex_buffer);

					glBeginTransformFeedback(GL_POINTS);
					glDrawArrays(GL_POINTS, 0, mi->mesh->surfaces[i]->vertex_count);
					glEndTransformFeedback();
				}

				glBindVertexArray(0);
				glBindBuffer(GL_TRANSFORM_FEEDBACK_BUFFER, 0);
			}

			// This branch should only execute when the skeleton runs by itself (no blend shapes).
			if (can_use_skeleton) {
				SkeletonShaderGLES3::ShaderVariant variant = SkeletonShaderGLES3::MODE_BASE_PASS;
				uint64_t specialization = 0;
				specialization |= array_is_2d ? SkeletonShaderGLES3::MODE_2D : 0;
				specialization |= SkeletonShaderGLES3::USE_SKELETON;
				specialization |= SkeletonShaderGLES3::FINAL_PASS;
				specialization |= use_8_weights ? SkeletonShaderGLES3::USE_EIGHT_WEIGHTS : 0;
				if (!array_is_2d) {
					if ((mi->surfaces[i].format_cache & (1ULL << RS::ARRAY_NORMAL))) {
						specialization |= SkeletonShaderGLES3::USE_NORMAL;
					}
					if ((mi->surfaces[i].format_cache & (1ULL << RS::ARRAY_TANGENT))) {
						specialization |= SkeletonShaderGLES3::USE_TANGENT;
					}
				}

				bool success = skeleton_shader.shader.version_bind_shader(skeleton_shader.shader_version, variant, specialization);
				if (!success) {
					continue;
				}

				Transform2D transform = mi->canvas_item_transform_2d.affine_inverse() * sk->base_transform_2d;
				skeleton_shader.shader.version_set_uniform(SkeletonShaderGLES3::SKELETON_TRANSFORM_X, transform[0], skeleton_shader.shader_version, variant, specialization);
				skeleton_shader.shader.version_set_uniform(SkeletonShaderGLES3::SKELETON_TRANSFORM_Y, transform[1], skeleton_shader.shader_version, variant, specialization);
				skeleton_shader.shader.version_set_uniform(SkeletonShaderGLES3::SKELETON_TRANSFORM_OFFSET, transform[2], skeleton_shader.shader_version, variant, specialization);

				Transform2D inverse_transform = transform.affine_inverse();
				skeleton_shader.shader.version_set_uniform(SkeletonShaderGLES3::INVERSE_TRANSFORM_X, inverse_transform[0], skeleton_shader.shader_version, variant, specialization);
				skeleton_shader.shader.version_set_uniform(SkeletonShaderGLES3::INVERSE_TRANSFORM_Y, inverse_transform[1], skeleton_shader.shader_version, variant, specialization);
				skeleton_shader.shader.version_set_uniform(SkeletonShaderGLES3::INVERSE_TRANSFORM_OFFSET, inverse_transform[2], skeleton_shader.shader_version, variant, specialization);

				GLuint vertex_array_gl = 0;
				uint64_t mask = RS::ARRAY_FORMAT_VERTEX | RS::ARRAY_FORMAT_NORMAL | RS::ARRAY_FORMAT_TANGENT;
				uint64_t format = mi->mesh->surfaces[i]->format & mask; // Format should only have vertex, normal, tangent (as necessary).
				mesh_surface_get_vertex_arrays_and_format(mi->mesh->surfaces[i], format, vertex_array_gl);
				glBindVertexArray(vertex_array_gl);
				_compute_skeleton(mi, sk, i);
			}
		}

		mi->dirty = false;
		if (sk) {
			mi->skeleton_version = sk->version;
		}
		dirty_mesh_instance_arrays.remove(&mi->array_update_list);
	}

	glDisable(GL_RASTERIZER_DISCARD);
	glBindBuffer(GL_ARRAY_BUFFER, 0);
	glBindBufferBase(GL_TRANSFORM_FEEDBACK_BUFFER, 0, 0);
}

/* MULTIMESH API */

RID MeshStorage::multimesh_allocate() {
	return multimesh_owner.allocate_rid();
}

void MeshStorage::multimesh_initialize(RID p_rid) {
	multimesh_owner.initialize_rid(p_rid, MultiMesh());
}

void MeshStorage::multimesh_free(RID p_rid) {
	_update_dirty_multimeshes();
	multimesh_allocate_data(p_rid, 0, RS::MULTIMESH_TRANSFORM_2D);
	MultiMesh *multimesh = multimesh_owner.get_or_null(p_rid);
	multimesh->dependency.deleted_notify(p_rid);
	multimesh_owner.free(p_rid);
}

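// Per-instance data is stored as a flat float array: 8 floats for a 2D transform or 12 floats
// for a 3D transform, followed by 2 floats each for color and custom data when either is used
// (each is packed as 4 half floats, so the pair can be read as a single compressed uvec4).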
void MeshStorage::multimesh_allocate_data(RID p_multimesh, int p_instances, RS::MultimeshTransformFormat p_transform_format, bool p_use_colors, bool p_use_custom_data) {
	MultiMesh *multimesh = multimesh_owner.get_or_null(p_multimesh);
	ERR_FAIL_NULL(multimesh);

	if (multimesh->instances == p_instances && multimesh->xform_format == p_transform_format && multimesh->uses_colors == p_use_colors && multimesh->uses_custom_data == p_use_custom_data) {
		return;
	}

	if (multimesh->buffer) {
		GLES3::Utilities::get_singleton()->buffer_free_data(multimesh->buffer);
		multimesh->buffer = 0;
	}

	if (multimesh->data_cache_dirty_regions) {
		memdelete_arr(multimesh->data_cache_dirty_regions);
		multimesh->data_cache_dirty_regions = nullptr;
		multimesh->data_cache_used_dirty_regions = 0;
	}

	// If we have either color or custom data, reserve space for both to make data handling logic simpler.
	// This way we can always treat them both as a single, compressed uvec4.
	int color_and_custom_strides = (p_use_colors || p_use_custom_data) ? 2 : 0;

	multimesh->instances = p_instances;
	multimesh->xform_format = p_transform_format;
	multimesh->uses_colors = p_use_colors;
	multimesh->color_offset_cache = p_transform_format == RS::MULTIMESH_TRANSFORM_2D ? 8 : 12;
	multimesh->uses_custom_data = p_use_custom_data;
	multimesh->custom_data_offset_cache = multimesh->color_offset_cache + color_and_custom_strides;
	multimesh->stride_cache = multimesh->custom_data_offset_cache + color_and_custom_strides;
	multimesh->buffer_set = false;

	multimesh->data_cache = Vector<float>();
	multimesh->aabb = AABB();
	multimesh->aabb_dirty = false;
	multimesh->visible_instances = MIN(multimesh->visible_instances, multimesh->instances);

	if (multimesh->instances) {
		glGenBuffers(1, &multimesh->buffer);
		glBindBuffer(GL_ARRAY_BUFFER, multimesh->buffer);
		GLES3::Utilities::get_singleton()->buffer_allocate_data(GL_ARRAY_BUFFER, multimesh->buffer, multimesh->instances * multimesh->stride_cache * sizeof(float), nullptr, GL_STATIC_DRAW, "MultiMesh buffer");
		glBindBuffer(GL_ARRAY_BUFFER, 0);
	}

	multimesh->dependency.changed_notify(Dependency::DEPENDENCY_CHANGED_MULTIMESH);
}

int MeshStorage::multimesh_get_instance_count(RID p_multimesh) const {
	MultiMesh *multimesh = multimesh_owner.get_or_null(p_multimesh);
	ERR_FAIL_NULL_V(multimesh, 0);
	return multimesh->instances;
}

void MeshStorage::multimesh_set_mesh(RID p_multimesh, RID p_mesh) {
	MultiMesh *multimesh = multimesh_owner.get_or_null(p_multimesh);
	ERR_FAIL_NULL(multimesh);
	if (multimesh->mesh == p_mesh || p_mesh.is_null()) {
		return;
	}
	multimesh->mesh = p_mesh;

	if (multimesh->instances == 0) {
		return;
	}

	if (multimesh->data_cache.size()) {
		//we have a data cache, just mark it dirty
		_multimesh_mark_all_dirty(multimesh, false, true);
	} else if (multimesh->instances) {
		// Need to re-create AABB. Unfortunately, calling this has a penalty.
		if (multimesh->buffer_set) {
			Vector<uint8_t> buffer = Utilities::buffer_get_data(GL_ARRAY_BUFFER, multimesh->buffer, multimesh->instances * multimesh->stride_cache * sizeof(float));
			const uint8_t *r = buffer.ptr();
			const float *data = (const float *)r;
			_multimesh_re_create_aabb(multimesh, data, multimesh->instances);
		}
	}

	multimesh->dependency.changed_notify(Dependency::DEPENDENCY_CHANGED_MESH);
}

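// The CPU-side data_cache is split into regions of MULTIMESH_DIRTY_REGION_SIZE instances;
// per-instance setters only mark the affected region dirty, and _update_dirty_multimeshes()
// re-uploads dirty regions (or the whole buffer) the next time the dirty list is flushed.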
#define MULTIMESH_DIRTY_REGION_SIZE 512

void MeshStorage::_multimesh_make_local(MultiMesh *multimesh) const {
	if (multimesh->data_cache.size() > 0 || multimesh->instances == 0) {
		return; //already local
	}
	ERR_FAIL_COND(multimesh->data_cache.size() > 0);
	// this means that the user wants to load/save individual elements,
	// for this, the data must reside on CPU, so just copy it there.
	multimesh->data_cache.resize(multimesh->instances * multimesh->stride_cache);
	{
		float *w = multimesh->data_cache.ptrw();

		if (multimesh->buffer_set) {
			Vector<uint8_t> buffer = Utilities::buffer_get_data(GL_ARRAY_BUFFER, multimesh->buffer, multimesh->instances * multimesh->stride_cache * sizeof(float));
			{
				const uint8_t *r = buffer.ptr();
				memcpy(w, r, buffer.size());
			}
		} else {
			memset(w, 0, (size_t)multimesh->instances * multimesh->stride_cache * sizeof(float));
		}
	}
	uint32_t data_cache_dirty_region_count = (multimesh->instances - 1) / MULTIMESH_DIRTY_REGION_SIZE + 1;
	multimesh->data_cache_dirty_regions = memnew_arr(bool, data_cache_dirty_region_count);
	for (uint32_t i = 0; i < data_cache_dirty_region_count; i++) {
		multimesh->data_cache_dirty_regions[i] = false;
	}
	multimesh->data_cache_used_dirty_regions = 0;
}

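// Marks the region containing p_index (and optionally the AABB) as dirty and links the
// multimesh into the dirty list so it is flushed by _update_dirty_multimeshes().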
void MeshStorage::_multimesh_mark_dirty(MultiMesh *multimesh, int p_index, bool p_aabb) {
	uint32_t region_index = p_index / MULTIMESH_DIRTY_REGION_SIZE;
#ifdef DEBUG_ENABLED
	uint32_t data_cache_dirty_region_count = (multimesh->instances - 1) / MULTIMESH_DIRTY_REGION_SIZE + 1;
	ERR_FAIL_UNSIGNED_INDEX(region_index, data_cache_dirty_region_count); //bug
#endif
	if (!multimesh->data_cache_dirty_regions[region_index]) {
		multimesh->data_cache_dirty_regions[region_index] = true;
		multimesh->data_cache_used_dirty_regions++;
	}

	if (p_aabb) {
		multimesh->aabb_dirty = true;
	}

	if (!multimesh->dirty) {
		multimesh->dirty_list = multimesh_dirty_list;
		multimesh_dirty_list = multimesh;
		multimesh->dirty = true;
	}
}

void MeshStorage::_multimesh_mark_all_dirty(MultiMesh *multimesh, bool p_data, bool p_aabb) {
	if (p_data) {
		uint32_t data_cache_dirty_region_count = (multimesh->instances - 1) / MULTIMESH_DIRTY_REGION_SIZE + 1;

		for (uint32_t i = 0; i < data_cache_dirty_region_count; i++) {
			if (!multimesh->data_cache_dirty_regions[i]) {
				multimesh->data_cache_dirty_regions[i] = true;
				multimesh->data_cache_used_dirty_regions++;
			}
		}
	}

	if (p_aabb) {
		multimesh->aabb_dirty = true;
	}

	if (!multimesh->dirty) {
		multimesh->dirty_list = multimesh_dirty_list;
		multimesh_dirty_list = multimesh;
		multimesh->dirty = true;
	}
}

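// Rebuilds the multimesh AABB by transforming the mesh AABB with each instance transform read
// from the interleaved float data and merging the results.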
void MeshStorage::_multimesh_re_create_aabb(MultiMesh *multimesh, const float *p_data, int p_instances) {
	ERR_FAIL_COND(multimesh->mesh.is_null());
	AABB aabb;
	AABB mesh_aabb = mesh_get_aabb(multimesh->mesh);
	for (int i = 0; i < p_instances; i++) {
		const float *data = p_data + multimesh->stride_cache * i;
		Transform3D t;

		if (multimesh->xform_format == RS::MULTIMESH_TRANSFORM_3D) {
			t.basis.rows[0][0] = data[0];
			t.basis.rows[0][1] = data[1];
			t.basis.rows[0][2] = data[2];
			t.origin.x = data[3];
			t.basis.rows[1][0] = data[4];
			t.basis.rows[1][1] = data[5];
			t.basis.rows[1][2] = data[6];
			t.origin.y = data[7];
			t.basis.rows[2][0] = data[8];
			t.basis.rows[2][1] = data[9];
			t.basis.rows[2][2] = data[10];
			t.origin.z = data[11];
		} else {
			t.basis.rows[0][0] = data[0];
			t.basis.rows[0][1] = data[1];
			t.origin.x = data[3];
			t.basis.rows[1][0] = data[4];
			t.basis.rows[1][1] = data[5];
			t.origin.y = data[7];
		}

		if (i == 0) {
			aabb = t.xform(mesh_aabb);
		} else {
			aabb.merge_with(t.xform(mesh_aabb));
		}
	}

	multimesh->aabb = aabb;
}

void MeshStorage::multimesh_instance_set_transform(RID p_multimesh, int p_index, const Transform3D &p_transform) {
	MultiMesh *multimesh = multimesh_owner.get_or_null(p_multimesh);
	ERR_FAIL_NULL(multimesh);
	ERR_FAIL_INDEX(p_index, multimesh->instances);
	ERR_FAIL_COND(multimesh->xform_format != RS::MULTIMESH_TRANSFORM_3D);

	_multimesh_make_local(multimesh);

	{
		float *w = multimesh->data_cache.ptrw();
		float *dataptr = w + p_index * multimesh->stride_cache;

		dataptr[0] = p_transform.basis.rows[0][0];
		dataptr[1] = p_transform.basis.rows[0][1];
		dataptr[2] = p_transform.basis.rows[0][2];
		dataptr[3] = p_transform.origin.x;
		dataptr[4] = p_transform.basis.rows[1][0];
		dataptr[5] = p_transform.basis.rows[1][1];
		dataptr[6] = p_transform.basis.rows[1][2];
		dataptr[7] = p_transform.origin.y;
		dataptr[8] = p_transform.basis.rows[2][0];
		dataptr[9] = p_transform.basis.rows[2][1];
		dataptr[10] = p_transform.basis.rows[2][2];
		dataptr[11] = p_transform.origin.z;
	}

	_multimesh_mark_dirty(multimesh, p_index, true);
}

void MeshStorage::multimesh_instance_set_transform_2d(RID p_multimesh, int p_index, const Transform2D &p_transform) {
	MultiMesh *multimesh = multimesh_owner.get_or_null(p_multimesh);
	ERR_FAIL_NULL(multimesh);
	ERR_FAIL_INDEX(p_index, multimesh->instances);
	ERR_FAIL_COND(multimesh->xform_format != RS::MULTIMESH_TRANSFORM_2D);

	_multimesh_make_local(multimesh);

	{
		float *w = multimesh->data_cache.ptrw();
		float *dataptr = w + p_index * multimesh->stride_cache;

		dataptr[0] = p_transform.columns[0][0];
		dataptr[1] = p_transform.columns[1][0];
		dataptr[2] = 0;
		dataptr[3] = p_transform.columns[2][0];
		dataptr[4] = p_transform.columns[0][1];
		dataptr[5] = p_transform.columns[1][1];
		dataptr[6] = 0;
		dataptr[7] = p_transform.columns[2][1];
	}

	_multimesh_mark_dirty(multimesh, p_index, true);
}

void MeshStorage::multimesh_instance_set_color(RID p_multimesh, int p_index, const Color &p_color) {
	MultiMesh *multimesh = multimesh_owner.get_or_null(p_multimesh);
	ERR_FAIL_NULL(multimesh);
	ERR_FAIL_INDEX(p_index, multimesh->instances);
	ERR_FAIL_COND(!multimesh->uses_colors);

	_multimesh_make_local(multimesh);

	{
		// Colors are packed into 2 floats.
		float *w = multimesh->data_cache.ptrw();
		float *dataptr = w + p_index * multimesh->stride_cache + multimesh->color_offset_cache;

		uint16_t val[4] = { Math::make_half_float(p_color.r), Math::make_half_float(p_color.g), Math::make_half_float(p_color.b), Math::make_half_float(p_color.a) };
		memcpy(dataptr, val, 2 * 4);
	}

	_multimesh_mark_dirty(multimesh, p_index, false);
}

void MeshStorage::multimesh_instance_set_custom_data(RID p_multimesh, int p_index, const Color &p_color) {
	MultiMesh *multimesh = multimesh_owner.get_or_null(p_multimesh);
	ERR_FAIL_NULL(multimesh);
	ERR_FAIL_INDEX(p_index, multimesh->instances);
	ERR_FAIL_COND(!multimesh->uses_custom_data);

	_multimesh_make_local(multimesh);

	{
		float *w = multimesh->data_cache.ptrw();
		float *dataptr = w + p_index * multimesh->stride_cache + multimesh->custom_data_offset_cache;

		uint16_t val[4] = { Math::make_half_float(p_color.r), Math::make_half_float(p_color.g), Math::make_half_float(p_color.b), Math::make_half_float(p_color.a) };
		memcpy(dataptr, val, 2 * 4);
	}

	_multimesh_mark_dirty(multimesh, p_index, false);
}

RID MeshStorage::multimesh_get_mesh(RID p_multimesh) const {
	MultiMesh *multimesh = multimesh_owner.get_or_null(p_multimesh);
	ERR_FAIL_NULL_V(multimesh, RID());

	return multimesh->mesh;
}

AABB MeshStorage::multimesh_get_aabb(RID p_multimesh) const {
	MultiMesh *multimesh = multimesh_owner.get_or_null(p_multimesh);
	ERR_FAIL_NULL_V(multimesh, AABB());
	if (multimesh->aabb_dirty) {
		const_cast<MeshStorage *>(this)->_update_dirty_multimeshes();
	}
	return multimesh->aabb;
}

Transform3D MeshStorage::multimesh_instance_get_transform(RID p_multimesh, int p_index) const {
	MultiMesh *multimesh = multimesh_owner.get_or_null(p_multimesh);
	ERR_FAIL_NULL_V(multimesh, Transform3D());
	ERR_FAIL_INDEX_V(p_index, multimesh->instances, Transform3D());
	ERR_FAIL_COND_V(multimesh->xform_format != RS::MULTIMESH_TRANSFORM_3D, Transform3D());

	_multimesh_make_local(multimesh);

	Transform3D t;
	{
		const float *r = multimesh->data_cache.ptr();
		const float *dataptr = r + p_index * multimesh->stride_cache;

		t.basis.rows[0][0] = dataptr[0];
		t.basis.rows[0][1] = dataptr[1];
		t.basis.rows[0][2] = dataptr[2];
		t.origin.x = dataptr[3];
		t.basis.rows[1][0] = dataptr[4];
		t.basis.rows[1][1] = dataptr[5];
		t.basis.rows[1][2] = dataptr[6];
		t.origin.y = dataptr[7];
		t.basis.rows[2][0] = dataptr[8];
		t.basis.rows[2][1] = dataptr[9];
		t.basis.rows[2][2] = dataptr[10];
		t.origin.z = dataptr[11];
	}

	return t;
}

Transform2D MeshStorage::multimesh_instance_get_transform_2d(RID p_multimesh, int p_index) const {
	MultiMesh *multimesh = multimesh_owner.get_or_null(p_multimesh);
	ERR_FAIL_NULL_V(multimesh, Transform2D());
	ERR_FAIL_INDEX_V(p_index, multimesh->instances, Transform2D());
	ERR_FAIL_COND_V(multimesh->xform_format != RS::MULTIMESH_TRANSFORM_2D, Transform2D());

	_multimesh_make_local(multimesh);

	Transform2D t;
	{
		const float *r = multimesh->data_cache.ptr();
		const float *dataptr = r + p_index * multimesh->stride_cache;

		t.columns[0][0] = dataptr[0];
		t.columns[1][0] = dataptr[1];
		t.columns[2][0] = dataptr[3];
		t.columns[0][1] = dataptr[4];
		t.columns[1][1] = dataptr[5];
		t.columns[2][1] = dataptr[7];
	}

	return t;
}

Color MeshStorage::multimesh_instance_get_color(RID p_multimesh, int p_index) const {
	MultiMesh *multimesh = multimesh_owner.get_or_null(p_multimesh);
	ERR_FAIL_NULL_V(multimesh, Color());
	ERR_FAIL_INDEX_V(p_index, multimesh->instances, Color());
	ERR_FAIL_COND_V(!multimesh->uses_colors, Color());

	_multimesh_make_local(multimesh);

	Color c;
	{
		const float *r = multimesh->data_cache.ptr();
		const float *dataptr = r + p_index * multimesh->stride_cache + multimesh->color_offset_cache;

		uint16_t raw_data[4];
		memcpy(raw_data, dataptr, 2 * 4);
		c.r = Math::half_to_float(raw_data[0]);
		c.g = Math::half_to_float(raw_data[1]);
		c.b = Math::half_to_float(raw_data[2]);
		c.a = Math::half_to_float(raw_data[3]);
	}

	return c;
}

Color MeshStorage::multimesh_instance_get_custom_data(RID p_multimesh, int p_index) const {
	MultiMesh *multimesh = multimesh_owner.get_or_null(p_multimesh);
	ERR_FAIL_NULL_V(multimesh, Color());
	ERR_FAIL_INDEX_V(p_index, multimesh->instances, Color());
	ERR_FAIL_COND_V(!multimesh->uses_custom_data, Color());

	_multimesh_make_local(multimesh);

	Color c;
	{
		const float *r = multimesh->data_cache.ptr();
		const float *dataptr = r + p_index * multimesh->stride_cache + multimesh->custom_data_offset_cache;

		uint16_t raw_data[4];
		memcpy(raw_data, dataptr, 2 * 4);
		c.r = Math::half_to_float(raw_data[0]);
		c.g = Math::half_to_float(raw_data[1]);
		c.b = Math::half_to_float(raw_data[2]);
		c.a = Math::half_to_float(raw_data[3]);
	}

	return c;
}

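// The public buffer format stores color and custom data as full floats, while the internal
// layout packs them as half floats, so when colors or custom data are in use the buffer is
// repacked through data_cache before being uploaded.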
void MeshStorage::multimesh_set_buffer(RID p_multimesh, const Vector<float> &p_buffer) {
	MultiMesh *multimesh = multimesh_owner.get_or_null(p_multimesh);
	ERR_FAIL_NULL(multimesh);

	if (multimesh->uses_colors || multimesh->uses_custom_data) {
		// Color and custom data need to be packed, so copy the buffer to data_cache and pack it.
		_multimesh_make_local(multimesh);

		uint32_t old_stride = multimesh->xform_format == RS::MULTIMESH_TRANSFORM_2D ? 8 : 12;
		old_stride += multimesh->uses_colors ? 4 : 0;
		old_stride += multimesh->uses_custom_data ? 4 : 0;
		ERR_FAIL_COND(p_buffer.size() != (multimesh->instances * (int)old_stride));

		multimesh->data_cache = p_buffer;

		float *w = multimesh->data_cache.ptrw();

		for (int i = 0; i < multimesh->instances; i++) {
			{
				float *dataptr = w + i * old_stride;
				float *newptr = w + i * multimesh->stride_cache;
				float vals[8] = { dataptr[0], dataptr[1], dataptr[2], dataptr[3], dataptr[4], dataptr[5], dataptr[6], dataptr[7] };
				memcpy(newptr, vals, 8 * 4);
			}

			if (multimesh->xform_format == RS::MULTIMESH_TRANSFORM_3D) {
				float *dataptr = w + i * old_stride + 8;
				float *newptr = w + i * multimesh->stride_cache + 8;
				float vals[8] = { dataptr[0], dataptr[1], dataptr[2], dataptr[3] };
				memcpy(newptr, vals, 4 * 4);
			}

			if (multimesh->uses_colors) {
				float *dataptr = w + i * old_stride + (multimesh->xform_format == RS::MULTIMESH_TRANSFORM_2D ? 8 : 12);
				float *newptr = w + i * multimesh->stride_cache + multimesh->color_offset_cache;
				uint16_t val[4] = { Math::make_half_float(dataptr[0]), Math::make_half_float(dataptr[1]), Math::make_half_float(dataptr[2]), Math::make_half_float(dataptr[3]) };
				memcpy(newptr, val, 2 * 4);
			}
			if (multimesh->uses_custom_data) {
				float *dataptr = w + i * old_stride + (multimesh->xform_format == RS::MULTIMESH_TRANSFORM_2D ? 8 : 12) + (multimesh->uses_colors ? 4 : 0);
				float *newptr = w + i * multimesh->stride_cache + multimesh->custom_data_offset_cache;
				uint16_t val[4] = { Math::make_half_float(dataptr[0]), Math::make_half_float(dataptr[1]), Math::make_half_float(dataptr[2]), Math::make_half_float(dataptr[3]) };
				memcpy(newptr, val, 2 * 4);
			}
		}

		multimesh->data_cache.resize(multimesh->instances * (int)multimesh->stride_cache);
		const float *r = multimesh->data_cache.ptr();
		glBindBuffer(GL_ARRAY_BUFFER, multimesh->buffer);
		glBufferData(GL_ARRAY_BUFFER, multimesh->data_cache.size() * sizeof(float), r, GL_STATIC_DRAW);
		glBindBuffer(GL_ARRAY_BUFFER, 0);
	} else {
		// Only the transform is used, so we can upload directly.
		ERR_FAIL_COND(p_buffer.size() != (multimesh->instances * (int)multimesh->stride_cache));
		const float *r = p_buffer.ptr();
		glBindBuffer(GL_ARRAY_BUFFER, multimesh->buffer);
		glBufferData(GL_ARRAY_BUFFER, p_buffer.size() * sizeof(float), r, GL_STATIC_DRAW);
		glBindBuffer(GL_ARRAY_BUFFER, 0);
	}

	multimesh->buffer_set = true;

	if (multimesh->data_cache.size() || multimesh->uses_colors || multimesh->uses_custom_data) {
		// The data cache is already up to date; just clear the dirty regions, since the whole buffer was uploaded above.
		{
			uint32_t data_cache_dirty_region_count = (multimesh->instances - 1) / MULTIMESH_DIRTY_REGION_SIZE + 1;
			for (uint32_t i = 0; i < data_cache_dirty_region_count; i++) {
				multimesh->data_cache_dirty_regions[i] = false;
			}
			multimesh->data_cache_used_dirty_regions = 0;
		}

		_multimesh_mark_all_dirty(multimesh, false, true); //update AABB
	} else if (multimesh->mesh.is_valid()) {
		//if we have a mesh set, we need to re-generate the AABB from the new data
		const float *data = p_buffer.ptr();

		_multimesh_re_create_aabb(multimesh, data, multimesh->instances);
		multimesh->dependency.changed_notify(Dependency::DEPENDENCY_CHANGED_AABB);
	}
}

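// Returns the buffer in the public (unpacked) format. If no CPU-side cache exists, the data is
// read back from the GPU buffer first, which can stall the pipeline.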
Vector<float> MeshStorage::multimesh_get_buffer(RID p_multimesh) const {
	MultiMesh *multimesh = multimesh_owner.get_or_null(p_multimesh);
	ERR_FAIL_NULL_V(multimesh, Vector<float>());
	Vector<float> ret;
	if (multimesh->buffer == 0 || multimesh->instances == 0) {
		return Vector<float>();
	} else if (multimesh->data_cache.size()) {
		ret = multimesh->data_cache;
	} else {
		// Buffer not cached, so fetch from GPU memory. This can be a stalling operation, avoid whenever possible.
		Vector<uint8_t> buffer = Utilities::buffer_get_data(GL_ARRAY_BUFFER, multimesh->buffer, multimesh->instances * multimesh->stride_cache * sizeof(float));
		ret.resize(multimesh->instances * multimesh->stride_cache);
		{
			float *w = ret.ptrw();
			const uint8_t *r = buffer.ptr();
			memcpy(w, r, buffer.size());
		}
	}

	if (multimesh->uses_colors || multimesh->uses_custom_data) {
		// Need to decompress buffer.
		uint32_t new_stride = multimesh->xform_format == RS::MULTIMESH_TRANSFORM_2D ? 8 : 12;
		new_stride += multimesh->uses_colors ? 4 : 0;
		new_stride += multimesh->uses_custom_data ? 4 : 0;

		Vector<float> decompressed;
		decompressed.resize(multimesh->instances * (int)new_stride);
		float *w = decompressed.ptrw();
		const float *r = ret.ptr();

		for (int i = 0; i < multimesh->instances; i++) {
			{
				float *newptr = w + i * new_stride;
				const float *oldptr = r + i * multimesh->stride_cache;
				float vals[8] = { oldptr[0], oldptr[1], oldptr[2], oldptr[3], oldptr[4], oldptr[5], oldptr[6], oldptr[7] };
				memcpy(newptr, vals, 8 * 4);
			}

			if (multimesh->xform_format == RS::MULTIMESH_TRANSFORM_3D) {
				float *newptr = w + i * new_stride + 8;
				const float *oldptr = r + i * multimesh->stride_cache + 8;
				float vals[8] = { oldptr[0], oldptr[1], oldptr[2], oldptr[3] };
				memcpy(newptr, vals, 4 * 4);
			}

			if (multimesh->uses_colors) {
				float *newptr = w + i * new_stride + (multimesh->xform_format == RS::MULTIMESH_TRANSFORM_2D ? 8 : 12);
				const float *oldptr = r + i * multimesh->stride_cache + multimesh->color_offset_cache;
				uint16_t raw_data[4];
				memcpy(raw_data, oldptr, 2 * 4);
				newptr[0] = Math::half_to_float(raw_data[0]);
				newptr[1] = Math::half_to_float(raw_data[1]);
				newptr[2] = Math::half_to_float(raw_data[2]);
				newptr[3] = Math::half_to_float(raw_data[3]);
			}
			if (multimesh->uses_custom_data) {
				float *newptr = w + i * new_stride + (multimesh->xform_format == RS::MULTIMESH_TRANSFORM_2D ? 8 : 12) + (multimesh->uses_colors ? 4 : 0);
				const float *oldptr = r + i * multimesh->stride_cache + multimesh->custom_data_offset_cache;
				uint16_t raw_data[4];
				memcpy(raw_data, oldptr, 2 * 4);
				newptr[0] = Math::half_to_float(raw_data[0]);
				newptr[1] = Math::half_to_float(raw_data[1]);
				newptr[2] = Math::half_to_float(raw_data[2]);
				newptr[3] = Math::half_to_float(raw_data[3]);
			}
		}

		return decompressed;
	} else {
		return ret;
	}
}

void MeshStorage::multimesh_set_visible_instances(RID p_multimesh, int p_visible) {
	MultiMesh *multimesh = multimesh_owner.get_or_null(p_multimesh);
	ERR_FAIL_NULL(multimesh);
	ERR_FAIL_COND(p_visible < -1 || p_visible > multimesh->instances);
	if (multimesh->visible_instances == p_visible) {
		return;
	}

	if (multimesh->data_cache.size()) {
		// There is a data cache, but we may need to update some sections.
		_multimesh_mark_all_dirty(multimesh, false, true);
		int start = multimesh->visible_instances >= 0 ? multimesh->visible_instances : multimesh->instances;
		for (int i = start; i < p_visible; i++) {
			_multimesh_mark_dirty(multimesh, i, true);
		}
	}

	multimesh->visible_instances = p_visible;

	multimesh->dependency.changed_notify(Dependency::DEPENDENCY_CHANGED_MULTIMESH_VISIBLE_INSTANCES);
}

int MeshStorage::multimesh_get_visible_instances(RID p_multimesh) const {
	MultiMesh *multimesh = multimesh_owner.get_or_null(p_multimesh);
	ERR_FAIL_NULL_V(multimesh, 0);
	return multimesh->visible_instances;
}

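// Flushes the multimesh dirty list: re-uploads modified regions of each buffer (falling back to
// a single full upload when many regions changed) and recomputes any AABB marked dirty.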
void MeshStorage::_update_dirty_multimeshes() {
	while (multimesh_dirty_list) {
		MultiMesh *multimesh = multimesh_dirty_list;

		if (multimesh->data_cache.size()) { // May have been cleared, so only process if it exists.
			const float *data = multimesh->data_cache.ptr();

			uint32_t visible_instances = multimesh->visible_instances >= 0 ? multimesh->visible_instances : multimesh->instances;

			if (multimesh->data_cache_used_dirty_regions) {
				uint32_t data_cache_dirty_region_count = (multimesh->instances - 1) / MULTIMESH_DIRTY_REGION_SIZE + 1;
				uint32_t visible_region_count = visible_instances == 0 ? 0 : (visible_instances - 1) / MULTIMESH_DIRTY_REGION_SIZE + 1;

				GLint region_size = multimesh->stride_cache * MULTIMESH_DIRTY_REGION_SIZE * sizeof(float);

				if (multimesh->data_cache_used_dirty_regions > 32 || multimesh->data_cache_used_dirty_regions > visible_region_count / 2) {
					// If there are too many dirty regions, or they make up the majority of visible regions, just copy the whole buffer; otherwise the transfer cost piles up.
					glBindBuffer(GL_ARRAY_BUFFER, multimesh->buffer);
					glBufferSubData(GL_ARRAY_BUFFER, 0, MIN(visible_region_count * region_size, multimesh->instances * multimesh->stride_cache * sizeof(float)), data);
					glBindBuffer(GL_ARRAY_BUFFER, 0);
				} else {
					// Relatively few dirty regions, so update each one individually.
					// TODO: profile the performance cost on low end.
					glBindBuffer(GL_ARRAY_BUFFER, multimesh->buffer);
					for (uint32_t i = 0; i < visible_region_count; i++) {
						if (multimesh->data_cache_dirty_regions[i]) {
							GLint offset = i * region_size;
							GLint size = multimesh->stride_cache * (uint32_t)multimesh->instances * (uint32_t)sizeof(float);
							uint32_t region_start_index = multimesh->stride_cache * MULTIMESH_DIRTY_REGION_SIZE * i;
							glBufferSubData(GL_ARRAY_BUFFER, offset, MIN(region_size, size - offset), &data[region_start_index]);
						}
					}
					glBindBuffer(GL_ARRAY_BUFFER, 0);
				}

				for (uint32_t i = 0; i < data_cache_dirty_region_count; i++) {
					multimesh->data_cache_dirty_regions[i] = false;
				}

				multimesh->data_cache_used_dirty_regions = 0;
			}

			if (multimesh->aabb_dirty && multimesh->mesh.is_valid()) {
				_multimesh_re_create_aabb(multimesh, data, visible_instances);
				multimesh->aabb_dirty = false;
				multimesh->dependency.changed_notify(Dependency::DEPENDENCY_CHANGED_AABB);
			}
		}

		multimesh_dirty_list = multimesh->dirty_list;
		multimesh->dirty_list = nullptr;
		multimesh->dirty = false;
	}

	multimesh_dirty_list = nullptr;
}

/* SKELETON API */

RID MeshStorage::skeleton_allocate() {
	return skeleton_owner.allocate_rid();
}

void MeshStorage::skeleton_initialize(RID p_rid) {
	skeleton_owner.initialize_rid(p_rid, Skeleton());
}

void MeshStorage::skeleton_free(RID p_rid) {
	_update_dirty_skeletons();
	skeleton_allocate_data(p_rid, 0);
	Skeleton *skeleton = skeleton_owner.get_or_null(p_rid);
	skeleton->dependency.deleted_notify(p_rid);
	skeleton_owner.free(p_rid);
}

void MeshStorage::_skeleton_make_dirty(Skeleton *skeleton) {
	if (!skeleton->dirty) {
		skeleton->dirty = true;
		skeleton->dirty_list = skeleton_dirty_list;
		skeleton_dirty_list = skeleton;
	}
}

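// Bone transforms are stored in a 256-texel-wide RGBA32F texture: 3 texels per bone for 3D
// skeletons and 2 texels per bone for 2D skeletons, with the texture height derived from the
// bone count.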
void MeshStorage::skeleton_allocate_data(RID p_skeleton, int p_bones, bool p_2d_skeleton) {
	Skeleton *skeleton = skeleton_owner.get_or_null(p_skeleton);
	ERR_FAIL_NULL(skeleton);
	ERR_FAIL_COND(p_bones < 0);

	if (skeleton->size == p_bones && skeleton->use_2d == p_2d_skeleton) {
		return;
	}

	skeleton->size = p_bones;
	skeleton->use_2d = p_2d_skeleton;
	skeleton->height = (p_bones * (p_2d_skeleton ? 2 : 3)) / 256;
	if ((p_bones * (p_2d_skeleton ? 2 : 3)) % 256) {
		skeleton->height++;
	}

	if (skeleton->transforms_texture != 0) {
		GLES3::Utilities::get_singleton()->texture_free_data(skeleton->transforms_texture);
		skeleton->transforms_texture = 0;
		skeleton->data.clear();
	}

	if (skeleton->size) {
		skeleton->data.resize(256 * skeleton->height * 4);
		glGenTextures(1, &skeleton->transforms_texture);
		glBindTexture(GL_TEXTURE_2D, skeleton->transforms_texture);
		glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA32F, 256, skeleton->height, 0, GL_RGBA, GL_FLOAT, nullptr);
		glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
		glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
		glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
		glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
		glBindTexture(GL_TEXTURE_2D, 0);
		GLES3::Utilities::get_singleton()->texture_allocated_data(skeleton->transforms_texture, skeleton->data.size() * sizeof(float), "Skeleton transforms texture");

		memset(skeleton->data.ptrw(), 0, skeleton->data.size() * sizeof(float));

		_skeleton_make_dirty(skeleton);
	}

	skeleton->dependency.changed_notify(Dependency::DEPENDENCY_CHANGED_SKELETON_DATA);
}

void MeshStorage::skeleton_set_base_transform_2d(RID p_skeleton, const Transform2D &p_base_transform) {
	Skeleton *skeleton = skeleton_owner.get_or_null(p_skeleton);
	ERR_FAIL_NULL(skeleton);
	ERR_FAIL_COND(!skeleton->use_2d);

	skeleton->base_transform_2d = p_base_transform;
}

int MeshStorage::skeleton_get_bone_count(RID p_skeleton) const {
	Skeleton *skeleton = skeleton_owner.get_or_null(p_skeleton);
	ERR_FAIL_NULL_V(skeleton, 0);

	return skeleton->size;
}

void MeshStorage::skeleton_bone_set_transform(RID p_skeleton, int p_bone, const Transform3D &p_transform) {
	Skeleton *skeleton = skeleton_owner.get_or_null(p_skeleton);
	ERR_FAIL_NULL(skeleton);
	ERR_FAIL_INDEX(p_bone, skeleton->size);
	ERR_FAIL_COND(skeleton->use_2d);

	float *dataptr = skeleton->data.ptrw() + p_bone * 12;

	dataptr[0] = p_transform.basis.rows[0][0];
	dataptr[1] = p_transform.basis.rows[0][1];
	dataptr[2] = p_transform.basis.rows[0][2];
	dataptr[3] = p_transform.origin.x;
	dataptr[4] = p_transform.basis.rows[1][0];
	dataptr[5] = p_transform.basis.rows[1][1];
	dataptr[6] = p_transform.basis.rows[1][2];
	dataptr[7] = p_transform.origin.y;
	dataptr[8] = p_transform.basis.rows[2][0];
	dataptr[9] = p_transform.basis.rows[2][1];
	dataptr[10] = p_transform.basis.rows[2][2];
	dataptr[11] = p_transform.origin.z;

	_skeleton_make_dirty(skeleton);
}

Transform3D MeshStorage::skeleton_bone_get_transform(RID p_skeleton, int p_bone) const {
	Skeleton *skeleton = skeleton_owner.get_or_null(p_skeleton);
	ERR_FAIL_NULL_V(skeleton, Transform3D());
	ERR_FAIL_INDEX_V(p_bone, skeleton->size, Transform3D());
	ERR_FAIL_COND_V(skeleton->use_2d, Transform3D());

	const float *dataptr = skeleton->data.ptr() + p_bone * 12;

	Transform3D t;

	t.basis.rows[0][0] = dataptr[0];
	t.basis.rows[0][1] = dataptr[1];
	t.basis.rows[0][2] = dataptr[2];
	t.origin.x = dataptr[3];
	t.basis.rows[1][0] = dataptr[4];
	t.basis.rows[1][1] = dataptr[5];
	t.basis.rows[1][2] = dataptr[6];
	t.origin.y = dataptr[7];
	t.basis.rows[2][0] = dataptr[8];
	t.basis.rows[2][1] = dataptr[9];
	t.basis.rows[2][2] = dataptr[10];
	t.origin.z = dataptr[11];

	return t;
}

void MeshStorage::skeleton_bone_set_transform_2d(RID p_skeleton, int p_bone, const Transform2D &p_transform) {
	Skeleton *skeleton = skeleton_owner.get_or_null(p_skeleton);
	ERR_FAIL_NULL(skeleton);
	ERR_FAIL_INDEX(p_bone, skeleton->size);
	ERR_FAIL_COND(!skeleton->use_2d);

	float *dataptr = skeleton->data.ptrw() + p_bone * 8;

	dataptr[0] = p_transform.columns[0][0];
	dataptr[1] = p_transform.columns[1][0];
	dataptr[2] = 0;
	dataptr[3] = p_transform.columns[2][0];
	dataptr[4] = p_transform.columns[0][1];
	dataptr[5] = p_transform.columns[1][1];
	dataptr[6] = 0;
	dataptr[7] = p_transform.columns[2][1];

	_skeleton_make_dirty(skeleton);
}

Transform2D MeshStorage::skeleton_bone_get_transform_2d(RID p_skeleton, int p_bone) const {
	Skeleton *skeleton = skeleton_owner.get_or_null(p_skeleton);
	ERR_FAIL_NULL_V(skeleton, Transform2D());
	ERR_FAIL_INDEX_V(p_bone, skeleton->size, Transform2D());
	ERR_FAIL_COND_V(!skeleton->use_2d, Transform2D());

	const float *dataptr = skeleton->data.ptr() + p_bone * 8;

	Transform2D t;

	t.columns[0][0] = dataptr[0];
	t.columns[1][0] = dataptr[1];
	t.columns[2][0] = dataptr[3];
	t.columns[0][1] = dataptr[4];
	t.columns[1][1] = dataptr[5];
	t.columns[2][1] = dataptr[7];

	return t;
}

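// Uploads the CPU-side bone data of each dirty skeleton to its transforms texture and bumps the
// skeleton version so dependent mesh instances re-run skinning on the next update.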
void MeshStorage::_update_dirty_skeletons() {
	while (skeleton_dirty_list) {
		Skeleton *skeleton = skeleton_dirty_list;

		if (skeleton->size) {
			glBindTexture(GL_TEXTURE_2D, skeleton->transforms_texture);
			glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA32F, 256, skeleton->height, 0, GL_RGBA, GL_FLOAT, skeleton->data.ptr());
			glBindTexture(GL_TEXTURE_2D, 0);
		}

		skeleton_dirty_list = skeleton->dirty_list;

		skeleton->dependency.changed_notify(Dependency::DEPENDENCY_CHANGED_SKELETON_BONES);

		skeleton->version++;

		skeleton->dirty = false;
		skeleton->dirty_list = nullptr;
	}

	skeleton_dirty_list = nullptr;
}

void MeshStorage::skeleton_update_dependency(RID p_skeleton, DependencyTracker *p_instance) {
	Skeleton *skeleton = skeleton_owner.get_or_null(p_skeleton);
	ERR_FAIL_NULL(skeleton);

	p_instance->update_dependency(&skeleton->dependency);
}

#endif // GLES3_ENABLED