// nanovg_gl.h — OpenGL/GLES backend for NanoVG.
  1. //
  2. // Copyright (c) 2009-2013 Mikko Mononen memon@inside.org
  3. //
  4. // This software is provided 'as-is', without any express or implied
  5. // warranty. In no event will the authors be held liable for any damages
  6. // arising from the use of this software.
  7. // Permission is granted to anyone to use this software for any purpose,
  8. // including commercial applications, and to alter it and redistribute it
  9. // freely, subject to the following restrictions:
  10. // 1. The origin of this software must not be misrepresented; you must not
  11. // claim that you wrote the original software. If you use this software
  12. // in a product, an acknowledgment in the product documentation would be
  13. // appreciated but is not required.
  14. // 2. Altered source versions must be plainly marked as such, and must not be
  15. // misrepresented as being the original software.
  16. // 3. This notice may not be removed or altered from any source distribution.
  17. //
  18. #ifndef NANOVG_GL_H
  19. #define NANOVG_GL_H
  20. #ifdef __cplusplus
  21. extern "C" {
  22. #endif
// Create flags
enum NVGcreateFlags {
	// Flag indicating if geometry based anti-aliasing is used (may not be needed when using MSAA).
	NVG_ANTIALIAS = 1<<0,
	// Flag indicating if strokes should be drawn using stencil buffer. The rendering will be a little
	// slower, but path overlaps (i.e. self-intersecting or sharp turns) will be drawn just once.
	NVG_STENCIL_STROKES = 1<<1,
	// Flag indicating that additional debug checks are done.
	NVG_DEBUG = 1<<2,
};
  33. #if defined NANOVG_GL2_IMPLEMENTATION
  34. # define NANOVG_GL2 1
  35. # define NANOVG_GL_IMPLEMENTATION 1
  36. #elif defined NANOVG_GL3_IMPLEMENTATION
  37. # define NANOVG_GL3 1
  38. # define NANOVG_GL_IMPLEMENTATION 1
  39. # define NANOVG_GL_USE_UNIFORMBUFFER 1
  40. #elif defined NANOVG_GLES2_IMPLEMENTATION
  41. # define NANOVG_GLES2 1
  42. # define NANOVG_GL_IMPLEMENTATION 1
  43. #elif defined NANOVG_GLES3_IMPLEMENTATION
  44. # define NANOVG_GLES3 1
  45. # define NANOVG_GL_IMPLEMENTATION 1
  46. #endif
  47. #define NANOVG_GL_USE_STATE_FILTER (1)
  48. // Creates NanoVG contexts for different OpenGL (ES) versions.
  49. // Flags should be combination of the create flags above.
  50. #if defined NANOVG_GL2
  51. NVGcontext* nvgCreateGL2(int flags);
  52. void nvgDeleteGL2(NVGcontext* ctx);
  53. int nvglCreateImageFromHandleGL2(NVGcontext* ctx, GLuint textureId, int w, int h, int flags);
  54. GLuint nvglImageHandleGL2(NVGcontext* ctx, int image);
  55. #endif
  56. #if defined NANOVG_GL3
  57. NVGcontext* nvgCreateGL3(int flags);
  58. void nvgDeleteGL3(NVGcontext* ctx);
  59. int nvglCreateImageFromHandleGL3(NVGcontext* ctx, GLuint textureId, int w, int h, int flags);
  60. GLuint nvglImageHandleGL3(NVGcontext* ctx, int image);
  61. #endif
  62. #if defined NANOVG_GLES2
  63. NVGcontext* nvgCreateGLES2(int flags);
  64. void nvgDeleteGLES2(NVGcontext* ctx);
  65. int nvglCreateImageFromHandleGLES2(NVGcontext* ctx, GLuint textureId, int w, int h, int flags);
  66. GLuint nvglImageHandleGLES2(NVGcontext* ctx, int image);
  67. #endif
  68. #if defined NANOVG_GLES3
  69. NVGcontext* nvgCreateGLES3(int flags);
  70. void nvgDeleteGLES3(NVGcontext* ctx);
  71. int nvglCreateImageFromHandleGLES3(NVGcontext* ctx, GLuint textureId, int w, int h, int flags);
  72. GLuint nvglImageHandleGLES3(NVGcontext* ctx, int image);
  73. #endif
// These are additional flags on top of NVGimageFlags.
// Kept above bit 16 so they cannot collide with the core NVGimageFlags bits.
enum NVGimageFlagsGL {
	NVG_IMAGE_NODELETE = 1<<16, // Do not delete GL texture handle.
};
  78. #ifdef __cplusplus
  79. }
  80. #endif
  81. #endif /* NANOVG_GL_H */
  82. #ifdef NANOVG_GL_IMPLEMENTATION
  83. #include <stdlib.h>
  84. #include <stdio.h>
  85. #include <string.h>
  86. #include <math.h>
  87. #include "nanovg.h"
// Indices into GLNVGshader.loc; resolved once in glnvg__getUniforms().
enum GLNVGuniformLoc {
	GLNVG_LOC_VIEWSIZE,
	GLNVG_LOC_TEX,
	GLNVG_LOC_FRAG,
	GLNVG_MAX_LOCS
};

// Values written to the fragment shader 'type' uniform to pick the shading path
// (the numbers correspond to the if/else chain in the fragment shader main()).
enum GLNVGshaderType {
	NSVG_SHADER_FILLGRAD,
	NSVG_SHADER_FILLIMG,
	NSVG_SHADER_SIMPLE,
	NSVG_SHADER_IMG
};

#if NANOVG_GL_USE_UNIFORMBUFFER
// Uniform-buffer binding point for the 'frag' block.
enum GLNVGuniformBindings {
	GLNVG_FRAG_BINDING = 0,
};
#endif
// Compiled fill shader program plus its cached uniform locations.
struct GLNVGshader {
	GLuint prog;
	GLuint frag;
	GLuint vert;
	GLint loc[GLNVG_MAX_LOCS];
};
typedef struct GLNVGshader GLNVGshader;

// Book-keeping for one GL texture managed by the backend.
struct GLNVGtexture {
	int id;       // nanovg image handle; 0 marks a free slot (see glnvg__allocTexture)
	GLuint tex;   // GL texture name
	int width, height;
	int type;     // NVG_TEXTURE_RGBA or single-channel (see glnvg__renderCreateTexture)
	int flags;    // NVGimageFlags | NVGimageFlagsGL
};
typedef struct GLNVGtexture GLNVGtexture;

// Blend factors passed to glBlendFuncSeparate.
struct GLNVGblend
{
	GLenum srcRGB;
	GLenum dstRGB;
	GLenum srcAlpha;
	GLenum dstAlpha;
};
typedef struct GLNVGblend GLNVGblend;

// Kind of a queued render call.
enum GLNVGcallType {
	GLNVG_NONE = 0,
	GLNVG_FILL,
	GLNVG_CONVEXFILL,
	GLNVG_STROKE,
	GLNVG_TRIANGLES,
};

// One deferred draw call. The offset/count fields index the per-frame
// paths/verts/uniforms arrays stored in GLNVGcontext.
struct GLNVGcall {
	int type;   // GLNVGcallType
	int image;  // texture id used by the call
	int pathOffset;
	int pathCount;
	int triangleOffset;
	int triangleCount;
	int uniformOffset; // byte offset into GLNVGcontext.uniforms
	GLNVGblend blendFunc;
};
typedef struct GLNVGcall GLNVGcall;

// Vertex ranges for one path's fill and stroke geometry.
struct GLNVGpath {
	int fillOffset;
	int fillCount;
	int strokeOffset;
	int strokeCount;
};
typedef struct GLNVGpath GLNVGpath;
// Per-draw fragment shader parameters. The layout must match the 'frag'
// uniform block (UBO build) or the vec4 uniform array (non-UBO build)
// declared in the fragment shader source exactly.
struct GLNVGfragUniforms {
#if NANOVG_GL_USE_UNIFORMBUFFER
	float scissorMat[12]; // matrices are actually 3 vec4s
	float paintMat[12];
	struct NVGcolor innerCol;
	struct NVGcolor outerCol;
	float scissorExt[2];
	float scissorScale[2];
	float extent[2];
	float radius;
	float feather;
	float strokeMult;
	float strokeThr;
	int texType;
	int type;
#else
	// note: after modifying layout or size of uniform array,
	// don't forget to also update the fragment shader source!
	#define NANOVG_GL_UNIFORMARRAY_SIZE 11
	union {
		struct {
			float scissorMat[12]; // matrices are actually 3 vec4s
			float paintMat[12];
			struct NVGcolor innerCol;
			struct NVGcolor outerCol;
			float scissorExt[2];
			float scissorScale[2];
			float extent[2];
			float radius;
			float feather;
			float strokeMult;
			float strokeThr;
			// Stored as floats here (unlike the UBO variant) so the whole
			// struct aliases the vec4 uniformArray below; the shader casts
			// them back with int(frag[10].z) / int(frag[10].w).
			float texType;
			float type;
		};
		float uniformArray[NANOVG_GL_UNIFORMARRAY_SIZE][4];
	};
#endif
};
typedef struct GLNVGfragUniforms GLNVGfragUniforms;
// All state for one GL backend instance; passed as 'uptr' to every
// render callback.
struct GLNVGcontext {
	GLNVGshader shader;
	GLNVGtexture* textures; // texture slot array: ntextures used, ctextures allocated
	float view[2];          // view size in pixels — presumably fed to the 'viewSize' uniform; confirm in the renderViewport callback (not in this chunk)
	int ntextures;
	int ctextures;
	int textureId;          // last issued image id (monotonically increasing)
	GLuint vertBuf;
#if defined NANOVG_GL3
	GLuint vertArr;
#endif
#if NANOVG_GL_USE_UNIFORMBUFFER
	GLuint fragBuf;         // UBO backing the per-call fragment uniforms
#endif
	int fragSize;           // sizeof(GLNVGfragUniforms) rounded up to the UBO offset alignment
	int flags;              // NVGcreateFlags
	// Per frame buffers
	GLNVGcall* calls;
	int ccalls;
	int ncalls;
	GLNVGpath* paths;
	int cpaths;
	int npaths;
	struct NVGvertex* verts;
	int cverts;
	int nverts;
	unsigned char* uniforms;
	int cuniforms;
	int nuniforms;
	// cached state (mirrors of GL state used to skip redundant GL calls)
#if NANOVG_GL_USE_STATE_FILTER
	GLuint boundTexture;
	GLuint stencilMask;
	GLenum stencilFunc;
	GLint stencilFuncRef;
	GLuint stencilFuncMask;
	GLNVGblend blendFunc;
#endif
};
typedef struct GLNVGcontext GLNVGcontext;
  233. static int glnvg__maxi(int a, int b) { return a > b ? a : b; }
  234. #ifdef NANOVG_GLES2
  235. static unsigned int glnvg__nearestPow2(unsigned int num)
  236. {
  237. unsigned n = num > 0 ? num - 1 : 0;
  238. n |= n >> 1;
  239. n |= n >> 2;
  240. n |= n >> 4;
  241. n |= n >> 8;
  242. n |= n >> 16;
  243. n++;
  244. return n;
  245. }
  246. #endif
  247. static void glnvg__bindTexture(GLNVGcontext* gl, GLuint tex)
  248. {
  249. #if NANOVG_GL_USE_STATE_FILTER
  250. if (gl->boundTexture != tex) {
  251. gl->boundTexture = tex;
  252. glBindTexture(GL_TEXTURE_2D, tex);
  253. }
  254. #else
  255. glBindTexture(GL_TEXTURE_2D, tex);
  256. #endif
  257. }
  258. static void glnvg__stencilMask(GLNVGcontext* gl, GLuint mask)
  259. {
  260. #if NANOVG_GL_USE_STATE_FILTER
  261. if (gl->stencilMask != mask) {
  262. gl->stencilMask = mask;
  263. glStencilMask(mask);
  264. }
  265. #else
  266. glStencilMask(mask);
  267. #endif
  268. }
  269. static void glnvg__stencilFunc(GLNVGcontext* gl, GLenum func, GLint ref, GLuint mask)
  270. {
  271. #if NANOVG_GL_USE_STATE_FILTER
  272. if ((gl->stencilFunc != func) ||
  273. (gl->stencilFuncRef != ref) ||
  274. (gl->stencilFuncMask != mask)) {
  275. gl->stencilFunc = func;
  276. gl->stencilFuncRef = ref;
  277. gl->stencilFuncMask = mask;
  278. glStencilFunc(func, ref, mask);
  279. }
  280. #else
  281. glStencilFunc(func, ref, mask);
  282. #endif
  283. }
  284. static void glnvg__blendFuncSeparate(GLNVGcontext* gl, const GLNVGblend* blend)
  285. {
  286. #if NANOVG_GL_USE_STATE_FILTER
  287. if ((gl->blendFunc.srcRGB != blend->srcRGB) ||
  288. (gl->blendFunc.dstRGB != blend->dstRGB) ||
  289. (gl->blendFunc.srcAlpha != blend->srcAlpha) ||
  290. (gl->blendFunc.dstAlpha != blend->dstAlpha)) {
  291. gl->blendFunc = *blend;
  292. glBlendFuncSeparate(blend->srcRGB, blend->dstRGB, blend->srcAlpha,blend->dstAlpha);
  293. }
  294. #else
  295. glBlendFuncSeparate(blend->srcRGB, blend->dstRGB, blend->srcAlpha,blend->dstAlpha);
  296. #endif
  297. }
  298. static GLNVGtexture* glnvg__allocTexture(GLNVGcontext* gl)
  299. {
  300. GLNVGtexture* tex = NULL;
  301. int i;
  302. for (i = 0; i < gl->ntextures; i++) {
  303. if (gl->textures[i].id == 0) {
  304. tex = &gl->textures[i];
  305. break;
  306. }
  307. }
  308. if (tex == NULL) {
  309. if (gl->ntextures+1 > gl->ctextures) {
  310. GLNVGtexture* textures;
  311. int ctextures = glnvg__maxi(gl->ntextures+1, 4) + gl->ctextures/2; // 1.5x Overallocate
  312. textures = (GLNVGtexture*)realloc(gl->textures, sizeof(GLNVGtexture)*ctextures);
  313. if (textures == NULL) return NULL;
  314. gl->textures = textures;
  315. gl->ctextures = ctextures;
  316. }
  317. tex = &gl->textures[gl->ntextures++];
  318. }
  319. memset(tex, 0, sizeof(*tex));
  320. tex->id = ++gl->textureId;
  321. return tex;
  322. }
  323. static GLNVGtexture* glnvg__findTexture(GLNVGcontext* gl, int id)
  324. {
  325. int i;
  326. for (i = 0; i < gl->ntextures; i++)
  327. if (gl->textures[i].id == id)
  328. return &gl->textures[i];
  329. return NULL;
  330. }
  331. static int glnvg__deleteTexture(GLNVGcontext* gl, int id)
  332. {
  333. int i;
  334. for (i = 0; i < gl->ntextures; i++) {
  335. if (gl->textures[i].id == id) {
  336. if (gl->textures[i].tex != 0 && (gl->textures[i].flags & NVG_IMAGE_NODELETE) == 0)
  337. glDeleteTextures(1, &gl->textures[i].tex);
  338. memset(&gl->textures[i], 0, sizeof(gl->textures[i]));
  339. return 1;
  340. }
  341. }
  342. return 0;
  343. }
  344. static void glnvg__dumpShaderError(GLuint shader, const char* name, const char* type)
  345. {
  346. GLchar str[512+1];
  347. GLsizei len = 0;
  348. glGetShaderInfoLog(shader, 512, &len, str);
  349. if (len > 512) len = 512;
  350. str[len] = '\0';
  351. printf("Shader %s/%s error:\n%s\n", name, type, str);
  352. }
  353. static void glnvg__dumpProgramError(GLuint prog, const char* name)
  354. {
  355. GLchar str[512+1];
  356. GLsizei len = 0;
  357. glGetProgramInfoLog(prog, 512, &len, str);
  358. if (len > 512) len = 512;
  359. str[len] = '\0';
  360. printf("Program %s error:\n%s\n", name, str);
  361. }
  362. static void glnvg__checkError(GLNVGcontext* gl, const char* str)
  363. {
  364. GLenum err;
  365. if ((gl->flags & NVG_DEBUG) == 0) return;
  366. err = glGetError();
  367. if (err != GL_NO_ERROR) {
  368. printf("Error %08x after %s\n", err, str);
  369. return;
  370. }
  371. }
  372. static int glnvg__createShader(GLNVGshader* shader, const char* name, const char* header, const char* opts, const char* vshader, const char* fshader)
  373. {
  374. GLint status;
  375. GLuint prog, vert, frag;
  376. const char* str[3];
  377. str[0] = header;
  378. str[1] = opts != NULL ? opts : "";
  379. memset(shader, 0, sizeof(*shader));
  380. prog = glCreateProgram();
  381. vert = glCreateShader(GL_VERTEX_SHADER);
  382. frag = glCreateShader(GL_FRAGMENT_SHADER);
  383. str[2] = vshader;
  384. glShaderSource(vert, 3, str, 0);
  385. str[2] = fshader;
  386. glShaderSource(frag, 3, str, 0);
  387. glCompileShader(vert);
  388. glGetShaderiv(vert, GL_COMPILE_STATUS, &status);
  389. if (status != GL_TRUE) {
  390. glnvg__dumpShaderError(vert, name, "vert");
  391. return 0;
  392. }
  393. glCompileShader(frag);
  394. glGetShaderiv(frag, GL_COMPILE_STATUS, &status);
  395. if (status != GL_TRUE) {
  396. glnvg__dumpShaderError(frag, name, "frag");
  397. return 0;
  398. }
  399. glAttachShader(prog, vert);
  400. glAttachShader(prog, frag);
  401. glBindAttribLocation(prog, 0, "vertex");
  402. glBindAttribLocation(prog, 1, "tcoord");
  403. glLinkProgram(prog);
  404. glGetProgramiv(prog, GL_LINK_STATUS, &status);
  405. if (status != GL_TRUE) {
  406. glnvg__dumpProgramError(prog, name);
  407. return 0;
  408. }
  409. shader->prog = prog;
  410. shader->vert = vert;
  411. shader->frag = frag;
  412. return 1;
  413. }
  414. static void glnvg__deleteShader(GLNVGshader* shader)
  415. {
  416. if (shader->prog != 0)
  417. glDeleteProgram(shader->prog);
  418. if (shader->vert != 0)
  419. glDeleteShader(shader->vert);
  420. if (shader->frag != 0)
  421. glDeleteShader(shader->frag);
  422. }
  423. static void glnvg__getUniforms(GLNVGshader* shader)
  424. {
  425. shader->loc[GLNVG_LOC_VIEWSIZE] = glGetUniformLocation(shader->prog, "viewSize");
  426. shader->loc[GLNVG_LOC_TEX] = glGetUniformLocation(shader->prog, "tex");
  427. #if NANOVG_GL_USE_UNIFORMBUFFER
  428. shader->loc[GLNVG_LOC_FRAG] = glGetUniformBlockIndex(shader->prog, "frag");
  429. #else
  430. shader->loc[GLNVG_LOC_FRAG] = glGetUniformLocation(shader->prog, "frag");
  431. #endif
  432. }
// Backend callback: builds the fill shader (with EDGE_AA when
// NVG_ANTIALIAS is set), creates the vertex buffer/array and, in the
// uniform-buffer build, the fragment UBO. Returns 1 on success, 0 on
// shader creation failure.
static int glnvg__renderCreate(void* uptr)
{
	GLNVGcontext* gl = (GLNVGcontext*)uptr;
	int align = 4;

	// TODO: mediump float may not be enough for GLES2 in iOS.
	// see the following discussion: https://github.com/memononen/nanovg/issues/46
	static const char* shaderHeader =
#if defined NANOVG_GL2
		"#define NANOVG_GL2 1\n"
#elif defined NANOVG_GL3
		"#version 150 core\n"
		"#define NANOVG_GL3 1\n"
#elif defined NANOVG_GLES2
		"#version 100\n"
		"#define NANOVG_GL2 1\n"
#elif defined NANOVG_GLES3
		"#version 300 es\n"
		"#define NANOVG_GL3 1\n"
#endif
#if NANOVG_GL_USE_UNIFORMBUFFER
		"#define USE_UNIFORMBUFFER 1\n"
#else
		"#define UNIFORMARRAY_SIZE 11\n"
#endif
		"\n";

	static const char* fillVertShader =
		"#ifdef NANOVG_GL3\n"
		" uniform vec2 viewSize;\n"
		" in vec2 vertex;\n"
		" in vec2 tcoord;\n"
		" out vec2 ftcoord;\n"
		" out vec2 fpos;\n"
		"#else\n"
		" uniform vec2 viewSize;\n"
		" attribute vec2 vertex;\n"
		" attribute vec2 tcoord;\n"
		" varying vec2 ftcoord;\n"
		" varying vec2 fpos;\n"
		"#endif\n"
		"void main(void) {\n"
		" ftcoord = tcoord;\n"
		" fpos = vertex;\n"
		" gl_Position = vec4(2.0*vertex.x/viewSize.x - 1.0, 1.0 - 2.0*vertex.y/viewSize.y, 0, 1);\n"
		"}\n";

	// Fragment shader: uniforms arrive either via a std140 UBO or via a
	// plain vec4 array (field offsets must match GLNVGfragUniforms).
	static const char* fillFragShader =
		"#ifdef GL_ES\n"
		"#if defined(GL_FRAGMENT_PRECISION_HIGH) || defined(NANOVG_GL3)\n"
		" precision highp float;\n"
		"#else\n"
		" precision mediump float;\n"
		"#endif\n"
		"#endif\n"
		"#ifdef NANOVG_GL3\n"
		"#ifdef USE_UNIFORMBUFFER\n"
		" layout(std140) uniform frag {\n"
		" mat3 scissorMat;\n"
		" mat3 paintMat;\n"
		" vec4 innerCol;\n"
		" vec4 outerCol;\n"
		" vec2 scissorExt;\n"
		" vec2 scissorScale;\n"
		" vec2 extent;\n"
		" float radius;\n"
		" float feather;\n"
		" float strokeMult;\n"
		" float strokeThr;\n"
		" int texType;\n"
		" int type;\n"
		" };\n"
		"#else\n" // NANOVG_GL3 && !USE_UNIFORMBUFFER
		" uniform vec4 frag[UNIFORMARRAY_SIZE];\n"
		"#endif\n"
		" uniform sampler2D tex;\n"
		" in vec2 ftcoord;\n"
		" in vec2 fpos;\n"
		" out vec4 outColor;\n"
		"#else\n" // !NANOVG_GL3
		" uniform vec4 frag[UNIFORMARRAY_SIZE];\n"
		" uniform sampler2D tex;\n"
		" varying vec2 ftcoord;\n"
		" varying vec2 fpos;\n"
		"#endif\n"
		"#ifndef USE_UNIFORMBUFFER\n"
		" #define scissorMat mat3(frag[0].xyz, frag[1].xyz, frag[2].xyz)\n"
		" #define paintMat mat3(frag[3].xyz, frag[4].xyz, frag[5].xyz)\n"
		" #define innerCol frag[6]\n"
		" #define outerCol frag[7]\n"
		" #define scissorExt frag[8].xy\n"
		" #define scissorScale frag[8].zw\n"
		" #define extent frag[9].xy\n"
		" #define radius frag[9].z\n"
		" #define feather frag[9].w\n"
		" #define strokeMult frag[10].x\n"
		" #define strokeThr frag[10].y\n"
		" #define texType int(frag[10].z)\n"
		" #define type int(frag[10].w)\n"
		"#endif\n"
		"\n"
		"float sdroundrect(vec2 pt, vec2 ext, float rad) {\n"
		" vec2 ext2 = ext - vec2(rad,rad);\n"
		" vec2 d = abs(pt) - ext2;\n"
		" return min(max(d.x,d.y),0.0) + length(max(d,0.0)) - rad;\n"
		"}\n"
		"\n"
		"// Scissoring\n"
		"float scissorMask(vec2 p) {\n"
		" vec2 sc = (abs((scissorMat * vec3(p,1.0)).xy) - scissorExt);\n"
		" sc = vec2(0.5,0.5) - sc * scissorScale;\n"
		" return clamp(sc.x,0.0,1.0) * clamp(sc.y,0.0,1.0);\n"
		"}\n"
		"#ifdef EDGE_AA\n"
		"// Stroke - from [0..1] to clipped pyramid, where the slope is 1px.\n"
		"float strokeMask() {\n"
		" return min(1.0, (1.0-abs(ftcoord.x*2.0-1.0))*strokeMult) * min(1.0, ftcoord.y);\n"
		"}\n"
		"#endif\n"
		"\n"
		"void main(void) {\n"
		" vec4 result;\n"
		" float scissor = scissorMask(fpos);\n"
		"#ifdef EDGE_AA\n"
		" float strokeAlpha = strokeMask();\n"
		" if (strokeAlpha < strokeThr) discard;\n"
		"#else\n"
		" float strokeAlpha = 1.0;\n"
		"#endif\n"
		" if (type == 0) { // Gradient\n"
		" // Calculate gradient color using box gradient\n"
		" vec2 pt = (paintMat * vec3(fpos,1.0)).xy;\n"
		" float d = clamp((sdroundrect(pt, extent, radius) + feather*0.5) / feather, 0.0, 1.0);\n"
		" vec4 color = mix(innerCol,outerCol,d);\n"
		" // Combine alpha\n"
		" color *= strokeAlpha * scissor;\n"
		" result = color;\n"
		" } else if (type == 1) { // Image\n"
		" // Calculate color fron texture\n"
		" vec2 pt = (paintMat * vec3(fpos,1.0)).xy / extent;\n"
		"#ifdef NANOVG_GL3\n"
		" vec4 color = texture(tex, pt);\n"
		"#else\n"
		" vec4 color = texture2D(tex, pt);\n"
		"#endif\n"
		" if (texType == 1) color = vec4(color.xyz*color.w,color.w);"
		" if (texType == 2) color = vec4(color.x);"
		" // Apply color tint and alpha.\n"
		" color *= innerCol;\n"
		" // Combine alpha\n"
		" color *= strokeAlpha * scissor;\n"
		" result = color;\n"
		" } else if (type == 2) { // Stencil fill\n"
		" result = vec4(1,1,1,1);\n"
		" } else if (type == 3) { // Textured tris\n"
		"#ifdef NANOVG_GL3\n"
		" vec4 color = texture(tex, ftcoord);\n"
		"#else\n"
		" vec4 color = texture2D(tex, ftcoord);\n"
		"#endif\n"
		" if (texType == 1) color = vec4(color.xyz*color.w,color.w);"
		" if (texType == 2) color = vec4(color.x);"
		" color *= scissor;\n"
		" result = color * innerCol;\n"
		" }\n"
		"#ifdef NANOVG_GL3\n"
		" outColor = result;\n"
		"#else\n"
		" gl_FragColor = result;\n"
		"#endif\n"
		"}\n";

	glnvg__checkError(gl, "init");

	// Compile with edge anti-aliasing only when requested.
	if (gl->flags & NVG_ANTIALIAS) {
		if (glnvg__createShader(&gl->shader, "shader", shaderHeader, "#define EDGE_AA 1\n", fillVertShader, fillFragShader) == 0)
			return 0;
	} else {
		if (glnvg__createShader(&gl->shader, "shader", shaderHeader, NULL, fillVertShader, fillFragShader) == 0)
			return 0;
	}

	glnvg__checkError(gl, "uniform locations");
	glnvg__getUniforms(&gl->shader);

	// Create dynamic vertex array
#if defined NANOVG_GL3
	glGenVertexArrays(1, &gl->vertArr);
#endif
	glGenBuffers(1, &gl->vertBuf);

#if NANOVG_GL_USE_UNIFORMBUFFER
	// Create UBOs
	glUniformBlockBinding(gl->shader.prog, gl->shader.loc[GLNVG_LOC_FRAG], GLNVG_FRAG_BINDING);
	glGenBuffers(1, &gl->fragBuf);
	glGetIntegerv(GL_UNIFORM_BUFFER_OFFSET_ALIGNMENT, &align);
#endif
	// Round the per-call uniform size up to the required buffer alignment.
	gl->fragSize = sizeof(GLNVGfragUniforms) + align - sizeof(GLNVGfragUniforms) % align;

	glnvg__checkError(gl, "create done");

	glFinish();

	return 1;
}
// Backend callback: creates a GL texture of the given type and uploads
// 'data' (may be NULL for an uninitialized texture). Returns the nanovg
// image id, or 0 on failure.
static int glnvg__renderCreateTexture(void* uptr, int type, int w, int h, int imageFlags, const unsigned char* data)
{
	GLNVGcontext* gl = (GLNVGcontext*)uptr;
	GLNVGtexture* tex = glnvg__allocTexture(gl);

	if (tex == NULL) return 0;

#ifdef NANOVG_GLES2
	// Check for non-power of 2.
	// GLES2 restricts NPOT textures: drop the flags it cannot honor.
	if (glnvg__nearestPow2(w) != (unsigned int)w || glnvg__nearestPow2(h) != (unsigned int)h) {
		// No repeat
		if ((imageFlags & NVG_IMAGE_REPEATX) != 0 || (imageFlags & NVG_IMAGE_REPEATY) != 0) {
			printf("Repeat X/Y is not supported for non power-of-two textures (%d x %d)\n", w, h);
			imageFlags &= ~(NVG_IMAGE_REPEATX | NVG_IMAGE_REPEATY);
		}
		// No mips.
		if (imageFlags & NVG_IMAGE_GENERATE_MIPMAPS) {
			printf("Mip-maps is not support for non power-of-two textures (%d x %d)\n", w, h);
			imageFlags &= ~NVG_IMAGE_GENERATE_MIPMAPS;
		}
	}
#endif

	glGenTextures(1, &tex->tex);
	tex->width = w;
	tex->height = h;
	tex->type = type;
	tex->flags = imageFlags;
	glnvg__bindTexture(gl, tex->tex);

	// Tight row packing for the upload; restored to defaults below.
	glPixelStorei(GL_UNPACK_ALIGNMENT,1);
#ifndef NANOVG_GLES2
	glPixelStorei(GL_UNPACK_ROW_LENGTH, tex->width);
	glPixelStorei(GL_UNPACK_SKIP_PIXELS, 0);
	glPixelStorei(GL_UNPACK_SKIP_ROWS, 0);
#endif

#if defined (NANOVG_GL2)
	// GL 1.4 and later has support for generating mipmaps using a tex parameter.
	if (imageFlags & NVG_IMAGE_GENERATE_MIPMAPS) {
		glTexParameteri(GL_TEXTURE_2D, GL_GENERATE_MIPMAP, GL_TRUE);
	}
#endif

	// Single-channel textures use whichever format the GL flavor provides
	// (LUMINANCE on GL2/GLES2, R8/RED on GLES3/GL3).
	if (type == NVG_TEXTURE_RGBA)
		glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, w, h, 0, GL_RGBA, GL_UNSIGNED_BYTE, data);
	else
#if defined(NANOVG_GLES2) || defined (NANOVG_GL2)
		glTexImage2D(GL_TEXTURE_2D, 0, GL_LUMINANCE, w, h, 0, GL_LUMINANCE, GL_UNSIGNED_BYTE, data);
#elif defined(NANOVG_GLES3)
		glTexImage2D(GL_TEXTURE_2D, 0, GL_R8, w, h, 0, GL_RED, GL_UNSIGNED_BYTE, data);
#else
		glTexImage2D(GL_TEXTURE_2D, 0, GL_RED, w, h, 0, GL_RED, GL_UNSIGNED_BYTE, data);
#endif

	// Min filter: pick the mipmapped variant when mipmaps were requested.
	if (imageFlags & NVG_IMAGE_GENERATE_MIPMAPS) {
		if (imageFlags & NVG_IMAGE_NEAREST) {
			glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST_MIPMAP_NEAREST);
		} else {
			glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_LINEAR);
		}
	} else {
		if (imageFlags & NVG_IMAGE_NEAREST) {
			glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
		} else {
			glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
		}
	}

	if (imageFlags & NVG_IMAGE_NEAREST) {
		glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
	} else {
		glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
	}

	if (imageFlags & NVG_IMAGE_REPEATX)
		glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
	else
		glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);

	if (imageFlags & NVG_IMAGE_REPEATY)
		glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
	else
		glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);

	// Restore default unpack state.
	glPixelStorei(GL_UNPACK_ALIGNMENT, 4);
#ifndef NANOVG_GLES2
	glPixelStorei(GL_UNPACK_ROW_LENGTH, 0);
	glPixelStorei(GL_UNPACK_SKIP_PIXELS, 0);
	glPixelStorei(GL_UNPACK_SKIP_ROWS, 0);
#endif

	// The new way to build mipmaps on GLES and GL3
#if !defined(NANOVG_GL2)
	if (imageFlags & NVG_IMAGE_GENERATE_MIPMAPS) {
		glGenerateMipmap(GL_TEXTURE_2D);
	}
#endif

	glnvg__checkError(gl, "create tex");
	glnvg__bindTexture(gl, 0);

	return tex->id;
}
  717. static int glnvg__renderDeleteTexture(void* uptr, int image)
  718. {
  719. GLNVGcontext* gl = (GLNVGcontext*)uptr;
  720. return glnvg__deleteTexture(gl, image);
  721. }
// Backend callback: updates the sub-rectangle (x,y,w,h) of an existing
// texture from 'data' (which covers the full texture width per row).
// Returns 1 on success, 0 if the image id is unknown.
static int glnvg__renderUpdateTexture(void* uptr, int image, int x, int y, int w, int h, const unsigned char* data)
{
	GLNVGcontext* gl = (GLNVGcontext*)uptr;
	GLNVGtexture* tex = glnvg__findTexture(gl, image);

	if (tex == NULL) return 0;
	glnvg__bindTexture(gl, tex->tex);

	glPixelStorei(GL_UNPACK_ALIGNMENT,1);

#ifndef NANOVG_GLES2
	// Use unpack state to address the sub-rectangle inside 'data'.
	glPixelStorei(GL_UNPACK_ROW_LENGTH, tex->width);
	glPixelStorei(GL_UNPACK_SKIP_PIXELS, x);
	glPixelStorei(GL_UNPACK_SKIP_ROWS, y);
#else
	// No support for all of skip, need to update a whole row at a time.
	if (tex->type == NVG_TEXTURE_RGBA)
		data += y*tex->width*4;
	else
		data += y*tex->width;
	x = 0;
	w = tex->width;
#endif

	if (tex->type == NVG_TEXTURE_RGBA)
		glTexSubImage2D(GL_TEXTURE_2D, 0, x,y, w,h, GL_RGBA, GL_UNSIGNED_BYTE, data);
	else
#if defined(NANOVG_GLES2) || defined(NANOVG_GL2)
		glTexSubImage2D(GL_TEXTURE_2D, 0, x,y, w,h, GL_LUMINANCE, GL_UNSIGNED_BYTE, data);
#else
		glTexSubImage2D(GL_TEXTURE_2D, 0, x,y, w,h, GL_RED, GL_UNSIGNED_BYTE, data);
#endif

	// Restore default unpack state.
	glPixelStorei(GL_UNPACK_ALIGNMENT, 4);
#ifndef NANOVG_GLES2
	glPixelStorei(GL_UNPACK_ROW_LENGTH, 0);
	glPixelStorei(GL_UNPACK_SKIP_PIXELS, 0);
	glPixelStorei(GL_UNPACK_SKIP_ROWS, 0);
#endif

	glnvg__bindTexture(gl, 0);

	return 1;
}
  759. static int glnvg__renderGetTextureSize(void* uptr, int image, int* w, int* h)
  760. {
  761. GLNVGcontext* gl = (GLNVGcontext*)uptr;
  762. GLNVGtexture* tex = glnvg__findTexture(gl, image);
  763. if (tex == NULL) return 0;
  764. *w = tex->width;
  765. *h = tex->height;
  766. return 1;
  767. }
  768. static void glnvg__xformToMat3x4(float* m3, float* t)
  769. {
  770. m3[0] = t[0];
  771. m3[1] = t[1];
  772. m3[2] = 0.0f;
  773. m3[3] = 0.0f;
  774. m3[4] = t[2];
  775. m3[5] = t[3];
  776. m3[6] = 0.0f;
  777. m3[7] = 0.0f;
  778. m3[8] = t[4];
  779. m3[9] = t[5];
  780. m3[10] = 1.0f;
  781. m3[11] = 0.0f;
  782. }
  783. static NVGcolor glnvg__premulColor(NVGcolor c)
  784. {
  785. c.r *= c.a;
  786. c.g *= c.a;
  787. c.b *= c.a;
  788. return c;
  789. }
// Fill a GLNVGfragUniforms record from an NVG paint + scissor.
// 'width' is the stroke width (or fringe for fills), 'fringe' the AA pixel
// width, 'strokeThr' the stencil-stroke alpha threshold (-1 disables it).
// Returns 1 on success, 0 if the paint references an unknown image.
static int glnvg__convertPaint(GLNVGcontext* gl, GLNVGfragUniforms* frag, NVGpaint* paint,
							   NVGscissor* scissor, float width, float fringe, float strokeThr)
{
	GLNVGtexture* tex = NULL;
	float invxform[6];
	memset(frag, 0, sizeof(*frag));
	// Shader expects premultiplied-alpha colors.
	frag->innerCol = glnvg__premulColor(paint->innerColor);
	frag->outerCol = glnvg__premulColor(paint->outerColor);
	if (scissor->extent[0] < -0.5f || scissor->extent[1] < -0.5f) {
		// No scissor set: zero matrix + unit extents make the shader's
		// scissor test always pass.
		memset(frag->scissorMat, 0, sizeof(frag->scissorMat));
		frag->scissorExt[0] = 1.0f;
		frag->scissorExt[1] = 1.0f;
		frag->scissorScale[0] = 1.0f;
		frag->scissorScale[1] = 1.0f;
	} else {
		// Map fragments into scissor space; the scale terms compensate for
		// the transform's stretch so the AA edge stays ~1 fringe wide.
		nvgTransformInverse(invxform, scissor->xform);
		glnvg__xformToMat3x4(frag->scissorMat, invxform);
		frag->scissorExt[0] = scissor->extent[0];
		frag->scissorExt[1] = scissor->extent[1];
		frag->scissorScale[0] = sqrtf(scissor->xform[0]*scissor->xform[0] + scissor->xform[2]*scissor->xform[2]) / fringe;
		frag->scissorScale[1] = sqrtf(scissor->xform[1]*scissor->xform[1] + scissor->xform[3]*scissor->xform[3]) / fringe;
	}
	memcpy(frag->extent, paint->extent, sizeof(frag->extent));
	frag->strokeMult = (width*0.5f + fringe*0.5f) / fringe;
	frag->strokeThr = strokeThr;
	if (paint->image != 0) {
		tex = glnvg__findTexture(gl, paint->image);
		if (tex == NULL) return 0;
		if ((tex->flags & NVG_IMAGE_FLIPY) != 0) {
			// Flip the paint transform vertically around the image center.
			float m1[6], m2[6];
			nvgTransformTranslate(m1, 0.0f, frag->extent[1] * 0.5f);
			nvgTransformMultiply(m1, paint->xform);
			nvgTransformScale(m2, 1.0f, -1.0f);
			nvgTransformMultiply(m2, m1);
			nvgTransformTranslate(m1, 0.0f, -frag->extent[1] * 0.5f);
			nvgTransformMultiply(m1, m2);
			nvgTransformInverse(invxform, m1);
		} else {
			nvgTransformInverse(invxform, paint->xform);
		}
		frag->type = NSVG_SHADER_FILLIMG;
		// texType: 0 = RGBA premultiplied, 1 = RGBA straight alpha, 2 = alpha-only.
#if NANOVG_GL_USE_UNIFORMBUFFER
		if (tex->type == NVG_TEXTURE_RGBA)
			frag->texType = (tex->flags & NVG_IMAGE_PREMULTIPLIED) ? 0 : 1;
		else
			frag->texType = 2;
#else
		// Without UBOs the value travels in a float uniform array.
		if (tex->type == NVG_TEXTURE_RGBA)
			frag->texType = (tex->flags & NVG_IMAGE_PREMULTIPLIED) ? 0.0f : 1.0f;
		else
			frag->texType = 2.0f;
#endif
		// printf("frag->texType = %d\n", frag->texType);
	} else {
		frag->type = NSVG_SHADER_FILLGRAD;
		frag->radius = paint->radius;
		frag->feather = paint->feather;
		nvgTransformInverse(invxform, paint->xform);
	}
	glnvg__xformToMat3x4(frag->paintMat, invxform);
	return 1;
}
static GLNVGfragUniforms* nvg__fragUniformPtr(GLNVGcontext* gl, int i);

// Bind the frag-uniform record at byte offset 'uniformOffset' and the paint
// texture for 'image' (0 = no texture) ahead of a draw call.
static void glnvg__setUniforms(GLNVGcontext* gl, int uniformOffset, int image)
{
#if NANOVG_GL_USE_UNIFORMBUFFER
	// Uniforms were uploaded in one UBO during flush; rebind the sub-range.
	glBindBufferRange(GL_UNIFORM_BUFFER, GLNVG_FRAG_BINDING, gl->fragBuf, uniformOffset, sizeof(GLNVGfragUniforms));
#else
	// No UBO support: push the record as a plain vec4-array uniform.
	GLNVGfragUniforms* frag = nvg__fragUniformPtr(gl, uniformOffset);
	glUniform4fv(gl->shader.loc[GLNVG_LOC_FRAG], NANOVG_GL_UNIFORMARRAY_SIZE, &(frag->uniformArray[0][0]));
#endif
	if (image != 0) {
		GLNVGtexture* tex = glnvg__findTexture(gl, image);
		// Unknown id degrades to binding texture 0 rather than failing.
		glnvg__bindTexture(gl, tex != NULL ? tex->tex : 0);
		glnvg__checkError(gl, "tex paint tex");
	} else {
		glnvg__bindTexture(gl, 0);
	}
}
  869. static void glnvg__renderViewport(void* uptr, float width, float height, float devicePixelRatio)
  870. {
  871. NVG_NOTUSED(devicePixelRatio);
  872. GLNVGcontext* gl = (GLNVGcontext*)uptr;
  873. gl->view[0] = width;
  874. gl->view[1] = height;
  875. }
// Render a (possibly concave/self-intersecting) fill with the classic
// two-pass stencil-then-cover technique.
static void glnvg__fill(GLNVGcontext* gl, GLNVGcall* call)
{
	GLNVGpath* paths = &gl->paths[call->pathOffset];
	int i, npaths = call->pathCount;
	// Pass 1: draw shapes into the stencil buffer only (color writes off),
	// incrementing/decrementing per winding to mark covered pixels.
	glEnable(GL_STENCIL_TEST);
	glnvg__stencilMask(gl, 0xff);
	glnvg__stencilFunc(gl, GL_ALWAYS, 0, 0xff);
	glColorMask(GL_FALSE, GL_FALSE, GL_FALSE, GL_FALSE);
	// set bindpoint for solid loc
	glnvg__setUniforms(gl, call->uniformOffset, 0);
	glnvg__checkError(gl, "fill simple");
	glStencilOpSeparate(GL_FRONT, GL_KEEP, GL_KEEP, GL_INCR_WRAP);
	glStencilOpSeparate(GL_BACK, GL_KEEP, GL_KEEP, GL_DECR_WRAP);
	glDisable(GL_CULL_FACE);
	for (i = 0; i < npaths; i++)
		glDrawArrays(GL_TRIANGLE_FAN, paths[i].fillOffset, paths[i].fillCount);
	glEnable(GL_CULL_FACE);
	// Draw anti-aliased pixels
	glColorMask(GL_TRUE, GL_TRUE, GL_TRUE, GL_TRUE);
	// Second uniform record carries the actual paint.
	glnvg__setUniforms(gl, call->uniformOffset + gl->fragSize, call->image);
	glnvg__checkError(gl, "fill fill");
	if (gl->flags & NVG_ANTIALIAS) {
		// Fringe strips only touch pixels with stencil == 0 (outside edge).
		glnvg__stencilFunc(gl, GL_EQUAL, 0x00, 0xff);
		glStencilOp(GL_KEEP, GL_KEEP, GL_KEEP);
		// Draw fringes
		for (i = 0; i < npaths; i++)
			glDrawArrays(GL_TRIANGLE_STRIP, paths[i].strokeOffset, paths[i].strokeCount);
	}
	// Pass 2: cover — fill the bounding quad wherever stencil != 0,
	// zeroing the stencil as we go.
	glnvg__stencilFunc(gl, GL_NOTEQUAL, 0x0, 0xff);
	glStencilOp(GL_ZERO, GL_ZERO, GL_ZERO);
	glDrawArrays(GL_TRIANGLE_STRIP, call->triangleOffset, call->triangleCount);
	glDisable(GL_STENCIL_TEST);
}
  911. static void glnvg__convexFill(GLNVGcontext* gl, GLNVGcall* call)
  912. {
  913. GLNVGpath* paths = &gl->paths[call->pathOffset];
  914. int i, npaths = call->pathCount;
  915. glnvg__setUniforms(gl, call->uniformOffset, call->image);
  916. glnvg__checkError(gl, "convex fill");
  917. for (i = 0; i < npaths; i++)
  918. glDrawArrays(GL_TRIANGLE_FAN, paths[i].fillOffset, paths[i].fillCount);
  919. if (gl->flags & NVG_ANTIALIAS) {
  920. // Draw fringes
  921. for (i = 0; i < npaths; i++)
  922. glDrawArrays(GL_TRIANGLE_STRIP, paths[i].strokeOffset, paths[i].strokeCount);
  923. }
  924. }
// Render stroke geometry. With NVG_STENCIL_STROKES the stroke is drawn in
// three stencil-guarded passes so overlapping segments don't double-blend.
static void glnvg__stroke(GLNVGcontext* gl, GLNVGcall* call)
{
	GLNVGpath* paths = &gl->paths[call->pathOffset];
	int npaths = call->pathCount, i;
	if (gl->flags & NVG_STENCIL_STROKES) {
		glEnable(GL_STENCIL_TEST);
		glnvg__stencilMask(gl, 0xff);
		// Fill the stroke base without overlap
		glnvg__stencilFunc(gl, GL_EQUAL, 0x0, 0xff);
		glStencilOp(GL_KEEP, GL_KEEP, GL_INCR);
		// Second uniform record has strokeThr set to clip low-alpha fringe.
		glnvg__setUniforms(gl, call->uniformOffset + gl->fragSize, call->image);
		glnvg__checkError(gl, "stroke fill 0");
		for (i = 0; i < npaths; i++)
			glDrawArrays(GL_TRIANGLE_STRIP, paths[i].strokeOffset, paths[i].strokeCount);
		// Draw anti-aliased pixels.
		glnvg__setUniforms(gl, call->uniformOffset, call->image);
		glnvg__stencilFunc(gl, GL_EQUAL, 0x00, 0xff);
		glStencilOp(GL_KEEP, GL_KEEP, GL_KEEP);
		for (i = 0; i < npaths; i++)
			glDrawArrays(GL_TRIANGLE_STRIP, paths[i].strokeOffset, paths[i].strokeCount);
		// Clear stencil buffer.
		glColorMask(GL_FALSE, GL_FALSE, GL_FALSE, GL_FALSE);
		glnvg__stencilFunc(gl, GL_ALWAYS, 0x0, 0xff);
		glStencilOp(GL_ZERO, GL_ZERO, GL_ZERO);
		glnvg__checkError(gl, "stroke fill 1");
		for (i = 0; i < npaths; i++)
			glDrawArrays(GL_TRIANGLE_STRIP, paths[i].strokeOffset, paths[i].strokeCount);
		glColorMask(GL_TRUE, GL_TRUE, GL_TRUE, GL_TRUE);
		glDisable(GL_STENCIL_TEST);
		// glnvg__convertPaint(gl, nvg__fragUniformPtr(gl, call->uniformOffset + gl->fragSize), paint, scissor, strokeWidth, fringe, 1.0f - 0.5f/255.0f);
	} else {
		// Single-pass stroke: overlapping segments may blend twice, but cheaper.
		glnvg__setUniforms(gl, call->uniformOffset, call->image);
		glnvg__checkError(gl, "stroke fill");
		// Draw Strokes
		for (i = 0; i < npaths; i++)
			glDrawArrays(GL_TRIANGLE_STRIP, paths[i].strokeOffset, paths[i].strokeCount);
	}
}
// Render a plain triangle list (used for text and image quads).
static void glnvg__triangles(GLNVGcontext* gl, GLNVGcall* call)
{
	glnvg__setUniforms(gl, call->uniformOffset, call->image);
	glnvg__checkError(gl, "triangles fill");
	glDrawArrays(GL_TRIANGLES, call->triangleOffset, call->triangleCount);
}
  969. static void glnvg__renderCancel(void* uptr) {
  970. GLNVGcontext* gl = (GLNVGcontext*)uptr;
  971. gl->nverts = 0;
  972. gl->npaths = 0;
  973. gl->ncalls = 0;
  974. gl->nuniforms = 0;
  975. }
  976. static GLenum glnvg_convertBlendFuncFactor(int factor)
  977. {
  978. if (factor == NVG_ZERO)
  979. return GL_ZERO;
  980. if (factor == NVG_ONE)
  981. return GL_ONE;
  982. if (factor == NVG_SRC_COLOR)
  983. return GL_SRC_COLOR;
  984. if (factor == NVG_ONE_MINUS_SRC_COLOR)
  985. return GL_ONE_MINUS_SRC_COLOR;
  986. if (factor == NVG_DST_COLOR)
  987. return GL_DST_COLOR;
  988. if (factor == NVG_ONE_MINUS_DST_COLOR)
  989. return GL_ONE_MINUS_DST_COLOR;
  990. if (factor == NVG_SRC_ALPHA)
  991. return GL_SRC_ALPHA;
  992. if (factor == NVG_ONE_MINUS_SRC_ALPHA)
  993. return GL_ONE_MINUS_SRC_ALPHA;
  994. if (factor == NVG_DST_ALPHA)
  995. return GL_DST_ALPHA;
  996. if (factor == NVG_ONE_MINUS_DST_ALPHA)
  997. return GL_ONE_MINUS_DST_ALPHA;
  998. if (factor == NVG_SRC_ALPHA_SATURATE)
  999. return GL_SRC_ALPHA_SATURATE;
  1000. return GL_INVALID_ENUM;
  1001. }
  1002. static GLNVGblend glnvg__blendCompositeOperation(NVGcompositeOperationState op)
  1003. {
  1004. GLNVGblend blend;
  1005. blend.srcRGB = glnvg_convertBlendFuncFactor(op.srcRGB);
  1006. blend.dstRGB = glnvg_convertBlendFuncFactor(op.dstRGB);
  1007. blend.srcAlpha = glnvg_convertBlendFuncFactor(op.srcAlpha);
  1008. blend.dstAlpha = glnvg_convertBlendFuncFactor(op.dstAlpha);
  1009. if (blend.srcRGB == GL_INVALID_ENUM || blend.dstRGB == GL_INVALID_ENUM || blend.srcAlpha == GL_INVALID_ENUM || blend.dstAlpha == GL_INVALID_ENUM)
  1010. {
  1011. blend.srcRGB = GL_ONE;
  1012. blend.dstRGB = GL_ONE_MINUS_SRC_ALPHA;
  1013. blend.srcAlpha = GL_ONE;
  1014. blend.dstAlpha = GL_ONE_MINUS_SRC_ALPHA;
  1015. }
  1016. return blend;
  1017. }
// Backend callback: execute all queued draw calls for the frame. Sets up the
// shared GL state once, uploads the frame's vertex (and, with UBOs, uniform)
// data, replays each recorded call, then resets the per-frame queues.
static void glnvg__renderFlush(void* uptr)
{
	GLNVGcontext* gl = (GLNVGcontext*)uptr;
	int i;
	if (gl->ncalls > 0) {
		// Setup require GL state.
		glUseProgram(gl->shader.prog);
		glEnable(GL_CULL_FACE);
		glCullFace(GL_BACK);
		glFrontFace(GL_CCW);
		glEnable(GL_BLEND);
		glDisable(GL_DEPTH_TEST);
		glDisable(GL_SCISSOR_TEST);
		glColorMask(GL_TRUE, GL_TRUE, GL_TRUE, GL_TRUE);
		glStencilMask(0xffffffff);
		glStencilOp(GL_KEEP, GL_KEEP, GL_KEEP);
		glStencilFunc(GL_ALWAYS, 0, 0xffffffff);
		glActiveTexture(GL_TEXTURE0);
		glBindTexture(GL_TEXTURE_2D, 0);
#if NANOVG_GL_USE_STATE_FILTER
		// Sync the redundant-state cache with the state set above; the blend
		// func is marked invalid so the first call always applies it.
		gl->boundTexture = 0;
		gl->stencilMask = 0xffffffff;
		gl->stencilFunc = GL_ALWAYS;
		gl->stencilFuncRef = 0;
		gl->stencilFuncMask = 0xffffffff;
		gl->blendFunc.srcRGB = GL_INVALID_ENUM;
		gl->blendFunc.srcAlpha = GL_INVALID_ENUM;
		gl->blendFunc.dstRGB = GL_INVALID_ENUM;
		gl->blendFunc.dstAlpha = GL_INVALID_ENUM;
#endif
#if NANOVG_GL_USE_UNIFORMBUFFER
		// Upload ubo for frag shaders
		glBindBuffer(GL_UNIFORM_BUFFER, gl->fragBuf);
		glBufferData(GL_UNIFORM_BUFFER, gl->nuniforms * gl->fragSize, gl->uniforms, GL_STREAM_DRAW);
#endif
		// Upload vertex data
#if defined NANOVG_GL3
		glBindVertexArray(gl->vertArr);
#endif
		glBindBuffer(GL_ARRAY_BUFFER, gl->vertBuf);
		glBufferData(GL_ARRAY_BUFFER, gl->nverts * sizeof(NVGvertex), gl->verts, GL_STREAM_DRAW);
		// Attribute 0 = position (x,y), attribute 1 = texcoord (u,v).
		glEnableVertexAttribArray(0);
		glEnableVertexAttribArray(1);
		glVertexAttribPointer(0, 2, GL_FLOAT, GL_FALSE, sizeof(NVGvertex), (const GLvoid*)(size_t)0);
		glVertexAttribPointer(1, 2, GL_FLOAT, GL_FALSE, sizeof(NVGvertex), (const GLvoid*)(0 + 2*sizeof(float)));
		// Set view and texture just once per frame.
		glUniform1i(gl->shader.loc[GLNVG_LOC_TEX], 0);
		glUniform2fv(gl->shader.loc[GLNVG_LOC_VIEWSIZE], 1, gl->view);
#if NANOVG_GL_USE_UNIFORMBUFFER
		glBindBuffer(GL_UNIFORM_BUFFER, gl->fragBuf);
#endif
		for (i = 0; i < gl->ncalls; i++) {
			GLNVGcall* call = &gl->calls[i];
			glnvg__blendFuncSeparate(gl,&call->blendFunc);
			if (call->type == GLNVG_FILL)
				glnvg__fill(gl, call);
			else if (call->type == GLNVG_CONVEXFILL)
				glnvg__convexFill(gl, call);
			else if (call->type == GLNVG_STROKE)
				glnvg__stroke(gl, call);
			else if (call->type == GLNVG_TRIANGLES)
				glnvg__triangles(gl, call);
		}
		// Restore a neutral GL state for the host application.
		glDisableVertexAttribArray(0);
		glDisableVertexAttribArray(1);
#if defined NANOVG_GL3
		glBindVertexArray(0);
#endif
		glDisable(GL_CULL_FACE);
		glBindBuffer(GL_ARRAY_BUFFER, 0);
		glUseProgram(0);
		glnvg__bindTexture(gl, 0);
	}
	// Reset calls
	gl->nverts = 0;
	gl->npaths = 0;
	gl->ncalls = 0;
	gl->nuniforms = 0;
}
  1097. static int glnvg__maxVertCount(const NVGpath* paths, int npaths)
  1098. {
  1099. int i, count = 0;
  1100. for (i = 0; i < npaths; i++) {
  1101. count += paths[i].nfill;
  1102. count += paths[i].nstroke;
  1103. }
  1104. return count;
  1105. }
  1106. static GLNVGcall* glnvg__allocCall(GLNVGcontext* gl)
  1107. {
  1108. GLNVGcall* ret = NULL;
  1109. if (gl->ncalls+1 > gl->ccalls) {
  1110. GLNVGcall* calls;
  1111. int ccalls = glnvg__maxi(gl->ncalls+1, 128) + gl->ccalls/2; // 1.5x Overallocate
  1112. calls = (GLNVGcall*)realloc(gl->calls, sizeof(GLNVGcall) * ccalls);
  1113. if (calls == NULL) return NULL;
  1114. gl->calls = calls;
  1115. gl->ccalls = ccalls;
  1116. }
  1117. ret = &gl->calls[gl->ncalls++];
  1118. memset(ret, 0, sizeof(GLNVGcall));
  1119. return ret;
  1120. }
  1121. static int glnvg__allocPaths(GLNVGcontext* gl, int n)
  1122. {
  1123. int ret = 0;
  1124. if (gl->npaths+n > gl->cpaths) {
  1125. GLNVGpath* paths;
  1126. int cpaths = glnvg__maxi(gl->npaths + n, 128) + gl->cpaths/2; // 1.5x Overallocate
  1127. paths = (GLNVGpath*)realloc(gl->paths, sizeof(GLNVGpath) * cpaths);
  1128. if (paths == NULL) return -1;
  1129. gl->paths = paths;
  1130. gl->cpaths = cpaths;
  1131. }
  1132. ret = gl->npaths;
  1133. gl->npaths += n;
  1134. return ret;
  1135. }
  1136. static int glnvg__allocVerts(GLNVGcontext* gl, int n)
  1137. {
  1138. int ret = 0;
  1139. if (gl->nverts+n > gl->cverts) {
  1140. NVGvertex* verts;
  1141. int cverts = glnvg__maxi(gl->nverts + n, 4096) + gl->cverts/2; // 1.5x Overallocate
  1142. verts = (NVGvertex*)realloc(gl->verts, sizeof(NVGvertex) * cverts);
  1143. if (verts == NULL) return -1;
  1144. gl->verts = verts;
  1145. gl->cverts = cverts;
  1146. }
  1147. ret = gl->nverts;
  1148. gl->nverts += n;
  1149. return ret;
  1150. }
  1151. static int glnvg__allocFragUniforms(GLNVGcontext* gl, int n)
  1152. {
  1153. int ret = 0, structSize = gl->fragSize;
  1154. if (gl->nuniforms+n > gl->cuniforms) {
  1155. unsigned char* uniforms;
  1156. int cuniforms = glnvg__maxi(gl->nuniforms+n, 128) + gl->cuniforms/2; // 1.5x Overallocate
  1157. uniforms = (unsigned char*)realloc(gl->uniforms, structSize * cuniforms);
  1158. if (uniforms == NULL) return -1;
  1159. gl->uniforms = uniforms;
  1160. gl->cuniforms = cuniforms;
  1161. }
  1162. ret = gl->nuniforms * structSize;
  1163. gl->nuniforms += n;
  1164. return ret;
  1165. }
// Return a pointer to the frag-uniform record at byte offset 'i' within the
// CPU-side uniform staging buffer (offsets come from glnvg__allocFragUniforms).
static GLNVGfragUniforms* nvg__fragUniformPtr(GLNVGcontext* gl, int i)
{
	return (GLNVGfragUniforms*)&gl->uniforms[i];
}
  1170. static void glnvg__vset(NVGvertex* vtx, float x, float y, float u, float v)
  1171. {
  1172. vtx->x = x;
  1173. vtx->y = y;
  1174. vtx->u = u;
  1175. vtx->v = v;
  1176. }
// Backend callback: queue a fill. Copies path vertex data into the frame's
// vertex buffer and records either a GLNVG_FILL (stencil + cover) or a
// GLNVG_CONVEXFILL call, executed later by glnvg__renderFlush().
static void glnvg__renderFill(void* uptr, NVGpaint* paint, NVGcompositeOperationState compositeOperation, NVGscissor* scissor, float fringe,
							  const float* bounds, const NVGpath* paths, int npaths)
{
	GLNVGcontext* gl = (GLNVGcontext*)uptr;
	GLNVGcall* call = glnvg__allocCall(gl);
	NVGvertex* quad;
	GLNVGfragUniforms* frag;
	int i, maxverts, offset;
	if (call == NULL) return;
	call->type = GLNVG_FILL;
	call->triangleCount = 4; // bounding-box quad for the cover pass
	call->pathOffset = glnvg__allocPaths(gl, npaths);
	if (call->pathOffset == -1) goto error;
	call->pathCount = npaths;
	call->image = paint->image;
	call->blendFunc = glnvg__blendCompositeOperation(compositeOperation);
	if (npaths == 1 && paths[0].convex)
	{
		// A single convex path can be drawn directly without stencil passes.
		call->type = GLNVG_CONVEXFILL;
		call->triangleCount = 0; // Bounding box fill quad not needed for convex fill
	}
	// Allocate vertices for all the paths.
	maxverts = glnvg__maxVertCount(paths, npaths) + call->triangleCount;
	offset = glnvg__allocVerts(gl, maxverts);
	if (offset == -1) goto error;
	for (i = 0; i < npaths; i++) {
		GLNVGpath* copy = &gl->paths[call->pathOffset + i];
		const NVGpath* path = &paths[i];
		memset(copy, 0, sizeof(GLNVGpath));
		if (path->nfill > 0) {
			copy->fillOffset = offset;
			copy->fillCount = path->nfill;
			memcpy(&gl->verts[offset], path->fill, sizeof(NVGvertex) * path->nfill);
			offset += path->nfill;
		}
		if (path->nstroke > 0) {
			copy->strokeOffset = offset;
			copy->strokeCount = path->nstroke;
			memcpy(&gl->verts[offset], path->stroke, sizeof(NVGvertex) * path->nstroke);
			offset += path->nstroke;
		}
	}
	// Setup uniforms for draw calls
	if (call->type == GLNVG_FILL) {
		// Quad
		call->triangleOffset = offset;
		quad = &gl->verts[call->triangleOffset];
		glnvg__vset(&quad[0], bounds[2], bounds[3], 0.5f, 1.0f);
		glnvg__vset(&quad[1], bounds[2], bounds[1], 0.5f, 1.0f);
		glnvg__vset(&quad[2], bounds[0], bounds[3], 0.5f, 1.0f);
		glnvg__vset(&quad[3], bounds[0], bounds[1], 0.5f, 1.0f);
		// Two uniform records: stencil pass followed by the cover/paint pass.
		call->uniformOffset = glnvg__allocFragUniforms(gl, 2);
		if (call->uniformOffset == -1) goto error;
		// Simple shader for stencil
		frag = nvg__fragUniformPtr(gl, call->uniformOffset);
		memset(frag, 0, sizeof(*frag));
		frag->strokeThr = -1.0f;
		frag->type = NSVG_SHADER_SIMPLE;
		// Fill shader
		glnvg__convertPaint(gl, nvg__fragUniformPtr(gl, call->uniformOffset + gl->fragSize), paint, scissor, fringe, fringe, -1.0f);
	} else {
		call->uniformOffset = glnvg__allocFragUniforms(gl, 1);
		if (call->uniformOffset == -1) goto error;
		// Fill shader
		glnvg__convertPaint(gl, nvg__fragUniformPtr(gl, call->uniformOffset), paint, scissor, fringe, fringe, -1.0f);
	}
	return;
error:
	// We get here if call alloc was ok, but something else is not.
	// Roll back the last call to prevent drawing it.
	if (gl->ncalls > 0) gl->ncalls--;
}
// Backend callback: queue a stroke. Copies stroke vertices into the frame's
// vertex buffer and records a GLNVG_STROKE call. With NVG_STENCIL_STROKES two
// uniform records are prepared (base pass + AA pass with alpha threshold).
static void glnvg__renderStroke(void* uptr, NVGpaint* paint, NVGcompositeOperationState compositeOperation, NVGscissor* scissor, float fringe,
								float strokeWidth, const NVGpath* paths, int npaths)
{
	GLNVGcontext* gl = (GLNVGcontext*)uptr;
	GLNVGcall* call = glnvg__allocCall(gl);
	int i, maxverts, offset;
	if (call == NULL) return;
	call->type = GLNVG_STROKE;
	call->pathOffset = glnvg__allocPaths(gl, npaths);
	if (call->pathOffset == -1) goto error;
	call->pathCount = npaths;
	call->image = paint->image;
	call->blendFunc = glnvg__blendCompositeOperation(compositeOperation);
	// Allocate vertices for all the paths.
	maxverts = glnvg__maxVertCount(paths, npaths);
	offset = glnvg__allocVerts(gl, maxverts);
	if (offset == -1) goto error;
	for (i = 0; i < npaths; i++) {
		GLNVGpath* copy = &gl->paths[call->pathOffset + i];
		const NVGpath* path = &paths[i];
		memset(copy, 0, sizeof(GLNVGpath));
		if (path->nstroke) {
			copy->strokeOffset = offset;
			copy->strokeCount = path->nstroke;
			memcpy(&gl->verts[offset], path->stroke, sizeof(NVGvertex) * path->nstroke);
			offset += path->nstroke;
		}
	}
	if (gl->flags & NVG_STENCIL_STROKES) {
		// Fill shader
		call->uniformOffset = glnvg__allocFragUniforms(gl, 2);
		if (call->uniformOffset == -1) goto error;
		glnvg__convertPaint(gl, nvg__fragUniformPtr(gl, call->uniformOffset), paint, scissor, strokeWidth, fringe, -1.0f);
		// Second record thresholds out nearly-transparent fringe pixels.
		glnvg__convertPaint(gl, nvg__fragUniformPtr(gl, call->uniformOffset + gl->fragSize), paint, scissor, strokeWidth, fringe, 1.0f - 0.5f/255.0f);
	} else {
		// Fill shader
		call->uniformOffset = glnvg__allocFragUniforms(gl, 1);
		if (call->uniformOffset == -1) goto error;
		glnvg__convertPaint(gl, nvg__fragUniformPtr(gl, call->uniformOffset), paint, scissor, strokeWidth, fringe, -1.0f);
	}
	return;
error:
	// We get here if call alloc was ok, but something else is not.
	// Roll back the last call to prevent drawing it.
	if (gl->ncalls > 0) gl->ncalls--;
}
// Backend callback: queue a raw triangle list (e.g. text quads from the core).
static void glnvg__renderTriangles(void* uptr, NVGpaint* paint, NVGcompositeOperationState compositeOperation, NVGscissor* scissor,
								   const NVGvertex* verts, int nverts)
{
	GLNVGcontext* gl = (GLNVGcontext*)uptr;
	GLNVGcall* call = glnvg__allocCall(gl);
	GLNVGfragUniforms* frag;
	if (call == NULL) return;
	call->type = GLNVG_TRIANGLES;
	call->image = paint->image;
	call->blendFunc = glnvg__blendCompositeOperation(compositeOperation);
	// Allocate vertices for all the paths.
	call->triangleOffset = glnvg__allocVerts(gl, nverts);
	if (call->triangleOffset == -1) goto error;
	call->triangleCount = nverts;
	memcpy(&gl->verts[call->triangleOffset], verts, sizeof(NVGvertex) * nverts);
	// Fill shader
	call->uniformOffset = glnvg__allocFragUniforms(gl, 1);
	if (call->uniformOffset == -1) goto error;
	frag = nvg__fragUniformPtr(gl, call->uniformOffset);
	glnvg__convertPaint(gl, frag, paint, scissor, 1.0f, 1.0f, -1.0f);
	// Override the paint type: sample the texture directly (image shader).
	frag->type = NSVG_SHADER_IMG;
	return;
error:
	// We get here if call alloc was ok, but something else is not.
	// Roll back the last call to prevent drawing it.
	if (gl->ncalls > 0) gl->ncalls--;
}
// Backend callback: destroy the GL backend — shader, GL buffers, every
// texture not flagged NVG_IMAGE_NODELETE, and the context struct itself.
static void glnvg__renderDelete(void* uptr)
{
	GLNVGcontext* gl = (GLNVGcontext*)uptr;
	int i;
	if (gl == NULL) return;
	glnvg__deleteShader(&gl->shader);
#if NANOVG_GL3
#if NANOVG_GL_USE_UNIFORMBUFFER
	if (gl->fragBuf != 0)
		glDeleteBuffers(1, &gl->fragBuf);
#endif
	if (gl->vertArr != 0)
		glDeleteVertexArrays(1, &gl->vertArr);
#endif
	if (gl->vertBuf != 0)
		glDeleteBuffers(1, &gl->vertBuf);
	for (i = 0; i < gl->ntextures; i++) {
		// NVG_IMAGE_NODELETE marks externally owned texture handles.
		if (gl->textures[i].tex != 0 && (gl->textures[i].flags & NVG_IMAGE_NODELETE) == 0)
			glDeleteTextures(1, &gl->textures[i].tex);
	}
	free(gl->textures);
	free(gl->paths);
	free(gl->verts);
	free(gl->uniforms);
	free(gl->calls);
	free(gl);
}
// Create a NanoVG context backed by this GL backend (name depends on which
// NANOVG_GL* variant is compiled). 'flags' is a bitmask of NVG_ANTIALIAS,
// NVG_STENCIL_STROKES and NVG_DEBUG. Returns NULL on failure.
#if defined NANOVG_GL2
NVGcontext* nvgCreateGL2(int flags)
#elif defined NANOVG_GL3
NVGcontext* nvgCreateGL3(int flags)
#elif defined NANOVG_GLES2
NVGcontext* nvgCreateGLES2(int flags)
#elif defined NANOVG_GLES3
NVGcontext* nvgCreateGLES3(int flags)
#endif
{
	NVGparams params;
	NVGcontext* ctx = NULL;
	GLNVGcontext* gl = (GLNVGcontext*)malloc(sizeof(GLNVGcontext));
	if (gl == NULL) goto error;
	memset(gl, 0, sizeof(GLNVGcontext));
	memset(&params, 0, sizeof(params));
	// Wire the GL backend callbacks into the core renderer.
	params.renderCreate = glnvg__renderCreate;
	params.renderCreateTexture = glnvg__renderCreateTexture;
	params.renderDeleteTexture = glnvg__renderDeleteTexture;
	params.renderUpdateTexture = glnvg__renderUpdateTexture;
	params.renderGetTextureSize = glnvg__renderGetTextureSize;
	params.renderViewport = glnvg__renderViewport;
	params.renderCancel = glnvg__renderCancel;
	params.renderFlush = glnvg__renderFlush;
	params.renderFill = glnvg__renderFill;
	params.renderStroke = glnvg__renderStroke;
	params.renderTriangles = glnvg__renderTriangles;
	params.renderDelete = glnvg__renderDelete;
	params.userPtr = gl;
	params.edgeAntiAlias = flags & NVG_ANTIALIAS ? 1 : 0;
	gl->flags = flags;
	ctx = nvgCreateInternal(&params);
	if (ctx == NULL) goto error;
	return ctx;
error:
	// 'gl' is freed by nvgDeleteInternal.
	if (ctx != NULL) nvgDeleteInternal(ctx);
	return NULL;
}
// Destroy a context created by the matching nvgCreateGL* call; releases all
// backend resources via glnvg__renderDelete.
#if defined NANOVG_GL2
void nvgDeleteGL2(NVGcontext* ctx)
#elif defined NANOVG_GL3
void nvgDeleteGL3(NVGcontext* ctx)
#elif defined NANOVG_GLES2
void nvgDeleteGLES2(NVGcontext* ctx)
#elif defined NANOVG_GLES3
void nvgDeleteGLES3(NVGcontext* ctx)
#endif
{
	nvgDeleteInternal(ctx);
}
// Wrap an existing GL texture object as a NanoVG image and return its image
// id (0 on failure). The texture type is recorded as RGBA. Pass
// NVG_IMAGE_NODELETE in imageFlags if NanoVG must not delete the GL texture.
#if defined NANOVG_GL2
int nvglCreateImageFromHandleGL2(NVGcontext* ctx, GLuint textureId, int w, int h, int imageFlags)
#elif defined NANOVG_GL3
int nvglCreateImageFromHandleGL3(NVGcontext* ctx, GLuint textureId, int w, int h, int imageFlags)
#elif defined NANOVG_GLES2
int nvglCreateImageFromHandleGLES2(NVGcontext* ctx, GLuint textureId, int w, int h, int imageFlags)
#elif defined NANOVG_GLES3
int nvglCreateImageFromHandleGLES3(NVGcontext* ctx, GLuint textureId, int w, int h, int imageFlags)
#endif
{
	GLNVGcontext* gl = (GLNVGcontext*)nvgInternalParams(ctx)->userPtr;
	GLNVGtexture* tex = glnvg__allocTexture(gl);
	if (tex == NULL) return 0;
	tex->type = NVG_TEXTURE_RGBA;
	tex->tex = textureId;
	tex->flags = imageFlags;
	tex->width = w;
	tex->height = h;
	return tex->id;
}
  1420. #if defined NANOVG_GL2
  1421. GLuint nvglImageHandleGL2(NVGcontext* ctx, int image)
  1422. #elif defined NANOVG_GL3
  1423. GLuint nvglImageHandleGL3(NVGcontext* ctx, int image)
  1424. #elif defined NANOVG_GLES2
  1425. GLuint nvglImageHandleGLES2(NVGcontext* ctx, int image)
  1426. #elif defined NANOVG_GLES3
  1427. GLuint nvglImageHandleGLES3(NVGcontext* ctx, int image)
  1428. #endif
  1429. {
  1430. GLNVGcontext* gl = (GLNVGcontext*)nvgInternalParams(ctx)->userPtr;
  1431. GLNVGtexture* tex = glnvg__findTexture(gl, image);
  1432. return tex->tex;
  1433. }
  1434. #endif /* NANOVG_GL_IMPLEMENTATION */