BITBLT.C — bitmap blit routines (source viewer line-number residue removed)
  1. /*
  2. THE COMPUTER CODE CONTAINED HEREIN IS THE SOLE PROPERTY OF PARALLAX
  3. SOFTWARE CORPORATION ("PARALLAX"). PARALLAX, IN DISTRIBUTING THE CODE TO
  4. END-USERS, AND SUBJECT TO ALL OF THE TERMS AND CONDITIONS HEREIN, GRANTS A
  5. ROYALTY-FREE, PERPETUAL LICENSE TO SUCH END-USERS FOR USE BY SUCH END-USERS
  6. IN USING, DISPLAYING, AND CREATING DERIVATIVE WORKS THEREOF, SO LONG AS
  7. SUCH USE, DISPLAY OR CREATION IS FOR NON-COMMERCIAL, ROYALTY OR REVENUE
  8. FREE PURPOSES. IN NO EVENT SHALL THE END-USER USE THE COMPUTER CODE
  9. CONTAINED HEREIN FOR REVENUE-BEARING PURPOSES. THE END-USER UNDERSTANDS
  10. AND AGREES TO THE TERMS HEREIN AND ACCEPTS THE SAME BY USE OF THIS FILE.
  11. COPYRIGHT 1993-1999 PARALLAX SOFTWARE CORPORATION. ALL RIGHTS RESERVED.
  12. */
  13. #include "pa_enabl.h" //$$POLY_ACC
  14. #include "mem.h"
  15. #include "gr.h"
  16. #include "grdef.h"
  17. #include "rle.h"
  18. #include "mono.h"
  19. #include "byteswap.h" // because of rle code that has short for row offsets
  20. #include "error.h"
  21. #if defined(POLY_ACC)
  22. #include "poly_acc.h"
  23. #endif
// Extra left-shift applied to the destination row stride in the blitters
// (e.g. to skip alternate lines for interlaced output).
int gr_bitblt_dest_step_shift = 0;
// Nonzero: blits double each source pixel horizontally.
int gr_bitblt_double = 0;
// Optional fade table indexed by destination scanline; NULL = no fading
// during masked blits.
ubyte *gr_bitblt_fade_table=NULL;

extern void gr_vesa_bitmap( grs_bitmap * source, grs_bitmap * dest, int x, int y );

// This code aligns edi so that the destination is aligned to a dword boundry before rep movsd
void gr_linear_movsd(ubyte * src, ubyte * dest, int num_pixels );

#ifdef MACINTOSH

// Runs shorter than this are copied byte-by-byte; the 8-byte fast path
// isn't worth the alignment setup below this count.
#define THRESHOLD   8

#ifdef RELEASE
#define test_byteblit   0
#else
// Debug switch: force the byte-at-a-time path in gr_linear_movsd.
ubyte test_byteblit = 0;
#endif
// Opaque row copy (Macintosh): moves num_pixels bytes from src to dest.
// Uses 8-byte (double) stores when src and dest can reach the same 8-byte
// alignment together; otherwise falls back to a plain byte loop.
void gr_linear_movsd(ubyte * src, ubyte * dest, int num_pixels )
{
	int i;
	uint n, r;
	double *d, *s;
	ubyte *d1, *s1;

	// check to see if we are starting on an even byte boundry
	// if not, move appropriate number of bytes to even
	// 8 byte boundry
	// Byte loop when: the run is too short, the two pointers have different
	// low-3-bit alignment (they could never both become 8-byte aligned), or
	// the debug switch forces it.
	if ( (num_pixels < THRESHOLD) || (((int)src & 0x7) != ((int)dest & 0x7)) || test_byteblit ) {
		for (i = 0; i < num_pixels; i++)
			*dest++ = *src++;
		return;
	}

	// Copy leading bytes until src (and therefore dest) is 8-byte aligned.
	i = 0;
	if ((r = (int)src & 0x7)) {
		for (i = 0; i < 8 - r; i++)
			*dest++ = *src++;
	}
	num_pixels -= i;

	// Bulk-copy 8 bytes at a time via double loads/stores, then the tail.
	n = num_pixels / 8;
	r = num_pixels % 8;
	s = (double *)src;
	d = (double *)dest;
	for (i = 0; i < n; i++)
		*d++ = *s++;
	s1 = (ubyte *)s;
	d1 = (ubyte *)d;
	for (i = 0; i < r; i++)
		*d1++ = *s1++;
}
#endif //#ifdef MACINTOSH
// Masked row copy: copies num_pixels bytes from src to dest, leaving the
// destination untouched wherever the source byte is the transparency color.
void gr_linear_rep_movsdm(ubyte * src, ubyte * dest, int num_pixels );

#ifndef MACINTOSH
// Watcom inline asm: 255 is TRANSPARENCY_COLOR — transparent bytes are
// skipped (edi still advances so the destination stays in step).
#pragma aux gr_linear_rep_movsdm parm [esi] [edi] [ecx] modify exact [ecx esi edi eax] = \
"nextpixel:" \
"mov al,[esi]" \
"inc esi" \
"cmp al, 255" \
"je skip_it" \
"mov [edi], al" \
"skip_it:" \
"inc edi" \
"dec ecx" \
"jne nextpixel";
#else // ifdef MACINTOSH
// C equivalent of the asm above.
void gr_linear_rep_movsdm(ubyte * src, ubyte * dest, int num_pixels )
{
	int i;
	for (i=0; i<num_pixels; i++ ) {
		if (*src != TRANSPARENCY_COLOR )
			*dest = *src;
		dest++;
		src++;
	}
}
#endif // ifdef MACINTOSH
// Masked + faded row copy: like gr_linear_rep_movsdm, but each copied pixel
// is remapped through row fade_value of gr_fade_table (256 entries per row).
void gr_linear_rep_movsdm_faded(ubyte * src, ubyte * dest, int num_pixels, ubyte fade_value );

#ifndef MACINTOSH
// Watcom inline asm: ah holds fade_value, so eax indexes
// gr_fade_table[fade_value*256 + pixel]; 255 is the transparency color.
#pragma aux gr_linear_rep_movsdm_faded parm [esi] [edi] [ecx] [ebx] modify exact [ecx esi edi eax ebx] = \
" xor eax, eax" \
" mov ah, bl" \
"nextpixel:" \
"mov al,[esi]" \
"inc esi" \
"cmp al, 255" \
"je skip_it" \
"mov al, gr_fade_table[eax]" \
"mov [edi], al" \
"skip_it:" \
"inc edi" \
"dec ecx" \
"jne nextpixel";
#else
// C equivalent of the asm above.
void gr_linear_rep_movsdm_faded(ubyte * src, ubyte * dest, int num_pixels, ubyte fade_value )
{
	int i;
	ubyte source;
	ubyte *fade_base;

	// Select the 256-entry fade row for this fade level.
	fade_base = gr_fade_table + (fade_value * 256);
	for (i=num_pixels; i != 0; i-- )
	{
		source = *src;
		if (source != (ubyte)TRANSPARENCY_COLOR )
			*dest = *(fade_base + source);
		dest++;
		src++;
	}
}
#endif // ifdef MACINTOSH
// Pixel-doubling row copy: each source byte is written twice to dest.
void gr_linear_rep_movsd_2x(ubyte * src, ubyte * dest, int num_dest_pixels );

#ifndef MACINTOSH
// Watcom inline asm: ecx counts DESTINATION pixels (so ecx/2 source bytes
// are read); an odd count copies one leading pixel un-doubled.
#pragma aux gr_linear_rep_movsd_2x parm [esi] [edi] [ecx] modify exact [ecx esi edi eax ebx] = \
"shr ecx, 1" \
"jnc nextpixel" \
"mov al, [esi]" \
"mov [edi], al" \
"inc esi" \
"inc edi" \
"cmp ecx, 0" \
"je done" \
"nextpixel:" \
"mov al,[esi]" \
"mov ah, al" \
"mov [edi], ax" \
"inc esi" \
"inc edi" \
"inc edi" \
"dec ecx" \
"jne nextpixel" \
"done:"
#else // ifdef MACINTOSH
// NOTE(review): this Mac version treats num_pixels as SOURCE pixels (it
// writes 2*num_pixels destination bytes), unlike the asm above which counts
// destination pixels — confirm against the Mac call sites before changing.
void gr_linear_rep_movsd_2x(ubyte *src, ubyte *dest, int num_pixels)
{
	double *d = (double *)dest;
	uint *s = (uint *)src;
	uint doubletemp[2];
	uint temp, work;
	int i;

	if (num_pixels & 0x3) {		// not a multiple of 4?  do single pixel at a time
		for (i=0; i<num_pixels; i++) {
			*dest++ = *src;
			*dest++ = *src++;
		}
		return;
	}

	// Fast path: read 4 source pixels as one 32-bit word, duplicate each
	// byte in registers, and store 8 destination bytes with one double store.
	for (i = 0; i < num_pixels / 4; i++) {
		temp = work = *s++;

		temp = ((temp >> 8) & 0x00FFFF00) | (temp & 0xFF0000FF);	// 0xABCDEFGH -> 0xABABCDEF
		temp = ((temp >> 8) & 0x000000FF) | (temp & 0xFFFFFF00);	// 0xABABCDEF -> 0xABABCDCD
		doubletemp[0] = temp;

		work = ((work << 8) & 0x00FFFF00) | (work & 0xFF0000FF);	// 0xABCDEFGH -> 0xABEFGHGH
		work = ((work << 8) & 0xFF000000) | (work & 0x00FFFFFF);	// 0xABEFGHGH -> 0xEFEFGHGH
		doubletemp[1] = work;

		*d = *(double *) &(doubletemp[0]);
		d++;
	}
}
#endif
// Copies a vertical column of num_pixels bytes: source advances by
// src_rowsize and destination by dest_rowsize after each pixel.
void modex_copy_column(ubyte * src, ubyte * dest, int num_pixels, int src_rowsize, int dest_rowsize );

#ifndef MACINTOSH
// Watcom inline asm: ebx = source stride, edx = destination stride.
#pragma aux modex_copy_column parm [esi] [edi] [ecx] [ebx] [edx] modify exact [ecx esi edi] = \
"nextpixel:" \
"mov al,[esi]" \
"add esi, ebx" \
"mov [edi], al" \
"add edi, edx" \
"dec ecx" \
"jne nextpixel"
#else
// Not implemented on the Macintosh (ModeX is PC-only); trap if reached.
void modex_copy_column(ubyte * src, ubyte * dest, int num_pixels, int src_rowsize, int dest_rowsize )
{
	// Self-assignments silence unused-parameter warnings.
	src = src;
	dest = dest;
	num_pixels = num_pixels;
	src_rowsize = src_rowsize;
	dest_rowsize = dest_rowsize;
	Int3();
}
#endif
// Masked column copy: like modex_copy_column, but source bytes equal to 255
// (the transparency color) leave the destination untouched.
void modex_copy_column_m(ubyte * src, ubyte * dest, int num_pixels, int src_rowsize, int dest_rowsize );

#ifndef MACINTOSH
// Watcom inline asm: ebx = source stride, edx = destination stride.
#pragma aux modex_copy_column_m parm [esi] [edi] [ecx] [ebx] [edx] modify exact [ecx esi edi] = \
"nextpixel:" \
"mov al,[esi]" \
"add esi, ebx" \
"cmp al, 255" \
"je skip_itx" \
"mov [edi], al" \
"skip_itx:" \
"add edi, edx" \
"dec ecx" \
"jne nextpixel"
#else
// Not implemented on the Macintosh (ModeX is PC-only); trap if reached.
void modex_copy_column_m(ubyte * src, ubyte * dest, int num_pixels, int src_rowsize, int dest_rowsize )
{
	// Self-assignments silence unused-parameter warnings.
	src = src;
	dest = dest;
	num_pixels = num_pixels;
	src_rowsize = src_rowsize;
	dest_rowsize = dest_rowsize;
	Int3();
}
#endif
  221. void gr_ubitmap00( int x, int y, grs_bitmap *bm )
  222. {
  223. register int y1;
  224. int dest_rowsize;
  225. unsigned char * dest;
  226. unsigned char * src;
  227. dest_rowsize=grd_curcanv->cv_bitmap.bm_rowsize << gr_bitblt_dest_step_shift;
  228. dest = &(grd_curcanv->cv_bitmap.bm_data[ dest_rowsize*y+x ]);
  229. src = bm->bm_data;
  230. for (y1=0; y1 < bm->bm_h; y1++ ) {
  231. if (gr_bitblt_double)
  232. gr_linear_rep_movsd_2x( src, dest, bm->bm_w );
  233. else
  234. gr_linear_movsd( src, dest, bm->bm_w );
  235. src += bm->bm_rowsize;
  236. dest+= (int)(dest_rowsize);
  237. }
  238. }
  239. void gr_ubitmap00m( int x, int y, grs_bitmap *bm )
  240. {
  241. register int y1;
  242. int dest_rowsize;
  243. unsigned char * dest;
  244. unsigned char * src;
  245. dest_rowsize=grd_curcanv->cv_bitmap.bm_rowsize << gr_bitblt_dest_step_shift;
  246. dest = &(grd_curcanv->cv_bitmap.bm_data[ dest_rowsize*y+x ]);
  247. src = bm->bm_data;
  248. if (gr_bitblt_fade_table==NULL) {
  249. for (y1=0; y1 < bm->bm_h; y1++ ) {
  250. gr_linear_rep_movsdm( src, dest, bm->bm_w );
  251. src += bm->bm_rowsize;
  252. dest+= (int)(dest_rowsize);
  253. }
  254. } else {
  255. for (y1=0; y1 < bm->bm_h; y1++ ) {
  256. gr_linear_rep_movsdm_faded( src, dest, bm->bm_w, gr_bitblt_fade_table[y1+y] );
  257. src += bm->bm_rowsize;
  258. dest+= (int)(dest_rowsize);
  259. }
  260. }
  261. }
  262. //" jmp aligned4 " \
  263. //" mov eax, edi " \
  264. //" and eax, 11b " \
  265. //" jz aligned4 " \
  266. //" mov ebx, 4 " \
  267. //" sub ebx, eax " \
  268. //" sub ecx, ebx " \
  269. //"alignstart: " \
  270. //" mov al, [esi] " \
  271. //" add esi, 4 " \
  272. //" mov [edi], al " \
  273. //" inc edi " \
  274. //" dec ebx " \
  275. //" jne alignstart " \
  276. //"aligned4: " \
// Copies one plane's worth of a scanline into ModeX memory: reads every 4th
// source byte (one plane of a linear scanline) and writes them contiguously.
void modex_copy_scanline( ubyte * src, ubyte * dest, int npixels );

#ifndef MACINTOSH
// Watcom inline asm: packs 4 plane pixels (source stride 4) into one dword
// store, then finishes the npixels&3 remainder one byte at a time.
#pragma aux modex_copy_scanline parm [esi] [edi] [ecx] modify exact [ecx esi edi eax ebx edx] = \
" mov ebx, ecx " \
" and ebx, 11b " \
" shr ecx, 2 " \
" cmp ecx, 0 " \
" je no2group " \
"next4pixels: " \
" mov al, [esi+8] " \
" mov ah, [esi+12] " \
" shl eax, 16 " \
" mov al, [esi] " \
" mov ah, [esi+4] " \
" mov [edi], eax " \
" add esi, 16 " \
" add edi, 4 " \
" dec ecx " \
" jne next4pixels " \
"no2group: " \
" cmp ebx, 0 " \
" je done2 " \
"finishend: " \
" mov al, [esi] " \
" add esi, 4 " \
" mov [edi], al " \
" inc edi " \
" dec ebx " \
" jne finishend " \
"done2: ";
#else
// Not implemented on the Macintosh (ModeX is PC-only); trap if reached.
void modex_copy_scanline( ubyte * src, ubyte * dest, int npixels )
{
	// Self-assignments silence unused-parameter warnings.
	src = src;
	dest = dest;
	npixels = npixels;
	Int3();
}
#endif
// Pixel-doubling variant of modex_copy_scanline: reads every 2nd source
// byte (stride 2 instead of 4), used when the blit doubles pixels.
void modex_copy_scanline_2x( ubyte * src, ubyte * dest, int npixels );

#ifndef MACINTOSH
// Watcom inline asm: packs 4 pixels (source stride 2) into one dword store,
// then finishes the npixels&3 remainder one byte at a time.
#pragma aux modex_copy_scanline_2x parm [esi] [edi] [ecx] modify exact [ecx esi edi eax ebx edx] = \
" mov ebx, ecx " \
" and ebx, 11b " \
" shr ecx, 2 " \
" cmp ecx, 0 " \
" je no2group " \
"next4pixels: " \
" mov al, [esi+4] " \
" mov ah, [esi+6] " \
" shl eax, 16 " \
" mov al, [esi] " \
" mov ah, [esi+2] " \
" mov [edi], eax " \
" add esi, 8 " \
" add edi, 4 " \
" dec ecx " \
" jne next4pixels " \
"no2group: " \
" cmp ebx, 0 " \
" je done2 " \
"finishend: " \
" mov al, [esi] " \
" add esi, 2 " \
" mov [edi], al " \
" inc edi " \
" dec ebx " \
" jne finishend " \
"done2: ";
#else
// Not implemented on the Macintosh (ModeX is PC-only); trap if reached.
void modex_copy_scanline_2x( ubyte * src, ubyte * dest, int npixels )
{
	// Self-assignments silence unused-parameter warnings.
	src = src;
	dest = dest;
	npixels = npixels;
	Int3();
}
#endif
// From Linear to ModeX
// Blits a linear source bitmap into planar ModeX video memory.  ModeX
// stores every 4th pixel on one of four planes, so the copy is done one
// plane at a time; the doubled path uses the 2x scanline copier.
void gr_bm_ubitblt01(int w, int h, int dx, int dy, int sx, int sy, grs_bitmap * src, grs_bitmap * dest)
{
	ubyte * dbits;
	ubyte * sbits;
	int sstep,dstep;
	int y,plane;
	int w1;

	if ( w < 4 ) return;		// narrower than one pixel per plane: not handled

	sstep = src->bm_rowsize;
	dstep = dest->bm_rowsize << gr_bitblt_dest_step_shift;

	if (!gr_bitblt_double) {
		for (plane=0; plane<4; plane++ ) {
			gr_modex_setplane( (plane+dx)&3 );
			sbits = src->bm_data + (src->bm_rowsize * sy) + sx + plane;
			dbits = &gr_video_memory[(dest->bm_rowsize * dy) + ((plane+dx)/4) ];
			// Each plane gets w/4 pixels, plus one more on the first
			// (w&3) planes for the remainder.
			w1 = w >> 2;
			if ( (w&3) > plane ) w1++;
			for (y=dy; y < dy+h; y++ ) {
				modex_copy_scanline( sbits, dbits, w1 );
				dbits += dstep;
				sbits += sstep;
			}
		}
	} else {
		for (plane=0; plane<4; plane++ ) {
			gr_modex_setplane( (plane+dx)&3 );
			// Doubled: adjacent plane pairs share one source pixel.
			sbits = src->bm_data + (src->bm_rowsize * sy) + sx + plane/2;
			dbits = &gr_video_memory[(dest->bm_rowsize * dy) + ((plane+dx)/4) ];
			w1 = w >> 2;
			if ( (w&3) > plane ) w1++;
			for (y=dy; y < dy+h; y++ ) {
				modex_copy_scanline_2x( sbits, dbits, w1 );
				dbits += dstep;
				sbits += sstep;
			}
		}
	}
}
// From Linear to ModeX masked
// Masked linear-to-ModeX blit: walks destination columns, selecting the
// plane for each column and copying it with the masked column copier
// (transparent source bytes leave video memory untouched).
void gr_bm_ubitblt01m(int w, int h, int dx, int dy, int sx, int sy, grs_bitmap * src, grs_bitmap * dest)
{
	ubyte * dbits;
	ubyte * sbits;
	int x;

	sbits = src->bm_data + (src->bm_rowsize * sy) + sx;
	dbits = &gr_video_memory[(dest->bm_rowsize * dy) + dx/4];

	for (x=dx; x < dx+w; x++ ) {
		// Column x lives on plane (x&3).
		gr_modex_setplane( x&3 );
		modex_copy_column_m(sbits, dbits, h, src->bm_rowsize, dest->bm_rowsize << gr_bitblt_dest_step_shift );
		sbits++;
		// Advance to the next video-memory byte after finishing plane 3.
		if ( (x&3)==3 )
			dbits++;
	}
}
  420. void gr_ubitmap012( int x, int y, grs_bitmap *bm )
  421. {
  422. register int x1, y1;
  423. unsigned char * src;
  424. src = bm->bm_data;
  425. for (y1=y; y1 < (y+bm->bm_h); y1++ ) {
  426. for (x1=x; x1 < (x+bm->bm_w); x1++ ) {
  427. gr_setcolor( *src++ );
  428. gr_upixel( x1, y1 );
  429. }
  430. }
  431. }
  432. void gr_ubitmap012m( int x, int y, grs_bitmap *bm )
  433. {
  434. register int x1, y1;
  435. unsigned char * src;
  436. src = bm->bm_data;
  437. for (y1=y; y1 < (y+bm->bm_h); y1++ ) {
  438. for (x1=x; x1 < (x+bm->bm_w); x1++ ) {
  439. if ( *src != TRANSPARENCY_COLOR ) {
  440. gr_setcolor( *src );
  441. gr_upixel( x1, y1 );
  442. }
  443. src++;
  444. }
  445. }
  446. }
  447. #if defined(POLY_ACC)
  448. void gr_ubitmap05( int x, int y, grs_bitmap *bm )
  449. {
  450. register int x1, y1;
  451. unsigned char *src;
  452. short *dst;
  453. int mod;
  454. pa_flush();
  455. src = bm->bm_data;
  456. dst = (short *)(DATA + y * ROWSIZE + x * PA_BPP);
  457. mod = ROWSIZE / 2 - bm->bm_w;
  458. for (y1=y; y1 < (y+bm->bm_h); y1++ ) {
  459. for (x1=x; x1 < (x+bm->bm_w); x1++ ) {
  460. *dst++ = pa_clut[*src++];
  461. }
  462. dst += mod;
  463. }
  464. }
  465. void gr_ubitmap05m( int x, int y, grs_bitmap *bm )
  466. {
  467. register int x1, y1;
  468. unsigned char *src;
  469. short *dst;
  470. int mod;
  471. pa_flush();
  472. src = bm->bm_data;
  473. dst = (short *)(DATA + y * ROWSIZE + x * PA_BPP);
  474. mod = ROWSIZE / 2 - bm->bm_w;
  475. for (y1=y; y1 < (y+bm->bm_h); y1++ ) {
  476. for (x1=x; x1 < (x+bm->bm_w); x1++ ) {
  477. if ( *src != TRANSPARENCY_COLOR ) {
  478. *dst = pa_clut[*src];
  479. }
  480. src++;
  481. ++dst;
  482. }
  483. dst += mod;
  484. }
  485. }
  486. void gr_bm_ubitblt05_rle(int w, int h, int dx, int dy, int sx, int sy, grs_bitmap * src, grs_bitmap * dest)
  487. {
  488. unsigned short * dbits;
  489. unsigned char * sbits, scanline[640];
  490. int i, data_offset, j, nextrow;
  491. pa_flush();
  492. nextrow=dest->bm_rowsize/PA_BPP;
  493. data_offset = 1;
  494. if (src->bm_flags & BM_FLAG_RLE_BIG)
  495. data_offset = 2;
  496. sbits = &src->bm_data[4 + (src->bm_h*data_offset)];
  497. for (i=0; i<sy; i++ )
  498. sbits += (int)(INTEL_SHORT(src->bm_data[4+(i*data_offset)]));
  499. dbits = (unsigned short *)(dest->bm_data + (dest->bm_rowsize * dy) + dx*PA_BPP);
  500. // No interlacing, copy the whole buffer.
  501. for (i=0; i < h; i++ ) {
  502. gr_rle_expand_scanline( scanline, sbits, sx, sx+w-1 );
  503. for(j = 0; j != w; ++j)
  504. dbits[j] = pa_clut[scanline[j]];
  505. if ( src->bm_flags & BM_FLAG_RLE_BIG )
  506. sbits += (int)INTEL_SHORT(*((short *)&(src->bm_data[4+((i+sy)*data_offset)])));
  507. else
  508. sbits += (int)(src->bm_data[4+i+sy]);
  509. dbits += nextrow;
  510. }
  511. }
  512. void gr_bm_ubitblt05m_rle(int w, int h, int dx, int dy, int sx, int sy, grs_bitmap * src, grs_bitmap * dest)
  513. {
  514. unsigned short * dbits;
  515. unsigned char * sbits, scanline[640];
  516. int i, data_offset, j, nextrow;
  517. pa_flush();
  518. nextrow=dest->bm_rowsize/PA_BPP;
  519. data_offset = 1;
  520. if (src->bm_flags & BM_FLAG_RLE_BIG)
  521. data_offset = 2;
  522. sbits = &src->bm_data[4 + (src->bm_h*data_offset)];
  523. for (i=0; i<sy; i++ )
  524. sbits += (int)(INTEL_SHORT(src->bm_data[4+(i*data_offset)]));
  525. dbits = (unsigned short *)(dest->bm_data + (dest->bm_rowsize * dy) + dx*PA_BPP);
  526. // No interlacing, copy the whole buffer.
  527. for (i=0; i < h; i++ ) {
  528. gr_rle_expand_scanline( scanline, sbits, sx, sx+w-1 );
  529. for(j = 0; j != w; ++j)
  530. if(scanline[j] != TRANSPARENCY_COLOR)
  531. dbits[j] = pa_clut[scanline[j]];
  532. if ( src->bm_flags & BM_FLAG_RLE_BIG )
  533. sbits += (int)INTEL_SHORT(*((short *)&(src->bm_data[4+((i+sy)*data_offset)])));
  534. else
  535. sbits += (int)(src->bm_data[4+i+sy]);
  536. dbits += nextrow;
  537. }
  538. }
  539. #endif
  540. void gr_ubitmapGENERIC(int x, int y, grs_bitmap * bm)
  541. {
  542. register int x1, y1;
  543. for (y1=0; y1 < bm->bm_h; y1++ ) {
  544. for (x1=0; x1 < bm->bm_w; x1++ ) {
  545. gr_setcolor( gr_gpixel(bm,x1,y1) );
  546. gr_upixel( x+x1, y+y1 );
  547. }
  548. }
  549. }
  550. void gr_ubitmapGENERICm(int x, int y, grs_bitmap * bm)
  551. {
  552. register int x1, y1;
  553. ubyte c;
  554. for (y1=0; y1 < bm->bm_h; y1++ ) {
  555. for (x1=0; x1 < bm->bm_w; x1++ ) {
  556. c = gr_gpixel(bm,x1,y1);
  557. if ( c != TRANSPARENCY_COLOR ) {
  558. gr_setcolor( c );
  559. gr_upixel( x+x1, y+y1 );
  560. }
  561. }
  562. }
  563. }
// From linear to SVGA
// Blits a linear source into banked VESA video memory.  The destination
// "address" is a linear offset; video memory is visible through a 64K
// window at 0xA0000, so each row's bank is selected with gr_vesa_setpage
// and rows that straddle a 64K boundary are copied in two parts.
void gr_bm_ubitblt02(int w, int h, int dx, int dy, int sx, int sy, grs_bitmap * src, grs_bitmap * dest)
{
	unsigned char * sbits;
	unsigned int offset, EndingOffset, VideoLocation;
	int sbpr, dbpr, y1, page, BytesToMove;

	sbpr = src->bm_rowsize;
	dbpr = dest->bm_rowsize << gr_bitblt_dest_step_shift;

	// dest->bm_data holds the linear VESA offset, not a host pointer.
	VideoLocation = (unsigned int)dest->bm_data + (dest->bm_rowsize * dy) + dx;
	sbits = src->bm_data + ( sbpr*sy ) + sx;

	for (y1=0; y1 < h; y1++ ) {
		page	= VideoLocation >> 16;		// 64K bank number
		offset = VideoLocation & 0xFFFF;	// offset within the bank
		gr_vesa_setpage( page );
		EndingOffset = offset+w-1;
		if ( EndingOffset <= 0xFFFF )
		{
			// Whole row fits in the current bank.
			if ( gr_bitblt_double )
				gr_linear_rep_movsd_2x( (void *)sbits, (void *)(offset+0xA0000), w );
			else
				gr_linear_movsd( (void *)sbits, (void *)(offset+0xA0000), w );
			VideoLocation += dbpr;
			sbits += sbpr;
		}
		else
		{
			// Row crosses the bank boundary: copy what fits, switch banks,
			// then copy the remainder from the start of the window.
			BytesToMove = 0xFFFF-offset+1;
			if ( gr_bitblt_double )
				gr_linear_rep_movsd_2x( (void *)sbits, (void *)(offset+0xA0000), BytesToMove );
			else
				gr_linear_movsd( (void *)sbits, (void *)(offset+0xA0000), BytesToMove );
			page++;
			gr_vesa_setpage(page);
			// In double mode the source advanced by half the destination
			// bytes (each source pixel produced two).
			if ( gr_bitblt_double )
				gr_linear_rep_movsd_2x( (void *)(sbits+BytesToMove/2), (void *)0xA0000, EndingOffset - 0xFFFF );
			else
				gr_linear_movsd( (void *)(sbits+BytesToMove), (void *)0xA0000, EndingOffset - 0xFFFF );
			VideoLocation += dbpr;
			sbits += sbpr;
		}
	}
}
// Masked linear-to-SVGA blit: same 64K VESA banking scheme as
// gr_bm_ubitblt02, but transparent source pixels leave video memory
// untouched (no doubling or fading variants here).
void gr_bm_ubitblt02m(int w, int h, int dx, int dy, int sx, int sy, grs_bitmap * src, grs_bitmap * dest)
{
	unsigned char * sbits;
	unsigned int offset, EndingOffset, VideoLocation;
	int sbpr, dbpr, y1, page, BytesToMove;

	sbpr = src->bm_rowsize;
	dbpr = dest->bm_rowsize << gr_bitblt_dest_step_shift;

	// dest->bm_data holds the linear VESA offset, not a host pointer.
	VideoLocation = (unsigned int)dest->bm_data + (dest->bm_rowsize * dy) + dx;
	sbits = src->bm_data + ( sbpr*sy ) + sx;

	for (y1=0; y1 < h; y1++ ) {
		page	= VideoLocation >> 16;		// 64K bank number
		offset = VideoLocation & 0xFFFF;	// offset within the bank
		gr_vesa_setpage( page );
		EndingOffset = offset+w-1;
		if ( EndingOffset <= 0xFFFF )
		{
			// Whole row fits in the current bank.
			gr_linear_rep_movsdm( (void *)sbits, (void *)(offset+0xA0000), w );
			VideoLocation += dbpr;
			sbits += sbpr;
		}
		else
		{
			// Row crosses the bank boundary: copy what fits, switch banks,
			// then copy the remainder from the start of the window.
			BytesToMove = 0xFFFF-offset+1;
			gr_linear_rep_movsdm( (void *)sbits, (void *)(offset+0xA0000), BytesToMove );
			page++;
			gr_vesa_setpage(page);
			gr_linear_rep_movsdm( (void *)(sbits+BytesToMove), (void *)0xA0000, EndingOffset - 0xFFFF );
			VideoLocation += dbpr;
			sbits += sbpr;
		}
	}
}
// From SVGA to linear
// Copies from banked VESA video memory back into a linear destination.
// src->bm_data holds the linear VESA offset; each row's bank is selected
// with gr_vesa_setpage, and rows straddling a 64K boundary are read in two
// parts through the gr_video_memory window.
void gr_bm_ubitblt20(int w, int h, int dx, int dy, int sx, int sy, grs_bitmap * src, grs_bitmap * dest)
{
	unsigned char * dbits;
	unsigned int offset, offset1, offset2;
	int sbpr, dbpr, y1, page;

	dbpr = dest->bm_rowsize;
	sbpr = src->bm_rowsize;

	for (y1=0; y1 < h; y1++ ) {
		offset2 = (unsigned int)src->bm_data + (sbpr * (y1+sy)) + sx;
		dbits = dest->bm_data + (dbpr * (y1+dy)) + dx;
		page	 = offset2 >> 16;		// 64K bank number
		offset = offset2 & 0xFFFF;		// offset within the bank
		offset1 = offset+w-1;			// inclusive end offset of the row
		gr_vesa_setpage( page );
		if ( offset1 > 0xFFFF ) {
			// Overlaps two pages: read to the end of this bank, then
			// continue from offset 0 of the next.
			while( offset <= 0xFFFF )
				*dbits++ = gr_video_memory[offset++];
			offset1 -= (0xFFFF+1);
			offset = 0;
			page++;
			gr_vesa_setpage(page);
		}
		while( offset <= offset1 )
			*dbits++ = gr_video_memory[offset++];
	}
}
  666. //@extern int Interlacing_on;
  667. // From Linear to Linear
  668. void gr_bm_ubitblt00(int w, int h, int dx, int dy, int sx, int sy, grs_bitmap * src, grs_bitmap * dest)
  669. {
  670. unsigned char * dbits;
  671. unsigned char * sbits;
  672. //int src_bm_rowsize_2, dest_bm_rowsize_2;
  673. int dstep;
  674. int i;
  675. sbits = src->bm_data + (src->bm_rowsize * sy) + sx;
  676. dbits = dest->bm_data + (dest->bm_rowsize * dy) + dx;
  677. dstep = dest->bm_rowsize << gr_bitblt_dest_step_shift;
  678. // No interlacing, copy the whole buffer.
  679. for (i=0; i < h; i++ ) {
  680. if (gr_bitblt_double)
  681. gr_linear_rep_movsd_2x( sbits, dbits, w );
  682. else
  683. gr_linear_movsd( sbits, dbits, w );
  684. sbits += src->bm_rowsize;
  685. dbits += dstep;
  686. }
  687. }
  688. // From Linear to Linear Masked
  689. void gr_bm_ubitblt00m(int w, int h, int dx, int dy, int sx, int sy, grs_bitmap * src, grs_bitmap * dest)
  690. {
  691. unsigned char * dbits;
  692. unsigned char * sbits;
  693. //int src_bm_rowsize_2, dest_bm_rowsize_2;
  694. int i;
  695. sbits = src->bm_data + (src->bm_rowsize * sy) + sx;
  696. dbits = dest->bm_data + (dest->bm_rowsize * dy) + dx;
  697. // No interlacing, copy the whole buffer.
  698. if (gr_bitblt_fade_table==NULL) {
  699. for (i=0; i < h; i++ ) {
  700. gr_linear_rep_movsdm( sbits, dbits, w );
  701. sbits += src->bm_rowsize;
  702. dbits += dest->bm_rowsize;
  703. }
  704. } else {
  705. for (i=0; i < h; i++ ) {
  706. gr_linear_rep_movsdm_faded( sbits, dbits, w, gr_bitblt_fade_table[dy+i] );
  707. sbits += src->bm_rowsize;
  708. dbits += dest->bm_rowsize;
  709. }
  710. }
  711. }
extern void gr_lbitblt( grs_bitmap * source, grs_bitmap * dest, int height, int width );

#ifdef MACINTOSH

// width == number of destination pixels
// Pixel-doubling row copy (Macintosh): reads width/2 source pixels and
// writes width destination bytes, duplicating each source byte.  Falls back
// to the byte loop when the count isn't a multiple of 4 source pixels or
// the pointers can't share 8-byte alignment.
void gr_linear_movsd_double(ubyte *src, ubyte *dest, int width)
{
	double *d = (double *)dest;
	uint *s = (uint *)src;
	uint doubletemp[2];
	uint temp, work;
	int i, num_pixels;

	num_pixels = width / 2;		// source pixels to read

	if ( (num_pixels & 0x3) || (((int)src & 0x7) != ((int)dest & 0x7)) ) {	// not a multiple of 4?  do single pixel at a time
		for (i=0; i<num_pixels; i++) {
			*dest++ = *src;
			*dest++ = *src++;
		}
		return;
	}

	// Fast path: read 4 source pixels as one 32-bit word, duplicate each
	// byte in registers, and store 8 destination bytes with one double store.
	for (i = 0; i < num_pixels / 4; i++) {
		temp = work = *s++;

		temp = ((temp >> 8) & 0x00FFFF00) | (temp & 0xFF0000FF);	// 0xABCDEFGH -> 0xABABCDEF
		temp = ((temp >> 8) & 0x000000FF) | (temp & 0xFFFFFF00);	// 0xABABCDEF -> 0xABABCDCD
		doubletemp[0] = temp;

		work = ((work << 8) & 0x00FFFF00) | (work & 0xFF0000FF);	// 0xABCDEFGH -> 0xABEFGHGH
		work = ((work << 8) & 0xFF000000) | (work & 0x00FFFFFF);	// 0xABEFGHGH -> 0xEFEFGHGH
		doubletemp[1] = work;

		*d = *(double *) &(doubletemp[0]);
		d++;
	}
}
//extern void BlitLargeAlign(ubyte *draw_buffer, int dstRowBytes, ubyte *dstPtr, int w, int h, int modulus);

// PowerPC asm 2x blit: doubles the source both horizontally and vertically,
// writing each doubled row to two destination rows (r5 = dst1, r31 = dst2)
// with 8-byte stfdu stores.  Byte duplication is done with rlwimi
// bit-insert operations staged through an 8-byte scratch area on the stack.
// The destination must be 8-byte aligned (asserted by the caller).
asm void BlitLargeAlign(ubyte *rSrcPtr, int rDblDStrd, ubyte *rDst1Ptr, int rWidth, int rHeight, int rModulus)
{
	stw		r31,-4(SP)		// store non-volatile reg in red zone
	addi	r5,r5,-8		// subtract 8 from dst
	stw		r30,-8(SP)		// store non-volatile reg in red zone
	la		r30,-16(SP)		// calculate copy of local 8-byte variable
	sub		r9,r8,r6		// rSStrd = modulus - w
	add		r31,r5,r4		// dst2 = dstRowBytes + dst1
	sub		r4,r4,r6		// r4 = dstRowBytes - w
	addi	r7,r7,-1		// subtract 1 from height count
	srawi	r6,r6,2			// rWidth = w >> 2
	addi	r3,r3,-4		// subtract 4 from src
	addi	r6,r6,-1		// subtract 1 from width count
	add		r4,r4,r4		// rDblDStrd = 2 * r4

BlitLargeAlignY:			// y count is in r7
	lwzu	r10,4(r3)		// load a long into r10
	mr		r0,r10			// put a copy in r0
	mr		r11,r10
	// these are simplified -- can't use 'em  inslwi	r0,r10,16,8
	// these are simplified -- can't use 'em  insrwi	r11,r10,16,8
	rlwimi	r0,r10,24,8,31
	rlwimi	r11,r10,8,8,23
	rlwimi	r0,r10,16,24,31
	stw		r0,0(r30)
	rlwimi	r11,r10,16,0,7
	stw		r11,4(r30)
	mtctr	r6				// copy x count into the counter
	lfd		fp0,0(r30)

BlitLargeAlignX:
	lwzu	r10,4(r3)		// load a long into r10
	stfdu	fp0,8(r5)
	mr		r0,r10			// put a copy in r0
	mr		r11,r10
	// simplefied  inslwi	r0,r10,16,8
	// simplefied  insrwi	r11,r10,16,8
	rlwimi	r0,r10,24,8,31
	rlwimi	r11,r10,8,8,23
	rlwimi	r0,r10,16,24,31
	stw		r0,0(r30)
	rlwimi	r11,r10,16,0,7
	stw		r11,4(r30)
	stfdu	fp0,8(r31)
	lfd		fp0,0(r30)
	bdnz	BlitLargeAlignX	// loop over all x

	stfdu	fp0,8(r5)
	addic.	r7,r7,-1		// decrement the counter
	add		r3,r3,r9		// src += sstride
	add		r5,r5,r4		// dst1 += dstride
	stfdu	fp0,8(r31)
	add		r31,r31,r4		// dst2 += dstride
	bne		BlitLargeAlignY	// loop for all y

	lwz		r30,-8(SP)		// restore non-volatile regs
	lwz		r31,-4(SP)		// restore non-volatile regs
	blr						// return to caller
}
  802. void gr_bm_ubitblt_double(int w, int h, int dx, int dy, int sx, int sy, grs_bitmap *src, grs_bitmap *dest)
  803. {
  804. ubyte * dbits;
  805. ubyte * sbits;
  806. int dstep, i;
  807. sbits = src->bm_data + (src->bm_rowsize * sy) + sx;
  808. dbits = dest->bm_data + (dest->bm_rowsize * dy) + dx;
  809. dstep = dest->bm_rowsize << gr_bitblt_dest_step_shift;
  810. Assert( !((int)dbits & 0x7) ); // assert to check double word alignment
  811. BlitLargeAlign(sbits, dstep, dbits, src->bm_w, src->bm_h, src->bm_rowsize);
  812. }
  813. // w and h are the doubled width and height
  814. void gr_bm_ubitblt_double_slow(int w, int h, int dx, int dy, int sx, int sy, grs_bitmap *src, grs_bitmap *dest)
  815. {
  816. ubyte * dbits;
  817. ubyte * sbits;
  818. int dstep, i, j;
  819. sbits = src->bm_data + (src->bm_rowsize * sy) + sx;
  820. dbits = dest->bm_data + (dest->bm_rowsize * dy) + dx;
  821. dstep = dest->bm_rowsize << gr_bitblt_dest_step_shift;
  822. for (i=0; i < h; i++ ) {
  823. gr_linear_movsd_double(sbits, dbits, w);
  824. dbits += dstep;
  825. if (i & 1)
  826. sbits += src->bm_rowsize;
  827. }
  828. }
  829. #endif
  830. // Clipped bitmap ...
  831. void gr_bitmap( int x, int y, grs_bitmap *bm )
  832. {
  833. int dx1=x, dx2=x+bm->bm_w-1;
  834. int dy1=y, dy2=y+bm->bm_h-1;
  835. int sx=0, sy=0;
  836. if ((dx1 >= grd_curcanv->cv_bitmap.bm_w ) || (dx2 < 0)) return;
  837. if ((dy1 >= grd_curcanv->cv_bitmap.bm_h) || (dy2 < 0)) return;
  838. if ( dx1 < 0 ) { sx = -dx1; dx1 = 0; }
  839. if ( dy1 < 0 ) { sy = -dy1; dy1 = 0; }
  840. if ( dx2 >= grd_curcanv->cv_bitmap.bm_w ) { dx2 = grd_curcanv->cv_bitmap.bm_w-1; }
  841. if ( dy2 >= grd_curcanv->cv_bitmap.bm_h ) { dy2 = grd_curcanv->cv_bitmap.bm_h-1; }
  842. // Draw bitmap bm[x,y] into (dx1,dy1)-(dx2,dy2)
  843. gr_bm_ubitblt(dx2-dx1+1,dy2-dy1+1, dx1, dy1, sx, sy, bm, &grd_curcanv->cv_bitmap );
  844. }
  845. //-NOT-used // From linear to SVGA
  846. //-NOT-used void gr_bm_ubitblt02_2x(int w, int h, int dx, int dy, int sx, int sy, grs_bitmap * src, grs_bitmap * dest)
  847. //-NOT-used {
  848. //-NOT-used unsigned char * sbits;
  849. //-NOT-used
  850. //-NOT-used unsigned int offset, EndingOffset, VideoLocation;
  851. //-NOT-used
  852. //-NOT-used int sbpr, dbpr, y1, page, BytesToMove;
  853. //-NOT-used
  854. //-NOT-used sbpr = src->bm_rowsize;
  855. //-NOT-used
  856. //-NOT-used dbpr = dest->bm_rowsize << gr_bitblt_dest_step_shift;
  857. //-NOT-used
  858. //-NOT-used VideoLocation = (unsigned int)dest->bm_data + (dest->bm_rowsize * dy) + dx;
  859. //-NOT-used
  860. //-NOT-used sbits = src->bm_data + ( sbpr*sy ) + sx;
  861. //-NOT-used
  862. //-NOT-used for (y1=0; y1 < h; y1++ ) {
  863. //-NOT-used
  864. //-NOT-used page = VideoLocation >> 16;
  865. //-NOT-used offset = VideoLocation & 0xFFFF;
  866. //-NOT-used
  867. //-NOT-used gr_vesa_setpage( page );
  868. //-NOT-used
  869. //-NOT-used EndingOffset = offset+w-1;
  870. //-NOT-used
  871. //-NOT-used if ( EndingOffset <= 0xFFFF )
  872. //-NOT-used {
  873. //-NOT-used gr_linear_rep_movsd_2x( (void *)sbits, (void *)(offset+0xA0000), w );
  874. //-NOT-used
  875. //-NOT-used VideoLocation += dbpr;
  876. //-NOT-used sbits += sbpr;
  877. //-NOT-used }
  878. //-NOT-used else
  879. //-NOT-used {
  880. //-NOT-used BytesToMove = 0xFFFF-offset+1;
  881. //-NOT-used
  882. //-NOT-used gr_linear_rep_movsd_2x( (void *)sbits, (void *)(offset+0xA0000), BytesToMove );
  883. //-NOT-used
  884. //-NOT-used page++;
  885. //-NOT-used gr_vesa_setpage(page);
  886. //-NOT-used
  887. //-NOT-used gr_linear_rep_movsd_2x( (void *)(sbits+BytesToMove/2), (void *)0xA0000, EndingOffset - 0xFFFF );
  888. //-NOT-used
  889. //-NOT-used VideoLocation += dbpr;
  890. //-NOT-used sbits += sbpr;
  891. //-NOT-used }
  892. //-NOT-used
  893. //-NOT-used
  894. //-NOT-used }
  895. //-NOT-used }
  896. //-NOT-used // From Linear to Linear
  897. //-NOT-used void gr_bm_ubitblt00_2x(int w, int h, int dx, int dy, int sx, int sy, grs_bitmap * src, grs_bitmap * dest)
  898. //-NOT-used {
  899. //-NOT-used unsigned char * dbits;
  900. //-NOT-used unsigned char * sbits;
  901. //-NOT-used //int src_bm_rowsize_2, dest_bm_rowsize_2;
  902. //-NOT-used
  903. //-NOT-used int i;
  904. //-NOT-used
  905. //-NOT-used sbits = src->bm_data + (src->bm_rowsize * sy) + sx;
  906. //-NOT-used dbits = dest->bm_data + (dest->bm_rowsize * dy) + dx;
  907. //-NOT-used
  908. //-NOT-used // No interlacing, copy the whole buffer.
  909. //-NOT-used for (i=0; i < h; i++ ) {
  910. //-NOT-used gr_linear_rep_movsd_2x( sbits, dbits, w );
  911. //-NOT-used
  912. //-NOT-used sbits += src->bm_rowsize;
  913. //-NOT-used dbits += dest->bm_rowsize << gr_bitblt_dest_step_shift;
  914. //-NOT-used }
  915. //-NOT-used }
  916. void gr_bm_ubitblt00_rle(int w, int h, int dx, int dy, int sx, int sy, grs_bitmap * src, grs_bitmap * dest)
  917. {
  918. unsigned char * dbits;
  919. unsigned char * sbits;
  920. int i, data_offset;
  921. data_offset = 1;
  922. if (src->bm_flags & BM_FLAG_RLE_BIG)
  923. data_offset = 2;
  924. sbits = &src->bm_data[4 + (src->bm_h*data_offset)];
  925. for (i=0; i<sy; i++ )
  926. sbits += (int)(INTEL_SHORT(src->bm_data[4+(i*data_offset)]));
  927. dbits = dest->bm_data + (dest->bm_rowsize * dy) + dx;
  928. // No interlacing, copy the whole buffer.
  929. for (i=0; i < h; i++ ) {
  930. gr_rle_expand_scanline( dbits, sbits, sx, sx+w-1 );
  931. if ( src->bm_flags & BM_FLAG_RLE_BIG )
  932. sbits += (int)INTEL_SHORT(*((short *)&(src->bm_data[4+((i+sy)*data_offset)])));
  933. else
  934. sbits += (int)(src->bm_data[4+i+sy]);
  935. dbits += dest->bm_rowsize << gr_bitblt_dest_step_shift;
  936. }
  937. }
  938. void gr_bm_ubitblt00m_rle(int w, int h, int dx, int dy, int sx, int sy, grs_bitmap * src, grs_bitmap * dest)
  939. {
  940. unsigned char * dbits;
  941. unsigned char * sbits;
  942. int i, data_offset;
  943. data_offset = 1;
  944. if (src->bm_flags & BM_FLAG_RLE_BIG)
  945. data_offset = 2;
  946. sbits = &src->bm_data[4 + (src->bm_h*data_offset)];
  947. for (i=0; i<sy; i++ )
  948. sbits += (int)(INTEL_SHORT(src->bm_data[4+(i*data_offset)]));
  949. dbits = dest->bm_data + (dest->bm_rowsize * dy) + dx;
  950. // No interlacing, copy the whole buffer.
  951. for (i=0; i < h; i++ ) {
  952. gr_rle_expand_scanline_masked( dbits, sbits, sx, sx+w-1 );
  953. if ( src->bm_flags & BM_FLAG_RLE_BIG )
  954. sbits += (int)INTEL_SHORT(*((short *)&(src->bm_data[4+((i+sy)*data_offset)])));
  955. else
  956. sbits += (int)(src->bm_data[4+i+sy]);
  957. dbits += dest->bm_rowsize << gr_bitblt_dest_step_shift;
  958. }
  959. }
  960. // in rle.c
  961. extern void gr_rle_expand_scanline_generic( grs_bitmap * dest, int dx, int dy, ubyte *src, int x1, int x2 );
  962. extern void gr_rle_expand_scanline_generic_masked( grs_bitmap * dest, int dx, int dy, ubyte *src, int x1, int x2 );
  963. void gr_bm_ubitblt0x_rle(int w, int h, int dx, int dy, int sx, int sy, grs_bitmap * src, grs_bitmap * dest)
  964. {
  965. int i, data_offset;
  966. register int y1;
  967. unsigned char * sbits;
  968. //mprintf( 0, "SVGA RLE!\n" );
  969. data_offset = 1;
  970. if (src->bm_flags & BM_FLAG_RLE_BIG)
  971. data_offset = 2;
  972. sbits = &src->bm_data[4 + (src->bm_h*data_offset)];
  973. for (i=0; i<sy; i++ )
  974. sbits += (int)(INTEL_SHORT(src->bm_data[4+(i*data_offset)]));
  975. for (y1=0; y1 < h; y1++ ) {
  976. gr_rle_expand_scanline_generic( dest, dx, dy+y1, sbits, sx, sx+w-1 );
  977. if ( src->bm_flags & BM_FLAG_RLE_BIG )
  978. sbits += (int)INTEL_SHORT(*((short *)&(src->bm_data[4+((y1+sy)*data_offset)])));
  979. else
  980. sbits += (int)src->bm_data[4+y1+sy];
  981. }
  982. }
  983. void gr_bm_ubitblt0xm_rle(int w, int h, int dx, int dy, int sx, int sy, grs_bitmap * src, grs_bitmap * dest)
  984. {
  985. int i, data_offset;
  986. register int y1;
  987. unsigned char * sbits;
  988. //mprintf( 0, "SVGA RLE!\n" );
  989. data_offset = 1;
  990. if (src->bm_flags & BM_FLAG_RLE_BIG)
  991. data_offset = 2;
  992. sbits = &src->bm_data[4 + (src->bm_h*data_offset)];
  993. for (i=0; i<sy; i++ )
  994. sbits += (int)(INTEL_SHORT(src->bm_data[4+(i*data_offset)]));
  995. for (y1=0; y1 < h; y1++ ) {
  996. gr_rle_expand_scanline_generic_masked( dest, dx, dy+y1, sbits, sx, sx+w-1 );
  997. if ( src->bm_flags & BM_FLAG_RLE_BIG )
  998. sbits += (int)INTEL_SHORT(*((short *)&(src->bm_data[4+((y1+sy)*data_offset)])));
  999. else
  1000. sbits += (int)src->bm_data[4+y1+sy];
  1001. }
  1002. }
  1003. void gr_bm_ubitblt02m_rle(int w, int h, int dx, int dy, int sx, int sy, grs_bitmap * src, grs_bitmap * dest)
  1004. {
  1005. int i, data_offset;
  1006. register int y1;
  1007. unsigned char * sbits;
  1008. //mprintf( 0, "SVGA RLE!\n" );
  1009. data_offset = 1;
  1010. if (src->bm_flags & BM_FLAG_RLE_BIG)
  1011. data_offset = 2;
  1012. sbits = &src->bm_data[4 + (src->bm_h*data_offset)];
  1013. for (i=0; i<sy; i++ )
  1014. sbits += (int)(INTEL_SHORT(src->bm_data[4+(i*data_offset)]));
  1015. for (y1=0; y1 < h; y1++ ) {
  1016. gr_rle_expand_scanline_svga_masked( dest, dx, dy+y1, sbits, sx, sx+w-1 );
  1017. if ( src->bm_flags & BM_FLAG_RLE_BIG )
  1018. sbits += (int)INTEL_SHORT(*((short *)&(src->bm_data[4+((y1+sy)*data_offset)])));
  1019. else
  1020. sbits += (int)src->bm_data[4+y1+sy];
  1021. }
  1022. }
// Unclipped blit dispatcher: picks a specialized blitter based on the
// source/destination bitmap types, falling back to a per-pixel copy.
void gr_bm_ubitblt(int w, int h, int dx, int dy, int sx, int sy, grs_bitmap * src, grs_bitmap * dest)
{
	register int x1, y1;

	// Linear -> linear: straight memory copy, with an RLE fast path.
	if ( (src->bm_type == BM_LINEAR) && (dest->bm_type == BM_LINEAR ))
	{
		if ( src->bm_flags & BM_FLAG_RLE )
			gr_bm_ubitblt00_rle( w, h, dx, dy, sx, sy, src, dest );
		else
			gr_bm_ubitblt00( w, h, dx, dy, sx, sy, src, dest );
		return;
	}

	// RLE linear source to any other destination type.
	if ( (src->bm_flags & BM_FLAG_RLE ) && (src->bm_type == BM_LINEAR) ) {
		gr_bm_ubitblt0x_rle(w, h, dx, dy, sx, sy, src, dest );
		return;
	}

	if ( (src->bm_type == BM_LINEAR) && (dest->bm_type == BM_SVGA ))
	{
		gr_bm_ubitblt02( w, h, dx, dy, sx, sy, src, dest );
		return;
	}

	if ( (src->bm_type == BM_SVGA) && (dest->bm_type == BM_LINEAR ))
	{
		gr_bm_ubitblt20( w, h, dx, dy, sx, sy, src, dest );
		return;
	}

	// Mode X destination gets the global X/Y offsets applied.
	if ( (src->bm_type == BM_LINEAR) && (dest->bm_type == BM_MODEX ))
	{
		gr_bm_ubitblt01( w, h, dx+XOFFSET, dy+YOFFSET, sx, sy, src, dest );
		return;
	}

#if defined(POLY_ACC)
	// 8-bit linear -> 15-bit linear: expand each index through pa_clut.
	if ( (src->bm_type == BM_LINEAR) && (dest->bm_type == BM_LINEAR15 ))
	{
		ubyte *s = src->bm_data + sy * src->bm_rowsize + sx;
		ushort *t = (ushort *)(dest->bm_data + dy * dest->bm_rowsize + dx * PA_BPP);
		int x;
		pa_flush();
		for(;h--;)
		{
			for(x = 0; x < w; x++)
				t[x] = pa_clut[s[x]];
			s += src->bm_rowsize;
			t += dest->bm_rowsize / PA_BPP;  // rowsize is in bytes; step in ushorts
		}
		return;
	}
	// 15-bit -> 15-bit: hand off to the accelerator blit.
	if ( (src->bm_type == BM_LINEAR15) && (dest->bm_type == BM_LINEAR15 ))
	{
		pa_blit(dest, dx, dy, src, sx, sy, w, h);
		return;
	}
#endif

	// Fallback: per-pixel get/put -- slow, but works for any type pair.
	for (y1=0; y1 < h; y1++ ) {
		for (x1=0; x1 < w; x1++ ) {
			gr_bm_pixel( dest, dx+x1, dy+y1, gr_gpixel(src,sx+x1,sy+y1) );
		}
	}
}
  1081. void gr_bm_bitblt(int w, int h, int dx, int dy, int sx, int sy, grs_bitmap * src, grs_bitmap * dest)
  1082. {
  1083. int dx1=dx, dx2=dx+dest->bm_w-1;
  1084. int dy1=dy, dy2=dy+dest->bm_h-1;
  1085. int sx1=sx, sx2=sx+src->bm_w-1;
  1086. int sy1=sy, sy2=sy+src->bm_h-1;
  1087. if ((dx1 >= dest->bm_w ) || (dx2 < 0)) return;
  1088. if ((dy1 >= dest->bm_h ) || (dy2 < 0)) return;
  1089. if ( dx1 < 0 ) { sx1 += -dx1; dx1 = 0; }
  1090. if ( dy1 < 0 ) { sy1 += -dy1; dy1 = 0; }
  1091. if ( dx2 >= dest->bm_w ) { dx2 = dest->bm_w-1; }
  1092. if ( dy2 >= dest->bm_h ) { dy2 = dest->bm_h-1; }
  1093. if ((sx1 >= src->bm_w ) || (sx2 < 0)) return;
  1094. if ((sy1 >= src->bm_h ) || (sy2 < 0)) return;
  1095. if ( sx1 < 0 ) { dx1 += -sx1; sx1 = 0; }
  1096. if ( sy1 < 0 ) { dy1 += -sy1; sy1 = 0; }
  1097. if ( sx2 >= src->bm_w ) { sx2 = src->bm_w-1; }
  1098. if ( sy2 >= src->bm_h ) { sy2 = src->bm_h-1; }
  1099. // Draw bitmap bm[x,y] into (dx1,dy1)-(dx2,dy2)
  1100. if ( dx2-dx1+1 < w )
  1101. w = dx2-dx1+1;
  1102. if ( dy2-dy1+1 < h )
  1103. h = dy2-dy1+1;
  1104. if ( sx2-sx1+1 < w )
  1105. w = sx2-sx1+1;
  1106. if ( sy2-sy1+1 < h )
  1107. h = sy2-sy1+1;
  1108. gr_bm_ubitblt(w,h, dx1, dy1, sx1, sy1, src, dest );
  1109. }
  1110. void gr_ubitmap( int x, int y, grs_bitmap *bm )
  1111. { int source, dest;
  1112. source = bm->bm_type;
  1113. dest = TYPE;
  1114. if (source==BM_LINEAR) {
  1115. switch( dest )
  1116. {
  1117. case BM_LINEAR:
  1118. if ( bm->bm_flags & BM_FLAG_RLE )
  1119. gr_bm_ubitblt00_rle(bm->bm_w, bm->bm_h, x, y, 0, 0, bm, &grd_curcanv->cv_bitmap );
  1120. else
  1121. gr_ubitmap00( x, y, bm );
  1122. return;
  1123. case BM_SVGA:
  1124. if ( bm->bm_flags & BM_FLAG_RLE )
  1125. gr_bm_ubitblt0x_rle(bm->bm_w, bm->bm_h, x, y, 0, 0, bm, &grd_curcanv->cv_bitmap );
  1126. else
  1127. gr_vesa_bitmap( bm, &grd_curcanv->cv_bitmap, x, y );
  1128. return;
  1129. case BM_MODEX:
  1130. gr_bm_ubitblt01(bm->bm_w, bm->bm_h, x+XOFFSET, y+YOFFSET, 0, 0, bm, &grd_curcanv->cv_bitmap);
  1131. return;
  1132. #if defined(POLY_ACC)
  1133. case BM_LINEAR15:
  1134. if ( bm->bm_flags & BM_FLAG_RLE )
  1135. gr_bm_ubitblt05_rle(bm->bm_w, bm->bm_h, x, y, 0, 0, bm, &grd_curcanv->cv_bitmap );
  1136. else
  1137. gr_ubitmap05( x, y, bm);
  1138. return;
  1139. #endif
  1140. default:
  1141. gr_ubitmap012( x, y, bm );
  1142. return;
  1143. }
  1144. } else {
  1145. gr_ubitmapGENERIC(x, y, bm);
  1146. }
  1147. }
  1148. void gr_ubitmapm( int x, int y, grs_bitmap *bm )
  1149. { int source, dest;
  1150. source = bm->bm_type;
  1151. dest = TYPE;
  1152. Assert(x+bm->bm_w <= grd_curcanv->cv_w);
  1153. Assert(y+bm->bm_h <= grd_curcanv->cv_h);
  1154. #ifdef _3DFX
  1155. _3dfx_Blit( x, y, bm );
  1156. if ( _3dfx_skip_ddraw )
  1157. return;
  1158. #endif
  1159. if (source==BM_LINEAR) {
  1160. switch( dest )
  1161. {
  1162. case BM_LINEAR:
  1163. if ( bm->bm_flags & BM_FLAG_RLE )
  1164. gr_bm_ubitblt00m_rle(bm->bm_w, bm->bm_h, x, y, 0, 0, bm, &grd_curcanv->cv_bitmap );
  1165. else
  1166. gr_ubitmap00m( x, y, bm );
  1167. return;
  1168. case BM_SVGA:
  1169. if (bm->bm_flags & BM_FLAG_RLE)
  1170. gr_bm_ubitblt02m_rle(bm->bm_w, bm->bm_h, x, y, 0, 0, bm, &grd_curcanv->cv_bitmap);
  1171. //gr_bm_ubitblt0xm_rle(bm->bm_w, bm->bm_h, x, y, 0, 0, bm, &grd_curcanv->cv_bitmap);
  1172. else
  1173. gr_bm_ubitblt02m(bm->bm_w, bm->bm_h, x, y, 0, 0, bm, &grd_curcanv->cv_bitmap);
  1174. //gr_ubitmapGENERICm(x, y, bm);
  1175. return;
  1176. case BM_MODEX:
  1177. gr_bm_ubitblt01m(bm->bm_w, bm->bm_h, x+XOFFSET, y+YOFFSET, 0, 0, bm, &grd_curcanv->cv_bitmap);
  1178. return;
  1179. #if defined(POLY_ACC)
  1180. case BM_LINEAR15:
  1181. if ( bm->bm_flags & BM_FLAG_RLE )
  1182. gr_bm_ubitblt05m_rle(bm->bm_w, bm->bm_h, x, y, 0, 0, bm, &grd_curcanv->cv_bitmap );
  1183. else
  1184. gr_ubitmap05m( x, y, bm );
  1185. return;
  1186. #endif
  1187. default:
  1188. gr_ubitmap012m( x, y, bm );
  1189. return;
  1190. }
  1191. } else {
  1192. gr_ubitmapGENERICm(x, y, bm);
  1193. }
  1194. }
  1195. void gr_bitmapm( int x, int y, grs_bitmap *bm )
  1196. {
  1197. int dx1=x, dx2=x+bm->bm_w-1;
  1198. int dy1=y, dy2=y+bm->bm_h-1;
  1199. int sx=0, sy=0;
  1200. if ((dx1 >= grd_curcanv->cv_bitmap.bm_w ) || (dx2 < 0)) return;
  1201. if ((dy1 >= grd_curcanv->cv_bitmap.bm_h) || (dy2 < 0)) return;
  1202. if ( dx1 < 0 ) { sx = -dx1; dx1 = 0; }
  1203. if ( dy1 < 0 ) { sy = -dy1; dy1 = 0; }
  1204. if ( dx2 >= grd_curcanv->cv_bitmap.bm_w ) { dx2 = grd_curcanv->cv_bitmap.bm_w-1; }
  1205. if ( dy2 >= grd_curcanv->cv_bitmap.bm_h ) { dy2 = grd_curcanv->cv_bitmap.bm_h-1; }
  1206. // Draw bitmap bm[x,y] into (dx1,dy1)-(dx2,dy2)
  1207. if ( (bm->bm_type == BM_LINEAR) && (grd_curcanv->cv_bitmap.bm_type == BM_LINEAR ))
  1208. {
  1209. if ( bm->bm_flags & BM_FLAG_RLE )
  1210. gr_bm_ubitblt00m_rle(dx2-dx1+1,dy2-dy1+1, dx1, dy1, sx, sy, bm, &grd_curcanv->cv_bitmap );
  1211. else
  1212. gr_bm_ubitblt00m(dx2-dx1+1,dy2-dy1+1, dx1, dy1, sx, sy, bm, &grd_curcanv->cv_bitmap );
  1213. return;
  1214. }
  1215. else if ( (bm->bm_type == BM_LINEAR) && (grd_curcanv->cv_bitmap.bm_type == BM_SVGA ))
  1216. {
  1217. gr_bm_ubitblt02m(dx2-dx1+1,dy2-dy1+1, dx1, dy1, sx, sy, bm, &grd_curcanv->cv_bitmap );
  1218. return;
  1219. }
  1220. gr_bm_ubitbltm(dx2-dx1+1,dy2-dy1+1, dx1, dy1, sx, sy, bm, &grd_curcanv->cv_bitmap );
  1221. }
  1222. void gr_bm_ubitbltm(int w, int h, int dx, int dy, int sx, int sy, grs_bitmap * src, grs_bitmap * dest)
  1223. {
  1224. register int x1, y1;
  1225. ubyte c;
  1226. #if defined(POLY_ACC)
  1227. if(src->bm_type == BM_LINEAR && dest->bm_type == BM_LINEAR15)
  1228. {
  1229. ubyte *s;
  1230. ushort *d;
  1231. ushort u;
  1232. int smod, dmod;
  1233. pa_flush();
  1234. s = (ubyte *)(src->bm_data + src->bm_rowsize * sy + sx);
  1235. smod = src->bm_rowsize - w;
  1236. d = (ushort *)(dest->bm_data + dest->bm_rowsize * dy + dx * PA_BPP);
  1237. dmod = dest->bm_rowsize / PA_BPP - w;
  1238. for (; h--;) {
  1239. for (x1=w; x1--; ) {
  1240. if ((u = *s) != TRANSPARENCY_COLOR)
  1241. *d = pa_clut[u];
  1242. ++s;
  1243. ++d;
  1244. }
  1245. s += smod;
  1246. d += dmod;
  1247. }
  1248. }
  1249. if(src->bm_type == BM_LINEAR15)
  1250. {
  1251. Assert(src->bm_type == dest->bm_type); // I don't support 15 to 8 yet.
  1252. pa_blit_transparent(dest, dx, dy, src, sx, sy, w, h);
  1253. return;
  1254. }
  1255. #endif
  1256. for (y1=0; y1 < h; y1++ ) {
  1257. for (x1=0; x1 < w; x1++ ) {
  1258. if ((c=gr_gpixel(src,sx+x1,sy+y1))!=TRANSPARENCY_COLOR)
  1259. gr_bm_pixel( dest, dx+x1, dy+y1,c );
  1260. }
  1261. }
  1262. }