// ARMAssembler.h
/*
 * Copyright (C) 2009, 2010 University of Szeged
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY UNIVERSITY OF SZEGED ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL UNIVERSITY OF SZEGED OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#ifndef ARMAssembler_h
#define ARMAssembler_h

#if ENABLE(ASSEMBLER) && CPU(ARM_TRADITIONAL)

#include "AssemblerBufferWithConstantPool.h"
#include "JITCompilationEffort.h"
#include <wtf/Assertions.h>

namespace JSC {

typedef uint32_t ARMWord; // One 32-bit ARM machine word / instruction.
  34. namespace ARMRegisters {
  35. typedef enum {
  36. r0 = 0,
  37. r1,
  38. r2,
  39. r3, S0 = r3, /* Same as thumb assembler. */
  40. r4,
  41. r5,
  42. r6,
  43. r7,
  44. r8,
  45. r9,
  46. r10,
  47. r11,
  48. r12, S1 = r12,
  49. r13, sp = r13,
  50. r14, lr = r14,
  51. r15, pc = r15
  52. } RegisterID;
  53. typedef enum {
  54. d0,
  55. d1,
  56. d2,
  57. d3,
  58. d4,
  59. d5,
  60. d6,
  61. d7, SD0 = d7, /* Same as thumb assembler. */
  62. d8,
  63. d9,
  64. d10,
  65. d11,
  66. d12,
  67. d13,
  68. d14,
  69. d15,
  70. d16,
  71. d17,
  72. d18,
  73. d19,
  74. d20,
  75. d21,
  76. d22,
  77. d23,
  78. d24,
  79. d25,
  80. d26,
  81. d27,
  82. d28,
  83. d29,
  84. d30,
  85. d31
  86. } FPRegisterID;
  87. } // namespace ARMRegisters
  88. class ARMAssembler {
  89. public:
  90. typedef ARMRegisters::RegisterID RegisterID;
  91. typedef ARMRegisters::FPRegisterID FPRegisterID;
  92. typedef AssemblerBufferWithConstantPool<2048, 4, 4, ARMAssembler> ARMBuffer;
  93. typedef SegmentedVector<AssemblerLabel, 64> Jumps;
  94. ARMAssembler()
  95. : m_indexOfTailOfLastWatchpoint(1)
  96. {
  97. }
  98. // ARM conditional constants
  99. typedef enum {
  100. EQ = 0x00000000, // Zero / Equal.
  101. NE = 0x10000000, // Non-zero / Not equal.
  102. CS = 0x20000000, // Unsigned higher or same.
  103. CC = 0x30000000, // Unsigned lower.
  104. MI = 0x40000000, // Negative.
  105. PL = 0x50000000, // Positive or zero.
  106. VS = 0x60000000, // Overflowed.
  107. VC = 0x70000000, // Not overflowed.
  108. HI = 0x80000000, // Unsigned higher.
  109. LS = 0x90000000, // Unsigned lower or same.
  110. GE = 0xa0000000, // Signed greater than or equal.
  111. LT = 0xb0000000, // Signed less than.
  112. GT = 0xc0000000, // Signed greater than.
  113. LE = 0xd0000000, // Signed less than or equal.
  114. AL = 0xe0000000 // Unconditional / Always execute.
  115. } Condition;
  116. // ARM instruction constants
  117. enum {
  118. AND = (0x0 << 21),
  119. EOR = (0x1 << 21),
  120. SUB = (0x2 << 21),
  121. RSB = (0x3 << 21),
  122. ADD = (0x4 << 21),
  123. ADC = (0x5 << 21),
  124. SBC = (0x6 << 21),
  125. RSC = (0x7 << 21),
  126. TST = (0x8 << 21),
  127. TEQ = (0x9 << 21),
  128. CMP = (0xa << 21),
  129. CMN = (0xb << 21),
  130. ORR = (0xc << 21),
  131. MOV = (0xd << 21),
  132. BIC = (0xe << 21),
  133. MVN = (0xf << 21),
  134. MUL = 0x00000090,
  135. MULL = 0x00c00090,
  136. VMOV_F64 = 0x0eb00b40,
  137. VADD_F64 = 0x0e300b00,
  138. VDIV_F64 = 0x0e800b00,
  139. VSUB_F64 = 0x0e300b40,
  140. VMUL_F64 = 0x0e200b00,
  141. VCMP_F64 = 0x0eb40b40,
  142. VSQRT_F64 = 0x0eb10bc0,
  143. VABS_F64 = 0x0eb00bc0,
  144. VNEG_F64 = 0x0eb10b40,
  145. STMDB = 0x09200000,
  146. LDMIA = 0x08b00000,
  147. B = 0x0a000000,
  148. BL = 0x0b000000,
  149. BX = 0x012fff10,
  150. VMOV_VFP64 = 0x0c400a10,
  151. VMOV_ARM64 = 0x0c500a10,
  152. VMOV_VFP32 = 0x0e000a10,
  153. VMOV_ARM32 = 0x0e100a10,
  154. VCVT_F64_S32 = 0x0eb80bc0,
  155. VCVT_S32_F64 = 0x0ebd0bc0,
  156. VCVT_U32_F64 = 0x0ebc0bc0,
  157. VCVT_F32_F64 = 0x0eb70bc0,
  158. VCVT_F64_F32 = 0x0eb70ac0,
  159. VMRS_APSR = 0x0ef1fa10,
  160. CLZ = 0x016f0f10,
  161. BKPT = 0xe1200070,
  162. BLX = 0x012fff30,
  163. #if WTF_ARM_ARCH_AT_LEAST(7)
  164. MOVW = 0x03000000,
  165. MOVT = 0x03400000,
  166. #endif
  167. NOP = 0xe1a00000,
  168. };
  169. enum {
  170. Op2Immediate = (1 << 25),
  171. ImmediateForHalfWordTransfer = (1 << 22),
  172. Op2InvertedImmediate = (1 << 26),
  173. SetConditionalCodes = (1 << 20),
  174. Op2IsRegisterArgument = (1 << 25),
  175. // Data transfer flags.
  176. DataTransferUp = (1 << 23),
  177. DataTransferWriteBack = (1 << 21),
  178. DataTransferPostUpdate = (1 << 24),
  179. DataTransferLoad = (1 << 20),
  180. ByteDataTransfer = (1 << 22),
  181. };
  182. enum DataTransferTypeA {
  183. LoadUint32 = 0x05000000 | DataTransferLoad,
  184. LoadUint8 = 0x05400000 | DataTransferLoad,
  185. StoreUint32 = 0x05000000,
  186. StoreUint8 = 0x05400000,
  187. };
  188. enum DataTransferTypeB {
  189. LoadUint16 = 0x010000b0 | DataTransferLoad,
  190. LoadInt16 = 0x010000f0 | DataTransferLoad,
  191. LoadInt8 = 0x010000d0 | DataTransferLoad,
  192. StoreUint16 = 0x010000b0,
  193. };
  194. enum DataTransferTypeFloat {
  195. LoadFloat = 0x0d000a00 | DataTransferLoad,
  196. LoadDouble = 0x0d000b00 | DataTransferLoad,
  197. StoreFloat = 0x0d000a00,
  198. StoreDouble = 0x0d000b00,
  199. };
  200. // Masks of ARM instructions
  201. enum {
  202. BranchOffsetMask = 0x00ffffff,
  203. ConditionalFieldMask = 0xf0000000,
  204. DataTransferOffsetMask = 0xfff,
  205. };
  206. enum {
  207. MinimumBranchOffsetDistance = -0x00800000,
  208. MaximumBranchOffsetDistance = 0x007fffff,
  209. };
  210. enum {
  211. padForAlign8 = 0x00,
  212. padForAlign16 = 0x0000,
  213. padForAlign32 = 0xe12fff7f // 'bkpt 0xffff' instruction.
  214. };
  215. static const ARMWord InvalidImmediate = 0xf0000000;
  216. static const ARMWord InvalidBranchTarget = 0xffffffff;
  217. static const int DefaultPrefetchOffset = 2;
  218. static const ARMWord BlxInstructionMask = 0x012fff30;
  219. static const ARMWord LdrOrAddInstructionMask = 0x0ff00000;
  220. static const ARMWord LdrPcImmediateInstructionMask = 0x0f7f0000;
  221. static const ARMWord AddImmediateInstruction = 0x02800000;
  222. static const ARMWord BlxInstruction = 0x012fff30;
  223. static const ARMWord LdrImmediateInstruction = 0x05900000;
  224. static const ARMWord LdrPcImmediateInstruction = 0x051f0000;
  225. // Instruction formating
  226. void emitInstruction(ARMWord op, int rd, int rn, ARMWord op2)
  227. {
  228. ASSERT(((op2 & ~Op2Immediate) <= 0xfff) || (((op2 & ~ImmediateForHalfWordTransfer) <= 0xfff)));
  229. m_buffer.putInt(op | RN(rn) | RD(rd) | op2);
  230. }
  231. void emitDoublePrecisionInstruction(ARMWord op, int dd, int dn, int dm)
  232. {
  233. ASSERT((dd >= 0 && dd <= 31) && (dn >= 0 && dn <= 31) && (dm >= 0 && dm <= 31));
  234. m_buffer.putInt(op | ((dd & 0xf) << 12) | ((dd & 0x10) << (22 - 4))
  235. | ((dn & 0xf) << 16) | ((dn & 0x10) << (7 - 4))
  236. | (dm & 0xf) | ((dm & 0x10) << (5 - 4)));
  237. }
  238. void emitSinglePrecisionInstruction(ARMWord op, int sd, int sn, int sm)
  239. {
  240. ASSERT((sd >= 0 && sd <= 31) && (sn >= 0 && sn <= 31) && (sm >= 0 && sm <= 31));
  241. m_buffer.putInt(op | ((sd >> 1) << 12) | ((sd & 0x1) << 22)
  242. | ((sn >> 1) << 16) | ((sn & 0x1) << 7)
  243. | (sm >> 1) | ((sm & 0x1) << 5));
  244. }
  245. void bitAnd(int rd, int rn, ARMWord op2, Condition cc = AL)
  246. {
  247. emitInstruction(toARMWord(cc) | AND, rd, rn, op2);
  248. }
  249. void bitAnds(int rd, int rn, ARMWord op2, Condition cc = AL)
  250. {
  251. emitInstruction(toARMWord(cc) | AND | SetConditionalCodes, rd, rn, op2);
  252. }
  253. void eor(int rd, int rn, ARMWord op2, Condition cc = AL)
  254. {
  255. emitInstruction(toARMWord(cc) | EOR, rd, rn, op2);
  256. }
  257. void eors(int rd, int rn, ARMWord op2, Condition cc = AL)
  258. {
  259. emitInstruction(toARMWord(cc) | EOR | SetConditionalCodes, rd, rn, op2);
  260. }
  261. void sub(int rd, int rn, ARMWord op2, Condition cc = AL)
  262. {
  263. emitInstruction(toARMWord(cc) | SUB, rd, rn, op2);
  264. }
  265. void subs(int rd, int rn, ARMWord op2, Condition cc = AL)
  266. {
  267. emitInstruction(toARMWord(cc) | SUB | SetConditionalCodes, rd, rn, op2);
  268. }
  269. void rsb(int rd, int rn, ARMWord op2, Condition cc = AL)
  270. {
  271. emitInstruction(toARMWord(cc) | RSB, rd, rn, op2);
  272. }
  273. void rsbs(int rd, int rn, ARMWord op2, Condition cc = AL)
  274. {
  275. emitInstruction(toARMWord(cc) | RSB | SetConditionalCodes, rd, rn, op2);
  276. }
  277. void add(int rd, int rn, ARMWord op2, Condition cc = AL)
  278. {
  279. emitInstruction(toARMWord(cc) | ADD, rd, rn, op2);
  280. }
  281. void adds(int rd, int rn, ARMWord op2, Condition cc = AL)
  282. {
  283. emitInstruction(toARMWord(cc) | ADD | SetConditionalCodes, rd, rn, op2);
  284. }
  285. void adc(int rd, int rn, ARMWord op2, Condition cc = AL)
  286. {
  287. emitInstruction(toARMWord(cc) | ADC, rd, rn, op2);
  288. }
  289. void adcs(int rd, int rn, ARMWord op2, Condition cc = AL)
  290. {
  291. emitInstruction(toARMWord(cc) | ADC | SetConditionalCodes, rd, rn, op2);
  292. }
  293. void sbc(int rd, int rn, ARMWord op2, Condition cc = AL)
  294. {
  295. emitInstruction(toARMWord(cc) | SBC, rd, rn, op2);
  296. }
  297. void sbcs(int rd, int rn, ARMWord op2, Condition cc = AL)
  298. {
  299. emitInstruction(toARMWord(cc) | SBC | SetConditionalCodes, rd, rn, op2);
  300. }
  301. void rsc(int rd, int rn, ARMWord op2, Condition cc = AL)
  302. {
  303. emitInstruction(toARMWord(cc) | RSC, rd, rn, op2);
  304. }
  305. void rscs(int rd, int rn, ARMWord op2, Condition cc = AL)
  306. {
  307. emitInstruction(toARMWord(cc) | RSC | SetConditionalCodes, rd, rn, op2);
  308. }
  309. void tst(int rn, ARMWord op2, Condition cc = AL)
  310. {
  311. emitInstruction(toARMWord(cc) | TST | SetConditionalCodes, 0, rn, op2);
  312. }
  313. void teq(int rn, ARMWord op2, Condition cc = AL)
  314. {
  315. emitInstruction(toARMWord(cc) | TEQ | SetConditionalCodes, 0, rn, op2);
  316. }
  317. void cmp(int rn, ARMWord op2, Condition cc = AL)
  318. {
  319. emitInstruction(toARMWord(cc) | CMP | SetConditionalCodes, 0, rn, op2);
  320. }
  321. void cmn(int rn, ARMWord op2, Condition cc = AL)
  322. {
  323. emitInstruction(toARMWord(cc) | CMN | SetConditionalCodes, 0, rn, op2);
  324. }
  325. void orr(int rd, int rn, ARMWord op2, Condition cc = AL)
  326. {
  327. emitInstruction(toARMWord(cc) | ORR, rd, rn, op2);
  328. }
  329. void orrs(int rd, int rn, ARMWord op2, Condition cc = AL)
  330. {
  331. emitInstruction(toARMWord(cc) | ORR | SetConditionalCodes, rd, rn, op2);
  332. }
  333. void mov(int rd, ARMWord op2, Condition cc = AL)
  334. {
  335. emitInstruction(toARMWord(cc) | MOV, rd, ARMRegisters::r0, op2);
  336. }
  337. #if WTF_ARM_ARCH_AT_LEAST(7)
  338. void movw(int rd, ARMWord op2, Condition cc = AL)
  339. {
  340. ASSERT((op2 | 0xf0fff) == 0xf0fff);
  341. m_buffer.putInt(toARMWord(cc) | MOVW | RD(rd) | op2);
  342. }
  343. void movt(int rd, ARMWord op2, Condition cc = AL)
  344. {
  345. ASSERT((op2 | 0xf0fff) == 0xf0fff);
  346. m_buffer.putInt(toARMWord(cc) | MOVT | RD(rd) | op2);
  347. }
  348. #endif
  349. void movs(int rd, ARMWord op2, Condition cc = AL)
  350. {
  351. emitInstruction(toARMWord(cc) | MOV | SetConditionalCodes, rd, ARMRegisters::r0, op2);
  352. }
  353. void bic(int rd, int rn, ARMWord op2, Condition cc = AL)
  354. {
  355. emitInstruction(toARMWord(cc) | BIC, rd, rn, op2);
  356. }
  357. void bics(int rd, int rn, ARMWord op2, Condition cc = AL)
  358. {
  359. emitInstruction(toARMWord(cc) | BIC | SetConditionalCodes, rd, rn, op2);
  360. }
  361. void mvn(int rd, ARMWord op2, Condition cc = AL)
  362. {
  363. emitInstruction(toARMWord(cc) | MVN, rd, ARMRegisters::r0, op2);
  364. }
  365. void mvns(int rd, ARMWord op2, Condition cc = AL)
  366. {
  367. emitInstruction(toARMWord(cc) | MVN | SetConditionalCodes, rd, ARMRegisters::r0, op2);
  368. }
  369. void mul(int rd, int rn, int rm, Condition cc = AL)
  370. {
  371. m_buffer.putInt(toARMWord(cc) | MUL | RN(rd) | RS(rn) | RM(rm));
  372. }
  373. void muls(int rd, int rn, int rm, Condition cc = AL)
  374. {
  375. m_buffer.putInt(toARMWord(cc) | MUL | SetConditionalCodes | RN(rd) | RS(rn) | RM(rm));
  376. }
  377. void mull(int rdhi, int rdlo, int rn, int rm, Condition cc = AL)
  378. {
  379. m_buffer.putInt(toARMWord(cc) | MULL | RN(rdhi) | RD(rdlo) | RS(rn) | RM(rm));
  380. }
  381. void vmov_f64(int dd, int dm, Condition cc = AL)
  382. {
  383. emitDoublePrecisionInstruction(toARMWord(cc) | VMOV_F64, dd, 0, dm);
  384. }
  385. void vadd_f64(int dd, int dn, int dm, Condition cc = AL)
  386. {
  387. emitDoublePrecisionInstruction(toARMWord(cc) | VADD_F64, dd, dn, dm);
  388. }
  389. void vdiv_f64(int dd, int dn, int dm, Condition cc = AL)
  390. {
  391. emitDoublePrecisionInstruction(toARMWord(cc) | VDIV_F64, dd, dn, dm);
  392. }
  393. void vsub_f64(int dd, int dn, int dm, Condition cc = AL)
  394. {
  395. emitDoublePrecisionInstruction(toARMWord(cc) | VSUB_F64, dd, dn, dm);
  396. }
  397. void vmul_f64(int dd, int dn, int dm, Condition cc = AL)
  398. {
  399. emitDoublePrecisionInstruction(toARMWord(cc) | VMUL_F64, dd, dn, dm);
  400. }
  401. void vcmp_f64(int dd, int dm, Condition cc = AL)
  402. {
  403. emitDoublePrecisionInstruction(toARMWord(cc) | VCMP_F64, dd, 0, dm);
  404. }
  405. void vsqrt_f64(int dd, int dm, Condition cc = AL)
  406. {
  407. emitDoublePrecisionInstruction(toARMWord(cc) | VSQRT_F64, dd, 0, dm);
  408. }
  409. void vabs_f64(int dd, int dm, Condition cc = AL)
  410. {
  411. emitDoublePrecisionInstruction(toARMWord(cc) | VABS_F64, dd, 0, dm);
  412. }
  413. void vneg_f64(int dd, int dm, Condition cc = AL)
  414. {
  415. emitDoublePrecisionInstruction(toARMWord(cc) | VNEG_F64, dd, 0, dm);
  416. }
  417. void ldrImmediate(int rd, ARMWord imm, Condition cc = AL)
  418. {
  419. m_buffer.putIntWithConstantInt(toARMWord(cc) | LoadUint32 | DataTransferUp | RN(ARMRegisters::pc) | RD(rd), imm, true);
  420. }
  421. void ldrUniqueImmediate(int rd, ARMWord imm, Condition cc = AL)
  422. {
  423. m_buffer.putIntWithConstantInt(toARMWord(cc) | LoadUint32 | DataTransferUp | RN(ARMRegisters::pc) | RD(rd), imm);
  424. }
  425. void dtrUp(DataTransferTypeA transferType, int rd, int rb, ARMWord op2, Condition cc = AL)
  426. {
  427. emitInstruction(toARMWord(cc) | transferType | DataTransferUp, rd, rb, op2);
  428. }
  429. void dtrUpRegister(DataTransferTypeA transferType, int rd, int rb, int rm, Condition cc = AL)
  430. {
  431. emitInstruction(toARMWord(cc) | transferType | DataTransferUp | Op2IsRegisterArgument, rd, rb, rm);
  432. }
  433. void dtrDown(DataTransferTypeA transferType, int rd, int rb, ARMWord op2, Condition cc = AL)
  434. {
  435. emitInstruction(toARMWord(cc) | transferType, rd, rb, op2);
  436. }
  437. void dtrDownRegister(DataTransferTypeA transferType, int rd, int rb, int rm, Condition cc = AL)
  438. {
  439. emitInstruction(toARMWord(cc) | transferType | Op2IsRegisterArgument, rd, rb, rm);
  440. }
  441. void halfDtrUp(DataTransferTypeB transferType, int rd, int rb, ARMWord op2, Condition cc = AL)
  442. {
  443. emitInstruction(toARMWord(cc) | transferType | DataTransferUp, rd, rb, op2);
  444. }
  445. void halfDtrUpRegister(DataTransferTypeB transferType, int rd, int rn, int rm, Condition cc = AL)
  446. {
  447. emitInstruction(toARMWord(cc) | transferType | DataTransferUp, rd, rn, rm);
  448. }
  449. void halfDtrDown(DataTransferTypeB transferType, int rd, int rb, ARMWord op2, Condition cc = AL)
  450. {
  451. emitInstruction(toARMWord(cc) | transferType, rd, rb, op2);
  452. }
  453. void halfDtrDownRegister(DataTransferTypeB transferType, int rd, int rn, int rm, Condition cc = AL)
  454. {
  455. emitInstruction(toARMWord(cc) | transferType, rd, rn, rm);
  456. }
  457. void doubleDtrUp(DataTransferTypeFloat type, int rd, int rb, ARMWord op2, Condition cc = AL)
  458. {
  459. ASSERT(op2 <= 0xff && rd <= 15);
  460. /* Only d0-d15 and s0, s2, s4 ... s30 are supported. */
  461. m_buffer.putInt(toARMWord(cc) | DataTransferUp | type | (rd << 12) | RN(rb) | op2);
  462. }
  463. void doubleDtrDown(DataTransferTypeFloat type, int rd, int rb, ARMWord op2, Condition cc = AL)
  464. {
  465. ASSERT(op2 <= 0xff && rd <= 15);
  466. /* Only d0-d15 and s0, s2, s4 ... s30 are supported. */
  467. m_buffer.putInt(toARMWord(cc) | type | (rd << 12) | RN(rb) | op2);
  468. }
  469. void push(int reg, Condition cc = AL)
  470. {
  471. ASSERT(ARMWord(reg) <= 0xf);
  472. m_buffer.putInt(toARMWord(cc) | StoreUint32 | DataTransferWriteBack | RN(ARMRegisters::sp) | RD(reg) | 0x4);
  473. }
  474. void pop(int reg, Condition cc = AL)
  475. {
  476. ASSERT(ARMWord(reg) <= 0xf);
  477. m_buffer.putInt(toARMWord(cc) | (LoadUint32 ^ DataTransferPostUpdate) | DataTransferUp | RN(ARMRegisters::sp) | RD(reg) | 0x4);
  478. }
  479. inline void poke(int reg, Condition cc = AL)
  480. {
  481. dtrDown(StoreUint32, ARMRegisters::sp, 0, reg, cc);
  482. }
  483. inline void peek(int reg, Condition cc = AL)
  484. {
  485. dtrUp(LoadUint32, reg, ARMRegisters::sp, 0, cc);
  486. }
  487. void vmov_vfp64(int sm, int rt, int rt2, Condition cc = AL)
  488. {
  489. ASSERT(rt != rt2);
  490. m_buffer.putInt(toARMWord(cc) | VMOV_VFP64 | RN(rt2) | RD(rt) | (sm & 0xf) | ((sm & 0x10) << (5 - 4)));
  491. }
  492. void vmov_arm64(int rt, int rt2, int sm, Condition cc = AL)
  493. {
  494. ASSERT(rt != rt2);
  495. m_buffer.putInt(toARMWord(cc) | VMOV_ARM64 | RN(rt2) | RD(rt) | (sm & 0xf) | ((sm & 0x10) << (5 - 4)));
  496. }
  497. void vmov_vfp32(int sn, int rt, Condition cc = AL)
  498. {
  499. ASSERT(rt <= 15);
  500. emitSinglePrecisionInstruction(toARMWord(cc) | VMOV_VFP32, rt << 1, sn, 0);
  501. }
  502. void vmov_arm32(int rt, int sn, Condition cc = AL)
  503. {
  504. ASSERT(rt <= 15);
  505. emitSinglePrecisionInstruction(toARMWord(cc) | VMOV_ARM32, rt << 1, sn, 0);
  506. }
  507. void vcvt_f64_s32(int dd, int sm, Condition cc = AL)
  508. {
  509. ASSERT(!(sm & 0x1)); // sm must be divisible by 2
  510. emitDoublePrecisionInstruction(toARMWord(cc) | VCVT_F64_S32, dd, 0, (sm >> 1));
  511. }
  512. void vcvt_s32_f64(int sd, int dm, Condition cc = AL)
  513. {
  514. ASSERT(!(sd & 0x1)); // sd must be divisible by 2
  515. emitDoublePrecisionInstruction(toARMWord(cc) | VCVT_S32_F64, (sd >> 1), 0, dm);
  516. }
  517. void vcvt_u32_f64(int sd, int dm, Condition cc = AL)
  518. {
  519. ASSERT(!(sd & 0x1)); // sd must be divisible by 2
  520. emitDoublePrecisionInstruction(toARMWord(cc) | VCVT_U32_F64, (sd >> 1), 0, dm);
  521. }
  522. void vcvt_f64_f32(int dd, int sm, Condition cc = AL)
  523. {
  524. ASSERT(dd <= 15 && sm <= 15);
  525. emitDoublePrecisionInstruction(toARMWord(cc) | VCVT_F64_F32, dd, 0, sm);
  526. }
  527. void vcvt_f32_f64(int dd, int sm, Condition cc = AL)
  528. {
  529. ASSERT(dd <= 15 && sm <= 15);
  530. emitDoublePrecisionInstruction(toARMWord(cc) | VCVT_F32_F64, dd, 0, sm);
  531. }
  532. void vmrs_apsr(Condition cc = AL)
  533. {
  534. m_buffer.putInt(toARMWord(cc) | VMRS_APSR);
  535. }
  536. void clz(int rd, int rm, Condition cc = AL)
  537. {
  538. m_buffer.putInt(toARMWord(cc) | CLZ | RD(rd) | RM(rm));
  539. }
  540. void bkpt(ARMWord value)
  541. {
  542. m_buffer.putInt(BKPT | ((value & 0xff0) << 4) | (value & 0xf));
  543. }
  544. void nop()
  545. {
  546. m_buffer.putInt(NOP);
  547. }
  548. void bx(int rm, Condition cc = AL)
  549. {
  550. emitInstruction(toARMWord(cc) | BX, 0, 0, RM(rm));
  551. }
  552. AssemblerLabel blx(int rm, Condition cc = AL)
  553. {
  554. emitInstruction(toARMWord(cc) | BLX, 0, 0, RM(rm));
  555. return m_buffer.label();
  556. }
  557. static ARMWord lsl(int reg, ARMWord value)
  558. {
  559. ASSERT(reg <= ARMRegisters::pc);
  560. ASSERT(value <= 0x1f);
  561. return reg | (value << 7) | 0x00;
  562. }
  563. static ARMWord lsr(int reg, ARMWord value)
  564. {
  565. ASSERT(reg <= ARMRegisters::pc);
  566. ASSERT(value <= 0x1f);
  567. return reg | (value << 7) | 0x20;
  568. }
  569. static ARMWord asr(int reg, ARMWord value)
  570. {
  571. ASSERT(reg <= ARMRegisters::pc);
  572. ASSERT(value <= 0x1f);
  573. return reg | (value << 7) | 0x40;
  574. }
  575. static ARMWord lslRegister(int reg, int shiftReg)
  576. {
  577. ASSERT(reg <= ARMRegisters::pc);
  578. ASSERT(shiftReg <= ARMRegisters::pc);
  579. return reg | (shiftReg << 8) | 0x10;
  580. }
  581. static ARMWord lsrRegister(int reg, int shiftReg)
  582. {
  583. ASSERT(reg <= ARMRegisters::pc);
  584. ASSERT(shiftReg <= ARMRegisters::pc);
  585. return reg | (shiftReg << 8) | 0x30;
  586. }
  587. static ARMWord asrRegister(int reg, int shiftReg)
  588. {
  589. ASSERT(reg <= ARMRegisters::pc);
  590. ASSERT(shiftReg <= ARMRegisters::pc);
  591. return reg | (shiftReg << 8) | 0x50;
  592. }
  593. // General helpers
  594. size_t codeSize() const
  595. {
  596. return m_buffer.codeSize();
  597. }
  598. void ensureSpace(int insnSpace, int constSpace)
  599. {
  600. m_buffer.ensureSpace(insnSpace, constSpace);
  601. }
  602. int sizeOfConstantPool()
  603. {
  604. return m_buffer.sizeOfConstantPool();
  605. }
  606. AssemblerLabel labelIgnoringWatchpoints()
  607. {
  608. m_buffer.ensureSpaceForAnyInstruction();
  609. return m_buffer.label();
  610. }
  611. AssemblerLabel labelForWatchpoint()
  612. {
  613. m_buffer.ensureSpaceForAnyInstruction(maxJumpReplacementSize() / sizeof(ARMWord));
  614. AssemblerLabel result = m_buffer.label();
  615. if (result.m_offset != (m_indexOfTailOfLastWatchpoint - maxJumpReplacementSize()))
  616. result = label();
  617. m_indexOfTailOfLastWatchpoint = result.m_offset + maxJumpReplacementSize();
  618. return label();
  619. }
  620. AssemblerLabel label()
  621. {
  622. AssemblerLabel result = labelIgnoringWatchpoints();
  623. while (result.m_offset + 1 < m_indexOfTailOfLastWatchpoint) {
  624. nop();
  625. // The available number of instructions are ensured by labelForWatchpoint.
  626. result = m_buffer.label();
  627. }
  628. return result;
  629. }
  630. AssemblerLabel align(int alignment)
  631. {
  632. while (!m_buffer.isAligned(alignment))
  633. mov(ARMRegisters::r0, ARMRegisters::r0);
  634. return label();
  635. }
  636. AssemblerLabel loadBranchTarget(int rd, Condition cc = AL, int useConstantPool = 0)
  637. {
  638. ensureSpace(sizeof(ARMWord), sizeof(ARMWord));
  639. m_jumps.append(m_buffer.codeSize() | (useConstantPool & 0x1));
  640. ldrUniqueImmediate(rd, InvalidBranchTarget, cc);
  641. return m_buffer.label();
  642. }
  643. AssemblerLabel jmp(Condition cc = AL, int useConstantPool = 0)
  644. {
  645. return loadBranchTarget(ARMRegisters::pc, cc, useConstantPool);
  646. }
  647. PassRefPtr<ExecutableMemoryHandle> executableCopy(VM&, void* ownerUID, JITCompilationEffort);
  648. unsigned debugOffset() { return m_buffer.debugOffset(); }
  649. // DFG assembly helpers for moving data between fp and registers.
  650. void vmov(RegisterID rd1, RegisterID rd2, FPRegisterID rn)
  651. {
  652. vmov_arm64(rd1, rd2, rn);
  653. }
  654. void vmov(FPRegisterID rd, RegisterID rn1, RegisterID rn2)
  655. {
  656. vmov_vfp64(rd, rn1, rn2);
  657. }
  658. // Patching helpers
  659. static ARMWord* getLdrImmAddress(ARMWord* insn)
  660. {
  661. // Check for call
  662. if ((*insn & LdrPcImmediateInstructionMask) != LdrPcImmediateInstruction) {
  663. // Must be BLX
  664. ASSERT((*insn & BlxInstructionMask) == BlxInstruction);
  665. insn--;
  666. }
  667. // Must be an ldr ..., [pc +/- imm]
  668. ASSERT((*insn & LdrPcImmediateInstructionMask) == LdrPcImmediateInstruction);
  669. ARMWord addr = reinterpret_cast<ARMWord>(insn) + DefaultPrefetchOffset * sizeof(ARMWord);
  670. if (*insn & DataTransferUp)
  671. return reinterpret_cast<ARMWord*>(addr + (*insn & DataTransferOffsetMask));
  672. return reinterpret_cast<ARMWord*>(addr - (*insn & DataTransferOffsetMask));
  673. }
  674. static ARMWord* getLdrImmAddressOnPool(ARMWord* insn, uint32_t* constPool)
  675. {
  676. // Must be an ldr ..., [pc +/- imm]
  677. ASSERT((*insn & LdrPcImmediateInstructionMask) == LdrPcImmediateInstruction);
  678. if (*insn & 0x1)
  679. return reinterpret_cast<ARMWord*>(constPool + ((*insn & DataTransferOffsetMask) >> 1));
  680. return getLdrImmAddress(insn);
  681. }
  682. static void patchPointerInternal(intptr_t from, void* to)
  683. {
  684. ARMWord* insn = reinterpret_cast<ARMWord*>(from);
  685. ARMWord* addr = getLdrImmAddress(insn);
  686. *addr = reinterpret_cast<ARMWord>(to);
  687. }
  688. static ARMWord patchConstantPoolLoad(ARMWord load, ARMWord value)
  689. {
  690. value = (value << 1) + 1;
  691. ASSERT(!(value & ~DataTransferOffsetMask));
  692. return (load & ~DataTransferOffsetMask) | value;
  693. }
  694. static void patchConstantPoolLoad(void* loadAddr, void* constPoolAddr);
  695. // Read pointers
  696. static void* readPointer(void* from)
  697. {
  698. ARMWord* instruction = reinterpret_cast<ARMWord*>(from);
  699. ARMWord* address = getLdrImmAddress(instruction);
  700. return *reinterpret_cast<void**>(address);
  701. }
  702. // Patch pointers
  703. static void linkPointer(void* code, AssemblerLabel from, void* to)
  704. {
  705. patchPointerInternal(reinterpret_cast<intptr_t>(code) + from.m_offset, to);
  706. }
  707. static void repatchInt32(void* where, int32_t to)
  708. {
  709. patchPointerInternal(reinterpret_cast<intptr_t>(where), reinterpret_cast<void*>(to));
  710. }
  711. static void repatchCompact(void* where, int32_t value)
  712. {
  713. ARMWord* instruction = reinterpret_cast<ARMWord*>(where);
  714. ASSERT((*instruction & 0x0f700000) == LoadUint32);
  715. if (value >= 0)
  716. *instruction = (*instruction & 0xff7ff000) | DataTransferUp | value;
  717. else
  718. *instruction = (*instruction & 0xff7ff000) | -value;
  719. cacheFlush(instruction, sizeof(ARMWord));
  720. }
  721. static void repatchPointer(void* from, void* to)
  722. {
  723. patchPointerInternal(reinterpret_cast<intptr_t>(from), to);
  724. }
  725. // Linkers
  726. static intptr_t getAbsoluteJumpAddress(void* base, int offset = 0)
  727. {
  728. return reinterpret_cast<intptr_t>(base) + offset - sizeof(ARMWord);
  729. }
  730. void linkJump(AssemblerLabel from, AssemblerLabel to)
  731. {
  732. ARMWord* insn = reinterpret_cast<ARMWord*>(getAbsoluteJumpAddress(m_buffer.data(), from.m_offset));
  733. ARMWord* addr = getLdrImmAddressOnPool(insn, m_buffer.poolAddress());
  734. *addr = toARMWord(to.m_offset);
  735. }
  736. static void linkJump(void* code, AssemblerLabel from, void* to)
  737. {
  738. patchPointerInternal(getAbsoluteJumpAddress(code, from.m_offset), to);
  739. }
  740. static void relinkJump(void* from, void* to)
  741. {
  742. patchPointerInternal(getAbsoluteJumpAddress(from), to);
  743. }
  744. static void linkCall(void* code, AssemblerLabel from, void* to)
  745. {
  746. patchPointerInternal(getAbsoluteJumpAddress(code, from.m_offset), to);
  747. }
  748. static void relinkCall(void* from, void* to)
  749. {
  750. patchPointerInternal(getAbsoluteJumpAddress(from), to);
  751. }
  752. static void* readCallTarget(void* from)
  753. {
  754. return reinterpret_cast<void*>(readPointer(reinterpret_cast<void*>(getAbsoluteJumpAddress(from))));
  755. }
// Overwrite the code at instructionStart with a jump to `to`. Emits a single
// direct branch when the displacement fits in the 24-bit branch offset;
// otherwise falls back to a two-word "ldr pc, [pc, #4]" plus inline literal.
static void replaceWithJump(void* instructionStart, void* to)
{
    ARMWord* instruction = reinterpret_cast<ARMWord*>(instructionStart);
    // The branch displacement is relative to the PC as read by the CPU, i.e.
    // the instruction address advanced by DefaultPrefetchOffset words.
    intptr_t difference = reinterpret_cast<intptr_t>(to) - (reinterpret_cast<intptr_t>(instruction) + DefaultPrefetchOffset * sizeof(ARMWord));
    // NOTE(review): only bit 0 of the displacement is tested here -- presumably
    // rejecting Thumb-interworking targets that a plain B cannot reach; confirm.
    if (!(difference & 1)) {
        // Branch offsets are encoded in words, not bytes.
        difference >>= 2;
        if ((difference <= MaximumBranchOffsetDistance && difference >= MinimumBranchOffsetDistance)) {
            // Direct branch.
            instruction[0] = B | AL | (difference & BranchOffsetMask);
            cacheFlush(instruction, sizeof(ARMWord));
            return;
        }
    }
    // Load target: "ldr pc, [pc, #4]" followed by the literal target address.
    // Two words total, matching maxJumpReplacementSize().
    instruction[0] = LoadUint32 | AL | RN(ARMRegisters::pc) | RD(ARMRegisters::pc) | 4;
    instruction[1] = reinterpret_cast<ARMWord>(to);
    cacheFlush(instruction, sizeof(ARMWord) * 2);
}
  774. static ptrdiff_t maxJumpReplacementSize()
  775. {
  776. return sizeof(ARMWord) * 2;
  777. }
// Turn a patchable "add" at instructionStart back into the corresponding
// "ldr" (same condition, registers and immediate); no-op if already a load.
static void replaceWithLoad(void* instructionStart)
{
    ARMWord* instruction = reinterpret_cast<ARMWord*>(instructionStart);
    // NOTE(review): this flush happens before the word is even inspected --
    // presumably to synchronize caches after earlier patching; confirm intent.
    cacheFlush(instruction, sizeof(ARMWord));
    ASSERT((*instruction & LdrOrAddInstructionMask) == AddImmediateInstruction || (*instruction & LdrOrAddInstructionMask) == LdrImmediateInstruction);
    if ((*instruction & LdrOrAddInstructionMask) == AddImmediateInstruction) {
        // Swap only the opcode bits selected by the mask.
        *instruction = (*instruction & ~LdrOrAddInstructionMask) | LdrImmediateInstruction;
        cacheFlush(instruction, sizeof(ARMWord));
    }
}
// Inverse of replaceWithLoad(): turn a patchable "ldr" at instructionStart
// into the corresponding "add" (same condition, registers and immediate);
// no-op if it is already an add.
static void replaceWithAddressComputation(void* instructionStart)
{
    ARMWord* instruction = reinterpret_cast<ARMWord*>(instructionStart);
    // NOTE(review): flush precedes inspection -- presumably to synchronize
    // caches after earlier patching; confirm intent (mirrors replaceWithLoad).
    cacheFlush(instruction, sizeof(ARMWord));
    ASSERT((*instruction & LdrOrAddInstructionMask) == AddImmediateInstruction || (*instruction & LdrOrAddInstructionMask) == LdrImmediateInstruction);
    if ((*instruction & LdrOrAddInstructionMask) == LdrImmediateInstruction) {
        // Swap only the opcode bits selected by the mask.
        *instruction = (*instruction & ~LdrOrAddInstructionMask) | AddImmediateInstruction;
        cacheFlush(instruction, sizeof(ARMWord));
    }
}
// Revert a patched branchPtr site back to its original two-instruction form:
// rebuild "ldr S1, <pool>" in slot 0 from the pc-relative load found in slot 2,
// store `imm` into the referenced constant-pool slot, and rebuild "cmp rn, S1"
// in slot 1. Both rewritten words are then flushed.
static void revertBranchPtrWithPatch(void* instructionStart, RegisterID rn, ARMWord imm)
{
    ARMWord* instruction = reinterpret_cast<ARMWord*>(instructionStart);
    ASSERT((instruction[2] & LdrPcImmediateInstructionMask) == LdrPcImmediateInstruction);
    // Copy instruction[2]'s addressing bits (condition and Rd masked off);
    // the +sizeof(ARMWord) presumably rebases the pc-relative displacement for
    // the slot two words earlier -- confirm against the emitter.
    instruction[0] = toARMWord(AL) | ((instruction[2] & 0x0fff0fff) + sizeof(ARMWord)) | RD(ARMRegisters::S1);
    // Must run after instruction[0] is written: the pool slot is located by
    // decoding the freshly rebuilt load.
    *getLdrImmAddress(instruction) = imm;
    instruction[1] = toARMWord(AL) | CMP | SetConditionalCodes | RN(rn) | RM(ARMRegisters::S1);
    cacheFlush(instruction, 2 * sizeof(ARMWord));
}
  807. // Address operations
  808. static void* getRelocatedAddress(void* code, AssemblerLabel label)
  809. {
  810. return reinterpret_cast<void*>(reinterpret_cast<char*>(code) + label.m_offset);
  811. }
  812. // Address differences
  813. static int getDifferenceBetweenLabels(AssemblerLabel a, AssemblerLabel b)
  814. {
  815. return b.m_offset - a.m_offset;
  816. }
  817. static unsigned getCallReturnOffset(AssemblerLabel call)
  818. {
  819. return call.m_offset;
  820. }
  821. // Handle immediates
  822. static ARMWord getOp2(ARMWord imm);
  823. // Fast case if imm is known to be between 0 and 0xff
  824. static ARMWord getOp2Byte(ARMWord imm)
  825. {
  826. ASSERT(imm <= 0xff);
  827. return Op2Immediate | imm;
  828. }
  829. static ARMWord getOp2Half(ARMWord imm)
  830. {
  831. ASSERT(imm <= 0xff);
  832. return ImmediateForHalfWordTransfer | (imm & 0x0f) | ((imm & 0xf0) << 4);
  833. }
  834. #if WTF_ARM_ARCH_AT_LEAST(7)
  835. static ARMWord getImm16Op2(ARMWord imm)
  836. {
  837. if (imm <= 0xffff)
  838. return (imm & 0xf000) << 4 | (imm & 0xfff);
  839. return InvalidImmediate;
  840. }
  841. #endif
  842. ARMWord getImm(ARMWord imm, int tmpReg, bool invert = false);
  843. void moveImm(ARMWord imm, int dest);
  844. ARMWord encodeComplexImm(ARMWord imm, int dest);
  845. // Memory load/store helpers
  846. void dataTransfer32(DataTransferTypeA, RegisterID srcDst, RegisterID base, int32_t offset);
  847. void baseIndexTransfer32(DataTransferTypeA, RegisterID srcDst, RegisterID base, RegisterID index, int scale, int32_t offset);
  848. void dataTransfer16(DataTransferTypeB, RegisterID srcDst, RegisterID base, int32_t offset);
  849. void baseIndexTransfer16(DataTransferTypeB, RegisterID srcDst, RegisterID base, RegisterID index, int scale, int32_t offset);
  850. void dataTransferFloat(DataTransferTypeFloat, FPRegisterID srcDst, RegisterID base, int32_t offset);
  851. void baseIndexTransferFloat(DataTransferTypeFloat, FPRegisterID srcDst, RegisterID base, RegisterID index, int scale, int32_t offset);
// Constant pool handlers
  853. static ARMWord placeConstantPoolBarrier(int offset)
  854. {
  855. offset = (offset - sizeof(ARMWord)) >> 2;
  856. ASSERT((offset <= MaximumBranchOffsetDistance && offset >= MinimumBranchOffsetDistance));
  857. return AL | B | (offset & BranchOffsetMask);
  858. }
#if OS(LINUX) && COMPILER(GCC)
// Flush [begin, end) via the private ARM Linux cache-flush syscall:
// r7 = 0xf0002 (presumably __ARM_NR_cacheflush -- confirm), r0/r1 = range,
// r2 = 0 (flags). r7 is saved and restored because it may hold important
// state in the caller (e.g. the Thumb frame pointer).
static inline void linuxPageFlush(uintptr_t begin, uintptr_t end)
{
    asm volatile(
        "push {r7}\n"
        "mov r0, %0\n"
        "mov r1, %1\n"
        "mov r7, #0xf0000\n"
        "add r7, r7, #0x2\n"
        "mov r2, #0x0\n"
        "svc 0x0\n"
        "pop {r7}\n"
        :
        : "r" (begin), "r" (end)
        : "r0", "r1", "r2");
}
#endif
#if OS(LINUX) && COMPILER(RVCT)
// RVCT builds implement this in assembly elsewhere.
static __asm void cacheFlush(void* code, size_t);
#else
// Synchronize the instruction cache with [code, code + size) after code has
// been written or patched. Platform-specific implementations below.
static void cacheFlush(void* code, size_t size)
{
#if OS(LINUX) && COMPILER(GCC)
    // The cacheflush syscall is issued one page at a time -- presumably to
    // bound per-call latency / satisfy kernel range limits; confirm.
    size_t page = pageSize();
    uintptr_t current = reinterpret_cast<uintptr_t>(code);
    uintptr_t end = current + size;
    uintptr_t firstPageEnd = (current & ~(page - 1)) + page;
    if (end <= firstPageEnd) {
        // Entire range fits within a single page.
        linuxPageFlush(current, end);
        return;
    }
    // Leading partial page, then whole pages, then the trailing remainder.
    linuxPageFlush(current, firstPageEnd);
    for (current = firstPageEnd; current + page < end; current += page)
        linuxPageFlush(current, current + page);
    linuxPageFlush(current, end);
#elif OS(WINCE)
    CacheRangeFlush(code, size, CACHE_SYNC_ALL);
#elif OS(QNX) && ENABLE(ASSEMBLER_WX_EXCLUSIVE)
    // W^X builds flush elsewhere; nothing to do here.
    UNUSED_PARAM(code);
    UNUSED_PARAM(size);
#elif OS(QNX)
    msync(code, size, MS_INVALIDATE_ICACHE);
#else
#error "The cacheFlush support is missing on this platform."
#endif
}
#endif
  906. private:
  907. static ARMWord RM(int reg)
  908. {
  909. ASSERT(reg <= ARMRegisters::pc);
  910. return reg;
  911. }
  912. static ARMWord RS(int reg)
  913. {
  914. ASSERT(reg <= ARMRegisters::pc);
  915. return reg << 8;
  916. }
  917. static ARMWord RD(int reg)
  918. {
  919. ASSERT(reg <= ARMRegisters::pc);
  920. return reg << 12;
  921. }
  922. static ARMWord RN(int reg)
  923. {
  924. ASSERT(reg <= ARMRegisters::pc);
  925. return reg << 16;
  926. }
  927. static ARMWord getConditionalField(ARMWord i)
  928. {
  929. return i & ConditionalFieldMask;
  930. }
// Widen a condition code to a raw instruction word so it can be OR-combined
// with opcode bits.
static ARMWord toARMWord(Condition cc)
{
    return static_cast<ARMWord>(cc);
}
// Explicitly convert a 32-bit value to an instruction word.
static ARMWord toARMWord(uint32_t u)
{
    return static_cast<ARMWord>(u);
}
  939. int genInt(int reg, ARMWord imm, bool positive);
  940. ARMBuffer m_buffer;
  941. Jumps m_jumps;
  942. uint32_t m_indexOfTailOfLastWatchpoint;
  943. };
  944. } // namespace JSC
  945. #endif // ENABLE(ASSEMBLER) && CPU(ARM_TRADITIONAL)
  946. #endif // ARMAssembler_h