main.c 30 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
7127812791280128112821283128412851286128712881289129012911292129312941295
  1. /*
  2. * Copyright (C) 2006-2010 Michael Buesch <m@bues.ch>
  3. *
  4. * This program is free software; you can redistribute it and/or modify
  5. * it under the terms of the GNU General Public License version 2
  6. * as published by the Free Software Foundation.
  7. *
  8. * This program is distributed in the hope that it will be useful,
  9. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  10. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  11. * GNU General Public License for more details.
  12. */
  13. #include "main.h"
  14. #include "list.h"
  15. #include "util.h"
  16. #include "parser.h"
  17. #include "args.h"
  18. #include "initvals.h"
  19. #include <stdio.h>
  20. #include <stdlib.h>
  21. #include <string.h>
  22. extern int yyparse(void);
  23. extern int yydebug;
  24. struct file infile;
  25. const char *infile_name;
  26. const char *outfile_name;
/* A single output operand of an assembled instruction.
 * It holds either the final encoded operand value, or an
 * unresolved label reference that is patched later in
 * resolve_labels(). */
struct out_operand {
	enum {
		OUTOPER_NORMAL,
		OUTOPER_LABELREF,
	} type;
	union {
		unsigned int operand; /* For NORMAL */
		struct label *label; /* For LABELREF */
	} u;
};
/* One element of the assembler output stream: either an encoded
 * instruction or a label position marker. */
struct code_output {
	enum {
		OUT_INSN,
		OUT_LABEL,
	} type;
	/* Set to true, if this is a jump instruction.
	 * This is only used when assembling RET to check
	 * whether the previous instruction was a jump or not. */
	bool is_jump_insn;
	/* The instruction opcode (only set for OUT_INSN). */
	unsigned int opcode;
	/* The three encoded operands (only set for OUT_INSN). */
	struct out_operand operands[3];
	/* The absolute address of this instruction.
	 * Only used in resolve_labels(). */
	unsigned int address;
	const char *labelname; /* only for OUT_LABEL */
	/* Set to 1, if this is the %start instruction. */
	int is_start_insn;
	/* Linkage into assembler_context->output. */
	struct list_head list;
};
/* Per-run assembler state. */
struct assembler_context {
	/* The architecture version (802.11 core revision) */
	unsigned int arch;
	/* The label named by the %start directive, or NULL. */
	struct label *start_label;
	/* Tracking stuff */
	/* Statement currently being processed; used by the asm_error/
	 * asm_warn/asm_info macros to report file and line. */
	struct statement *cur_stmt;
	/* List of struct code_output: the generated code stream. */
	struct list_head output;
};
/* Iterate over all parsed statements of the input file, keeping
 * ctx->cur_stmt up to date so that error messages can report the
 * source file and line. Must be closed with for_each_statement_end. */
#define for_each_statement(ctx, s) \
	list_for_each_entry(s, &infile.sl, list) { \
		ctx->cur_stmt = s;
#define for_each_statement_end(ctx, s) \
	} do { ctx->cur_stmt = NULL; } while (0)

/* Print a message of the given type to stderr, annotated with the
 * source file and line number of "stmt" (if non-NULL). */
#define _msg_helper(type, stmt, msg, x...) do { \
	fprintf(stderr, "Assembler " type); \
	if (stmt) { \
		fprintf(stderr, " (file \"%s\", line %u)", \
			stmt->info.file, \
			stmt->info.lineno); \
	} \
	fprintf(stderr, ":\n " msg "\n" ,##x); \
} while (0)

/* Report a fatal error for the current statement and exit(1). */
#define asm_error(ctx, msg, x...) do { \
	_msg_helper("ERROR", (ctx)->cur_stmt, msg ,##x); \
	exit(1); \
} while (0)

/* Report a non-fatal warning for the current statement. */
#define asm_warn(ctx, msg, x...) \
	_msg_helper("warning", (ctx)->cur_stmt, msg ,##x)

/* Report an informational message for the current statement. */
#define asm_info(ctx, msg, x...) \
	_msg_helper("info", (ctx)->cur_stmt, msg ,##x)
/* Evaluate the assembler directives (%arch, %start) from the parsed
 * statement list and record the results in the context.
 * Errors out on duplicate directives or a missing %arch. */
static void eval_directives(struct assembler_context *ctx)
{
	struct statement *s;
	struct asmdir *ad;
	struct label *l; /* NOTE(review): unused in this function */
	int have_start_label = 0;
	int have_arch = 0;
	unsigned int arch_fallback = 0;

	for_each_statement(ctx, s) {
		if (s->type == STMT_ASMDIR) {
			ad = s->u.asmdir;
			switch (ad->type) {
			case ADIR_ARCH:
				if (have_arch)
					asm_error(ctx, "Multiple %%arch definitions");
				ctx->arch = ad->u.arch;
				/* Core revisions 6-14 actually use the v5
				 * firmware architecture, revisions above 15
				 * use v15. Map such values and warn. */
				if (ctx->arch > 5 && ctx->arch < 15)
					arch_fallback = 5;
				if (ctx->arch > 15)
					arch_fallback = 15;
				if (arch_fallback) {
					asm_warn(ctx, "Using %%arch %d is incorrect. "
						 "The wireless core revision %d uses the "
						 "firmware architecture %d. So use %%arch %d",
						 ctx->arch, ctx->arch, arch_fallback, arch_fallback);
					ctx->arch = arch_fallback;
				}
				if (ctx->arch != 5 && ctx->arch != 15) {
					asm_error(ctx, "Architecture version %u unsupported",
						  ctx->arch);
				}
				have_arch = 1;
				break;
			case ADIR_START:
				if (have_start_label)
					asm_error(ctx, "Multiple %%start definitions");
				ctx->start_label = ad->u.start;
				have_start_label = 1;
				break;
			default:
				asm_error(ctx, "Unknown ASM directive");
			}
		}
	} for_each_statement_end(ctx, s);

	if (!have_arch)
		asm_error(ctx, "No %%arch defined");
	if (!have_start_label)
		asm_info(ctx, "Using start address 0");
}
  135. static bool is_possible_imm(unsigned int imm)
  136. {
  137. unsigned int mask;
  138. /* Immediates are only possible up to 16bit (wordsize). */
  139. mask = ~0;
  140. mask <<= 16;
  141. if (imm & (1 << 15)) {
  142. if ((imm & mask) != mask &&
  143. (imm & mask) != 0)
  144. return 0;
  145. } else {
  146. if ((imm & mask) != 0)
  147. return 0;
  148. }
  149. return 1;
  150. }
  151. static unsigned int immediate_nr_bits(struct assembler_context *ctx)
  152. {
  153. switch (ctx->arch) {
  154. case 5:
  155. return 10; /* 10 bits */
  156. case 15:
  157. return 11; /* 11 bits */
  158. }
  159. asm_error(ctx, "Internal error: immediate_nr_bits unknown arch\n");
  160. }
  161. static bool is_valid_imm(struct assembler_context *ctx,
  162. unsigned int imm)
  163. {
  164. unsigned int mask;
  165. unsigned int immediate_size;
  166. /* This function checks if the immediate value is representable
  167. * as a native immediate operand.
  168. *
  169. * For v5 architecture the immediate can be 10bit long.
  170. * For v15 architecture the immediate can be 11bit long.
  171. *
  172. * The value is sign-extended, so we allow values
  173. * of 0xFFFA, for example.
  174. */
  175. if (!is_possible_imm(imm))
  176. return 0;
  177. imm &= 0xFFFF;
  178. immediate_size = immediate_nr_bits(ctx);
  179. /* First create a mask with all possible bits for
  180. * an immediate value unset. */
  181. mask = (~0 << immediate_size) & 0xFFFF;
  182. /* Is the sign bit of the immediate set? */
  183. if (imm & (1 << (immediate_size - 1))) {
  184. /* Yes, so all bits above that must also
  185. * be set, otherwise we can't represent this
  186. * value in an operand. */
  187. if ((imm & mask) != mask)
  188. return 0;
  189. } else {
  190. /* All bits above the immediate's size must
  191. * be unset. */
  192. if (imm & mask)
  193. return 0;
  194. }
  195. return 1;
  196. }
  197. /* This checks if the value is nonzero and a power of two. */
  198. static bool is_power_of_two(unsigned int value)
  199. {
  200. return (value && ((value & (value - 1)) == 0));
  201. }
  202. /* This checks if all bits set in the mask are contiguous.
  203. * Zero is also considered a contiguous mask. */
  204. static bool is_contiguous_bitmask(unsigned int mask)
  205. {
  206. unsigned int low_zeros_mask;
  207. bool is_contiguous;
  208. if (mask == 0)
  209. return 1;
  210. /* Turn the lowest zeros of the mask into a bitmask.
  211. * Example: 0b00011000 -> 0b00000111 */
  212. low_zeros_mask = (mask - 1) & ~mask;
  213. /* Adding the low_zeros_mask to the original mask
  214. * basically is a bitwise OR operation.
  215. * If the original mask was contiguous, we end up with a
  216. * contiguous bitmask from bit 0 to the highest bit
  217. * set in the original mask. Adding 1 will result in a single
  218. * bit set, which is a power of two. */
  219. is_contiguous = is_power_of_two(mask + low_zeros_mask + 1);
  220. return is_contiguous;
  221. }
  222. static unsigned int generate_imm_operand(struct assembler_context *ctx,
  223. const struct immediate *imm)
  224. {
  225. unsigned int val, tmp;
  226. unsigned int mask;
  227. val = 0xC00;
  228. if (ctx->arch == 15)
  229. val <<= 1;
  230. tmp = imm->imm;
  231. if (!is_valid_imm(ctx, tmp)) {
  232. asm_warn(ctx, "IMMEDIATE 0x%X (%d) too long "
  233. "(> %u bits + sign). Did you intend to "
  234. "use implicit sign extension?",
  235. tmp, (int)tmp, immediate_nr_bits(ctx) - 1);
  236. }
  237. if (ctx->arch == 15)
  238. tmp &= 0x7FF;
  239. else
  240. tmp &= 0x3FF;
  241. val |= tmp;
  242. return val;
  243. }
/* Encode a register reference (GPR, SPR or offset register) as an
 * instruction operand word. The base encoding is shifted one bit
 * left on the v15 architecture. */
static unsigned int generate_reg_operand(struct assembler_context *ctx,
					 const struct registr *reg)
{
	unsigned int val = 0;

	switch (reg->type) {
	case GPR:
		/* General purpose register, base encoding 0xBC0. */
		val |= 0xBC0;
		if (ctx->arch == 15)
			val <<= 1;
		if (reg->nr & ~0x3F) /* REVISIT: 128 regs for v15 arch possible? Probably not... */
			asm_error(ctx, "GPR-nr too big");
		val |= reg->nr;
		break;
	case SPR:
		/* Special purpose register, base encoding 0x800. */
		val |= 0x800;
		if (ctx->arch == 15)
			val <<= 1;
		if (reg->nr & ~0x1FF)
			asm_error(ctx, "SPR-nr too big");
		val |= reg->nr;
		break;
	case OFFR:
		/* Offset register, base encoding 0x860. */
		val |= 0x860;
		if (ctx->arch == 15)
			val <<= 1;
		if (reg->nr & ~0x7)
			asm_error(ctx, "OFFR-nr too big");
		val |= reg->nr;
		break;
	default:
		asm_error(ctx, "generate_reg_operand() regtype");
	}

	return val;
}
/* Encode a memory access (direct, or indirect through an offset
 * register) as an instruction operand word. Overlong offsets are
 * truncated with a warning. */
static unsigned int generate_mem_operand(struct assembler_context *ctx,
					 const struct memory *mem)
{
	unsigned int val = 0, off, reg, off_mask, reg_shift;

	switch (mem->type) {
	case MEM_DIRECT:
		/* Direct access: the operand is the plain offset.
		 * 11 bits wide on v5, 12 bits on v15. */
		off = mem->offset;
		switch (ctx->arch) {
		case 5:
			if (off & ~0x7FF) {
				asm_warn(ctx, "DIRECT memoffset 0x%X too long (> 11 bits)", off);
				off &= 0x7FF;
			}
			break;
		case 15:
			if (off & ~0xFFF) {
				asm_warn(ctx, "DIRECT memoffset 0x%X too long (> 12 bits)", off);
				off &= 0xFFF;
			}
			break;
		default:
			asm_error(ctx, "Internal error: generate_mem_operand invalid arch");
		}
		val |= off;
		break;
	case MEM_INDIRECT:
		/* Indirect access: base marker plus offset field and
		 * offset-register number field; field widths differ
		 * between v5 and v15. */
		switch (ctx->arch) {
		case 5:
			val = 0xA00;
			off_mask = 0x3F;
			reg_shift = 6;
			break;
		case 15:
			val = 0x1400;
			off_mask = 0x7F;
			reg_shift = 7;
			break;
		default:
			asm_error(ctx, "Internal error: MEM_INDIRECT invalid arch\n");
		}
		off = mem->offset;
		reg = mem->offr_nr;
		if (off & ~off_mask) {
			asm_warn(ctx, "INDIRECT memoffset 0x%X too long (> %u bits)",
				 off, reg_shift);
			off &= off_mask;
		}
		if (reg > 6) {
			/* Assembler bug. The parser shouldn't pass this value. */
			asm_error(ctx, "OFFR-nr too big");
		}
		if (reg == 6) {
			asm_warn(ctx, "Using offset register 6. This register is broken "
				 "on certain devices. Use off0 to off5 only.");
		}
		val |= off;
		val |= (reg << reg_shift);
		break;
	default:
		asm_error(ctx, "generate_mem_operand() memtype");
	}

	return val;
}
  341. static void generate_operand(struct assembler_context *ctx,
  342. const struct operand *oper,
  343. struct out_operand *out)
  344. {
  345. out->type = OUTOPER_NORMAL;
  346. switch (oper->type) {
  347. case OPER_IMM:
  348. out->u.operand = generate_imm_operand(ctx, oper->u.imm);
  349. break;
  350. case OPER_REG:
  351. out->u.operand = generate_reg_operand(ctx, oper->u.reg);
  352. break;
  353. case OPER_MEM:
  354. out->u.operand = generate_mem_operand(ctx, oper->u.mem);
  355. break;
  356. case OPER_LABEL:
  357. out->type = OUTOPER_LABELREF;
  358. out->u.label = oper->u.label;
  359. break;
  360. case OPER_ADDR:
  361. out->u.operand = oper->u.addr->addr;
  362. break;
  363. case OPER_RAW:
  364. out->u.operand = oper->u.raw;
  365. break;
  366. default:
  367. asm_error(ctx, "generate_operand() operstate");
  368. }
  369. }
  370. static struct code_output * do_assemble_insn(struct assembler_context *ctx,
  371. struct instruction *insn,
  372. unsigned int opcode)
  373. {
  374. unsigned int i;
  375. struct operlist *ol;
  376. int nr_oper = 0;
  377. uint64_t code = 0;
  378. struct code_output *out;
  379. struct label *labelref = NULL;
  380. struct operand *oper;
  381. int have_spr_operand = 0;
  382. int have_mem_operand = 0;
  383. out = xmalloc(sizeof(*out));
  384. INIT_LIST_HEAD(&out->list);
  385. out->opcode = opcode;
  386. ol = insn->operands;
  387. if (ARRAY_SIZE(out->operands) > ARRAY_SIZE(ol->oper))
  388. asm_error(ctx, "Internal operand array confusion");
  389. for (i = 0; i < ARRAY_SIZE(out->operands); i++) {
  390. oper = ol->oper[i];
  391. if (!oper)
  392. continue;
  393. /* If this is an INPUT operand (first or second), we must
  394. * make sure that not both are accessing SPR or MEMORY.
  395. * The device only supports one SPR or MEMORY operand in
  396. * the input operands. */
  397. if ((i == 0) || (i == 1)) {
  398. if ((oper->type == OPER_REG) &&
  399. (oper->u.reg->type == SPR)) {
  400. if (have_spr_operand)
  401. asm_error(ctx, "Multiple SPR input operands in one instruction");
  402. have_spr_operand = 1;
  403. }
  404. if (oper->type == OPER_MEM) {
  405. if (have_mem_operand)
  406. asm_error(ctx, "Multiple MEMORY input operands in on instruction");
  407. have_mem_operand = 1;
  408. }
  409. }
  410. generate_operand(ctx, oper, &out->operands[i]);
  411. nr_oper++;
  412. }
  413. if (nr_oper != 3)
  414. asm_error(ctx, "Internal error: nr_oper at "
  415. "lowlevel do_assemble_insn");
  416. list_add_tail(&out->list, &ctx->output);
  417. return out;
  418. }
/* Assemble a RET instruction.
 * Warns if the directly preceding instruction is a jump, because
 * the hardware misbehaves on a jump-then-RET sequence. */
static void do_assemble_ret(struct assembler_context *ctx,
			    struct instruction *insn,
			    unsigned int opcode)
{
	struct code_output *out;

	/* Get the previous instruction and check whether it
	 * is a jump instruction. */
	list_for_each_entry_reverse(out, &ctx->output, list) {
		/* Search the last insn. */
		if (out->type == OUT_INSN) {
			if (out->is_jump_insn) {
				asm_warn(ctx, "RET instruction directly after "
					 "jump instruction. The hardware won't like this.");
			}
			break;
		}
	}

	do_assemble_insn(ctx, insn, opcode);
}
/* Merge the MASK and SHIFT extension operands (operand slots 0 and 1)
 * of an extended instruction (SRX/ORX/JZX/JNZX) into the opcode and
 * shift the remaining operands down by two slots.
 * Returns the merged opcode. */
static unsigned int merge_ext_into_opcode(struct assembler_context *ctx,
					  unsigned int opbase,
					  struct instruction *insn)
{
	struct operlist *ol;
	unsigned int opcode;
	unsigned int mask, shift;

	ol = insn->operands;
	opcode = opbase;
	mask = ol->oper[0]->u.raw;
	if (mask & ~0xF)
		asm_error(ctx, "opcode MASK extension too big (> 0xF)");
	shift = ol->oper[1]->u.raw;
	if (shift & ~0xF)
		asm_error(ctx, "opcode SHIFT extension too big (> 0xF)");
	/* MASK occupies bits 4-7 of the opcode, SHIFT bits 0-3. */
	opcode |= (mask << 4);
	opcode |= shift;
	/* Drop the two consumed extension operands from the list. */
	ol->oper[0] = ol->oper[2];
	ol->oper[1] = ol->oper[3];
	ol->oper[2] = ol->oper[4];

	return opcode;
}
/* Merge the condition operand of an external-condition jump
 * (JEXT/JNEXT) into the opcode and rewrite the operand list to
 * "r0, r0, target", since the encoding requires two register slots.
 * Returns the merged opcode.
 * NOTE(review): the fake r0 operands are heap-allocated and never
 * freed; they live until the assembler process exits. */
static unsigned int merge_external_jmp_into_opcode(struct assembler_context *ctx,
						   unsigned int opbase,
						   struct instruction *insn)
{
	struct operand *fake;
	struct registr *fake_reg;
	struct operand *target;
	struct operlist *ol;
	unsigned int cond;
	unsigned int opcode;

	ol = insn->operands;
	opcode = opbase;
	cond = ol->oper[0]->u.imm->imm;
	if (cond & ~0xFF)
		asm_error(ctx, "External jump condition value too big (> 0xFF)");
	opcode |= cond;
	target = ol->oper[1];
	memset(ol->oper, 0, sizeof(ol->oper));
	/* This instruction has two fake r0 operands
	 * at position 0 and 1. */
	fake = xmalloc(sizeof(*fake));
	fake_reg = xmalloc(sizeof(*fake_reg));
	fake->type = OPER_REG;
	fake->u.reg = fake_reg;
	fake_reg->type = GPR;
	fake_reg->nr = 0;
	ol->oper[0] = fake;
	ol->oper[1] = fake;
	ol->oper[2] = target;

	return opcode;
}
  491. static void assemble_instruction(struct assembler_context *ctx,
  492. struct instruction *insn);
/* Emulate the MOV pseudo-instruction.
 * A MOV is assembled as "OR in, 0, out". If an immediate source does
 * not fit into a native immediate operand, the value is split into
 * high and low bytes and assembled as an ORX instead.
 * All emulated operands live on this stack frame; they are fully
 * consumed by assemble_instruction() before this function returns. */
static void emulate_mov_insn(struct assembler_context *ctx,
			     struct instruction *insn)
{
	struct instruction em_insn;
	struct operlist em_ol;
	struct operand em_op_shift;
	struct operand em_op_mask;
	struct operand em_op_x;
	struct operand em_op_y;
	struct immediate em_imm_x;
	struct immediate em_imm_y;
	struct operand *in, *out;
	unsigned int tmp;

	/* This is a pseudo-OP. We emulate it by OR or ORX */
	in = insn->operands->oper[0];
	out = insn->operands->oper[1];

	/* Default emulation: OR in, 0, out */
	em_insn.op = OP_OR;
	em_ol.oper[0] = in;
	em_imm_x.imm = 0;
	em_op_x.type = OPER_IMM;
	em_op_x.u.imm = &em_imm_x;
	em_ol.oper[1] = &em_op_x;
	em_ol.oper[2] = out;

	if (in->type == OPER_IMM) {
		tmp = in->u.imm->imm;
		if (!is_possible_imm(tmp))
			asm_error(ctx, "MOV operand 0x%X > 16bit", tmp);
		if (!is_valid_imm(ctx, tmp)) {
			/* Immediate too big for plain OR */
			/* ORX with mask 0x7, shift 0x8: assembles the
			 * high byte from the first and the low byte from
			 * the second immediate operand. */
			em_insn.op = OP_ORX;
			em_op_mask.type = OPER_RAW;
			em_op_mask.u.raw = 0x7;
			em_op_shift.type = OPER_RAW;
			em_op_shift.u.raw = 0x8;
			em_imm_x.imm = (tmp & 0xFF00) >> 8;
			em_op_x.type = OPER_IMM;
			em_op_x.u.imm = &em_imm_x;
			em_imm_y.imm = (tmp & 0x00FF);
			em_op_y.type = OPER_IMM;
			em_op_y.u.imm = &em_imm_y;
			em_ol.oper[0] = &em_op_mask;
			em_ol.oper[1] = &em_op_shift;
			em_ol.oper[2] = &em_op_x;
			em_ol.oper[3] = &em_op_y;
			em_ol.oper[4] = out;
		}
	}

	em_insn.operands = &em_ol;
	assemble_instruction(ctx, &em_insn); /* recurse */
}
  543. static void emulate_jmp_insn(struct assembler_context *ctx,
  544. struct instruction *insn)
  545. {
  546. struct instruction em_insn;
  547. struct operlist em_ol;
  548. struct immediate em_condition;
  549. struct operand em_cond_op;
  550. /* This is a pseudo-OP. We emulate it with
  551. * JEXT 0x7F, target */
  552. em_insn.op = OP_JEXT;
  553. em_condition.imm = 0x7F; /* Ext cond: Always true */
  554. em_cond_op.type = OPER_IMM;
  555. em_cond_op.u.imm = &em_condition;
  556. em_ol.oper[0] = &em_cond_op;
  557. em_ol.oper[1] = insn->operands->oper[0]; /* Target */
  558. em_insn.operands = &em_ol;
  559. assemble_instruction(ctx, &em_insn); /* recurse */
  560. }
/* Emulate a JAND ("jump if a & b is nonzero") or, with "inverted"
 * set, a JNAND instruction.
 * If there is exactly one immediate operand and it does not fit a
 * native immediate, the bitmask (which must be contiguous) is turned
 * into mask/shift opcode extensions and the jump is emulated with
 * JZX/JNZX against zero. Otherwise a plain JAND/JNAND is emitted. */
static void emulate_jand_insn(struct assembler_context *ctx,
			      struct instruction *insn,
			      int inverted)
{
	struct code_output *out;
	struct instruction em_insn;
	struct operlist em_ol;
	struct operand em_op_shift;
	struct operand em_op_mask;
	struct operand em_op_y;
	struct immediate em_imm;
	struct operand *oper0, *oper1, *oper2;
	struct operand *imm_oper = NULL;
	unsigned int tmp;
	int first_bit, last_bit;

	oper0 = insn->operands->oper[0];
	oper1 = insn->operands->oper[1];
	oper2 = insn->operands->oper[2];

	/* Find the immediate operand, but only if there is exactly one. */
	if (oper0->type == OPER_IMM)
		imm_oper = oper0;
	if (oper1->type == OPER_IMM)
		imm_oper = oper1;
	if (oper0->type == OPER_IMM && oper1->type == OPER_IMM)
		imm_oper = NULL;

	if (imm_oper) {
		/* We have a single immediate operand.
		 * Check if it's representable by a normal JAND insn.
		 */
		tmp = imm_oper->u.imm->imm;
		if (!is_valid_imm(ctx, tmp)) {
			/* Nope, this must be emulated by JZX/JNZX */
			if (!is_contiguous_bitmask(tmp)) {
				asm_error(ctx, "Long bitmask 0x%X is not contiguous",
					  tmp);
			}
			/* 1-based positions of the lowest and highest set
			 * bit of the contiguous mask. */
			first_bit = ffs(tmp);
			last_bit = ffs(~(tmp >> (first_bit - 1))) - 1 + first_bit - 1;

			if (inverted)
				em_insn.op = OP_JZX;
			else
				em_insn.op = OP_JNZX;
			em_op_shift.type = OPER_RAW;
			em_op_shift.u.raw = first_bit - 1;
			em_op_mask.type = OPER_RAW;
			em_op_mask.u.raw = last_bit - first_bit;
			em_imm.imm = 0;
			em_op_y.type = OPER_IMM;
			em_op_y.u.imm = &em_imm;
			em_ol.oper[0] = &em_op_mask;
			em_ol.oper[1] = &em_op_shift;
			/* The non-immediate operand is the value to test. */
			if (oper0->type != OPER_IMM)
				em_ol.oper[2] = oper0;
			else
				em_ol.oper[2] = oper1;
			em_ol.oper[3] = &em_op_y;
			em_ol.oper[4] = oper2;
			em_insn.operands = &em_ol;
			assemble_instruction(ctx, &em_insn); /* recurse */
			return;
		}
	}

	/* Do a normal JAND/JNAND instruction */
	if (inverted)
		out = do_assemble_insn(ctx, insn, 0x040 | 0x1);
	else
		out = do_assemble_insn(ctx, insn, 0x040);
	out->is_jump_insn = 1;
}
  629. static void assemble_instruction(struct assembler_context *ctx,
  630. struct instruction *insn)
  631. {
  632. struct code_output *out;
  633. unsigned int opcode;
  634. switch (insn->op) {
  635. case OP_MUL:
  636. do_assemble_insn(ctx, insn, 0x101);
  637. break;
  638. case OP_ADD:
  639. do_assemble_insn(ctx, insn, 0x1C0);
  640. break;
  641. case OP_ADDSC:
  642. do_assemble_insn(ctx, insn, 0x1C2);
  643. break;
  644. case OP_ADDC:
  645. do_assemble_insn(ctx, insn, 0x1C1);
  646. break;
  647. case OP_ADDSCC:
  648. do_assemble_insn(ctx, insn, 0x1C3);
  649. break;
  650. case OP_SUB:
  651. do_assemble_insn(ctx, insn, 0x1D0);
  652. break;
  653. case OP_SUBSC:
  654. do_assemble_insn(ctx, insn, 0x1D2);
  655. break;
  656. case OP_SUBC:
  657. do_assemble_insn(ctx, insn, 0x1D1);
  658. break;
  659. case OP_SUBSCC:
  660. do_assemble_insn(ctx, insn, 0x1D3);
  661. break;
  662. case OP_SRA:
  663. do_assemble_insn(ctx, insn, 0x130);
  664. break;
  665. case OP_OR:
  666. do_assemble_insn(ctx, insn, 0x160);
  667. break;
  668. case OP_AND:
  669. do_assemble_insn(ctx, insn, 0x140);
  670. break;
  671. case OP_XOR:
  672. do_assemble_insn(ctx, insn, 0x170);
  673. break;
  674. case OP_SR:
  675. do_assemble_insn(ctx, insn, 0x120);
  676. break;
  677. case OP_SRX:
  678. opcode = merge_ext_into_opcode(ctx, 0x200, insn);
  679. do_assemble_insn(ctx, insn, opcode);
  680. break;
  681. case OP_SL:
  682. do_assemble_insn(ctx, insn, 0x110);
  683. break;
  684. case OP_RL:
  685. do_assemble_insn(ctx, insn, 0x1A0);
  686. break;
  687. case OP_RR:
  688. do_assemble_insn(ctx, insn, 0x1B0);
  689. break;
  690. case OP_NAND:
  691. do_assemble_insn(ctx, insn, 0x150);
  692. break;
  693. case OP_ORX:
  694. opcode = merge_ext_into_opcode(ctx, 0x300, insn);
  695. do_assemble_insn(ctx, insn, opcode);
  696. break;
  697. case OP_MOV:
  698. emulate_mov_insn(ctx, insn);
  699. return;
  700. case OP_JMP:
  701. emulate_jmp_insn(ctx, insn);
  702. return;
  703. case OP_JAND:
  704. emulate_jand_insn(ctx, insn, 0);
  705. return;
  706. case OP_JNAND:
  707. emulate_jand_insn(ctx, insn, 1);
  708. return;
  709. case OP_JS:
  710. out = do_assemble_insn(ctx, insn, 0x050);
  711. out->is_jump_insn = 1;
  712. break;
  713. case OP_JNS:
  714. out = do_assemble_insn(ctx, insn, 0x050 | 0x1);
  715. out->is_jump_insn = 1;
  716. break;
  717. case OP_JE:
  718. out = do_assemble_insn(ctx, insn, 0x0D0);
  719. out->is_jump_insn = 1;
  720. break;
  721. case OP_JNE:
  722. out = do_assemble_insn(ctx, insn, 0x0D0 | 0x1);
  723. out->is_jump_insn = 1;
  724. break;
  725. case OP_JLS:
  726. out = do_assemble_insn(ctx, insn, 0x0D2);
  727. out->is_jump_insn = 1;
  728. break;
  729. case OP_JGES:
  730. out = do_assemble_insn(ctx, insn, 0x0D2 | 0x1);
  731. out->is_jump_insn = 1;
  732. break;
  733. case OP_JGS:
  734. out = do_assemble_insn(ctx, insn, 0x0D4);
  735. out->is_jump_insn = 1;
  736. break;
  737. case OP_JLES:
  738. out = do_assemble_insn(ctx, insn, 0x0D4 | 0x1);
  739. out->is_jump_insn = 1;
  740. break;
  741. case OP_JL:
  742. out = do_assemble_insn(ctx, insn, 0x0DA);
  743. out->is_jump_insn = 1;
  744. break;
  745. case OP_JGE:
  746. out = do_assemble_insn(ctx, insn, 0x0DA | 0x1);
  747. out->is_jump_insn = 1;
  748. break;
  749. case OP_JG:
  750. out = do_assemble_insn(ctx, insn, 0x0DC);
  751. break;
  752. case OP_JLE:
  753. out = do_assemble_insn(ctx, insn, 0x0DC | 0x1);
  754. out->is_jump_insn = 1;
  755. break;
  756. case OP_JDN:
  757. out = do_assemble_insn(ctx, insn, 0x0D6);
  758. out->is_jump_insn = 1;
  759. break;
  760. case OP_JDPZ:
  761. out = do_assemble_insn(ctx, insn, 0x0D6 | 0x1);
  762. out->is_jump_insn = 1;
  763. break;
  764. case OP_JDP:
  765. out = do_assemble_insn(ctx, insn, 0x0D8);
  766. out->is_jump_insn = 1;
  767. break;
  768. case OP_JDNZ:
  769. out = do_assemble_insn(ctx, insn, 0x0D8 | 0x1);
  770. out->is_jump_insn = 1;
  771. break;
  772. case OP_JZX:
  773. opcode = merge_ext_into_opcode(ctx, 0x400, insn);
  774. out = do_assemble_insn(ctx, insn, opcode);
  775. out->is_jump_insn = 1;
  776. break;
  777. case OP_JNZX:
  778. opcode = merge_ext_into_opcode(ctx, 0x500, insn);
  779. out = do_assemble_insn(ctx, insn, opcode);
  780. out->is_jump_insn = 1;
  781. break;
  782. case OP_JEXT:
  783. opcode = merge_external_jmp_into_opcode(ctx, 0x700, insn);
  784. out = do_assemble_insn(ctx, insn, opcode);
  785. out->is_jump_insn = 1;
  786. break;
  787. case OP_JNEXT:
  788. opcode = merge_external_jmp_into_opcode(ctx, 0x600, insn);
  789. out = do_assemble_insn(ctx, insn, opcode);
  790. out->is_jump_insn = 1;
  791. break;
  792. case OP_CALL:
  793. if (ctx->arch != 5)
  794. asm_error(ctx, "'call' instruction is only supported on arch 5");
  795. do_assemble_insn(ctx, insn, 0x002);
  796. break;
  797. case OP_CALLS:
  798. if (ctx->arch != 15)
  799. asm_error(ctx, "'calls' instruction is only supported on arch 15");
  800. do_assemble_insn(ctx, insn, 0x004);
  801. break;
  802. case OP_RET:
  803. if (ctx->arch != 5)
  804. asm_error(ctx, "'ret' instruction is only supported on arch 5");
  805. do_assemble_ret(ctx, insn, 0x003);
  806. break;
  807. case OP_RETS:
  808. if (ctx->arch != 15)
  809. asm_error(ctx, "'rets' instruction is only supported on arch 15");
  810. do_assemble_insn(ctx, insn, 0x005);
  811. break;
  812. case OP_TKIPH:
  813. case OP_TKIPHS:
  814. case OP_TKIPL:
  815. case OP_TKIPLS:
  816. do_assemble_insn(ctx, insn, 0x1E0);
  817. break;
  818. case OP_NAP:
  819. do_assemble_insn(ctx, insn, 0x001);
  820. break;
  821. case RAW_CODE:
  822. do_assemble_insn(ctx, insn, insn->opcode);
  823. break;
  824. default:
  825. asm_error(ctx, "Unknown op");
  826. }
  827. }
/* Assemble all parsed statements into the output list.
 * If a %start label was given, a jump to it is emitted as the very
 * first instruction and marked so resolve_labels() may optimize it
 * away when it is unneeded. */
static void assemble_instructions(struct assembler_context *ctx)
{
	struct statement *s;
	struct instruction *insn;
	struct code_output *out;

	if (ctx->start_label) {
		/* Generate a jump instruction at offset 0 to
		 * jump to the code start.
		 */
		struct instruction sjmp;
		struct operlist ol;
		struct operand oper;

		oper.type = OPER_LABEL;
		oper.u.label = ctx->start_label;
		ol.oper[0] = &oper;
		sjmp.op = OP_JMP;
		sjmp.operands = &ol;

		assemble_instruction(ctx, &sjmp);
		/* Mark the just-emitted first output element. */
		out = list_entry(ctx->output.next, struct code_output, list);
		out->is_start_insn = 1;
	}

	for_each_statement(ctx, s) {
		switch (s->type) {
		case STMT_INSN:
			ctx->cur_stmt = s;
			insn = s->u.insn;
			assemble_instruction(ctx, insn);
			break;
		case STMT_LABEL:
			/* Emit a label position marker into the stream. */
			out = xmalloc(sizeof(*out));
			INIT_LIST_HEAD(&out->list);
			out->type = OUT_LABEL;
			out->labelname = s->u.label->name;
			list_add_tail(&out->list, &ctx->output);
			break;
		case STMT_ASMDIR:
			/* Directives were already handled by
			 * eval_directives(). */
			break;
		}
	} for_each_statement_end(ctx, s);
}
/* Resolve a label reference to the address it points to.
 * Absolute references scan the whole output and must be unambiguous;
 * relative references walk backward or forward from "this_insn" and
 * take the first match.
 * Returns the address, or -1 if the label was not found. */
static int get_labeladdress(struct assembler_context *ctx,
			    struct code_output *this_insn,
			    struct label *labelref)
{
	struct code_output *c;
	bool found = 0;
	int address = -1;

	switch (labelref->direction) {
	case LABELREF_ABSOLUTE:
		/* Scan all output elements for a matching label. */
		list_for_each_entry(c, &ctx->output, list) {
			if (c->type != OUT_LABEL)
				continue;
			if (strcmp(c->labelname, labelref->name) != 0)
				continue;
			if (found) {
				asm_error(ctx, "Ambiguous label reference \"%s\"",
					  labelref->name);
			}
			found = 1;
			address = c->address;
		}
		break;
	case LABELREF_RELATIVE_BACK:
		/* Walk backwards from this instruction. */
		for (c = list_entry(this_insn->list.prev, typeof(*c), list);
		     &c->list != &ctx->output;
		     c = list_entry(c->list.prev, typeof(*c), list)) {
			if (c->type != OUT_LABEL)
				continue;
			if (strcmp(c->labelname, labelref->name) == 0) {
				/* Found */
				address = c->address;
				break;
			}
		}
		break;
	case LABELREF_RELATIVE_FORWARD:
		/* Walk forward from this instruction. */
		for (c = list_entry(this_insn->list.next, typeof(*c), list);
		     &c->list != &ctx->output;
		     c = list_entry(c->list.next, typeof(*c), list)) {
			if (c->type != OUT_LABEL)
				continue;
			if (strcmp(c->labelname, labelref->name) == 0) {
				/* Found */
				address = c->address;
				break;
			}
		}
		break;
	}

	return address;
}
/*
 * Assign an address to every output element, then replace all symbolic
 * label operands with their numeric addresses.
 *
 * Runs in two passes over ctx->output:
 *  1. address assignment (instructions occupy one slot each; labels
 *     inherit the address of the following instruction);
 *  2. operand patching via get_labeladdress().
 * If the %start jump (is_start_insn) targets address 1, it is removed
 * as redundant and pass 1 is redone via the recalculate_addresses goto,
 * since deleting it shifts every later address.
 */
static void resolve_labels(struct assembler_context *ctx)
{
struct code_output *c;
int addr;
unsigned int i;
unsigned int current_address;
/* Calculate the absolute addresses for each instruction. */
recalculate_addresses:
current_address = 0;
list_for_each_entry(c, &ctx->output, list) {
switch (c->type) {
case OUT_INSN:
c->address = current_address;
current_address++;
break;
case OUT_LABEL:
/* A label occupies no slot; it refers to the next instruction. */
c->address = current_address;
break;
}
}
/* Resolve the symbolic label references. */
list_for_each_entry(c, &ctx->output, list) {
switch (c->type) {
case OUT_INSN:
if (c->is_start_insn) {
/* If the first %start-jump jumps to 001, we can
 * optimize it away, as it's unneeded.
 */
i = 2;  /* operand 2 is the jump target */
if (c->operands[i].type != OUTOPER_LABELREF)
asm_error(ctx, "Internal error, %%start insn oper 2 not labelref");
if (c->operands[i].u.label->direction != LABELREF_ABSOLUTE)
asm_error(ctx, "%%start label reference not absolute");
addr = get_labeladdress(ctx, c, c->operands[i].u.label);
if (addr < 0)
goto does_not_exist;
if (addr == 1) {
list_del(&c->list); /* Kill it */
/* Addresses shifted by the removal; recompute them all. */
goto recalculate_addresses;
}
}
for (i = 0; i < ARRAY_SIZE(c->operands); i++) {
if (c->operands[i].type != OUTOPER_LABELREF)
continue;
addr = get_labeladdress(ctx, c, c->operands[i].u.label);
if (addr < 0)
goto does_not_exist;
c->operands[i].u.operand = addr;
if (i != 2) {
/* Is not a jump target.
 * Make it be an immediate */
if (ctx->arch == 5)
c->operands[i].u.operand |= 0xC00;
else if (ctx->arch == 15)
c->operands[i].u.operand |= 0xC00 << 1;
else
asm_error(ctx, "Internal error: label res imm");
}
}
break;
case OUT_LABEL:
break;
}
}
return;
does_not_exist:
/* c and i still identify the offending operand here. */
asm_error(ctx, "Label \"%s\" does not exist",
c->operands[i].u.label->name);
}
  989. static void emit_code(struct assembler_context *ctx)
  990. {
  991. FILE *fd;
  992. const char *fn;
  993. struct code_output *c;
  994. uint64_t code;
  995. unsigned char outbuf[8];
  996. unsigned int insn_count = 0, insn_count_limit;
  997. struct fw_header hdr;
  998. fn = outfile_name;
  999. fd = fopen(fn, "w+");
  1000. if (!fd) {
  1001. fprintf(stderr, "Could not open microcode output file \"%s\"\n", fn);
  1002. exit(1);
  1003. }
  1004. if (IS_VERBOSE_DEBUG)
  1005. printf("\nCode:\n");
  1006. list_for_each_entry(c, &ctx->output, list) {
  1007. switch (c->type) {
  1008. case OUT_INSN:
  1009. insn_count++;
  1010. break;
  1011. default:
  1012. break;
  1013. }
  1014. }
  1015. switch (cmdargs.outformat) {
  1016. case FMT_RAW_LE32:
  1017. case FMT_RAW_BE32:
  1018. /* Nothing */
  1019. break;
  1020. case FMT_B43:
  1021. memset(&hdr, 0, sizeof(hdr));
  1022. hdr.type = FW_TYPE_UCODE;
  1023. hdr.ver = FW_HDR_VER;
  1024. hdr.size = cpu_to_be32(8 * insn_count);
  1025. if (fwrite(&hdr, sizeof(hdr), 1, fd) != 1) {
  1026. fprintf(stderr, "Could not write microcode outfile\n");
  1027. exit(1);
  1028. }
  1029. break;
  1030. }
  1031. switch (ctx->arch) {
  1032. case 5:
  1033. insn_count_limit = NUM_INSN_LIMIT_R5;
  1034. break;
  1035. case 15:
  1036. insn_count_limit = ~0; //FIXME limit currently unknown.
  1037. break;
  1038. default:
  1039. asm_error(ctx, "Internal error: emit_code unknown arch\n");
  1040. }
  1041. if (insn_count > insn_count_limit)
  1042. asm_warn(ctx, "Generating more than %u instructions. This "
  1043. "will overflow the device microcode memory.",
  1044. insn_count_limit);
  1045. list_for_each_entry(c, &ctx->output, list) {
  1046. switch (c->type) {
  1047. case OUT_INSN:
  1048. if (IS_VERBOSE_DEBUG) {
  1049. printf("%03X %04X,%04X,%04X\n",
  1050. c->opcode,
  1051. c->operands[0].u.operand,
  1052. c->operands[1].u.operand,
  1053. c->operands[2].u.operand);
  1054. }
  1055. switch (ctx->arch) {
  1056. case 5:
  1057. code = 0;
  1058. code |= ((uint64_t)c->operands[2].u.operand);
  1059. code |= ((uint64_t)c->operands[1].u.operand) << 12;
  1060. code |= ((uint64_t)c->operands[0].u.operand) << 24;
  1061. code |= ((uint64_t)c->opcode) << 36;
  1062. break;
  1063. case 15:
  1064. code = 0;
  1065. code |= ((uint64_t)c->operands[2].u.operand);
  1066. code |= ((uint64_t)c->operands[1].u.operand) << 13;
  1067. code |= ((uint64_t)c->operands[0].u.operand) << 26;
  1068. code |= ((uint64_t)c->opcode) << 39;
  1069. break;
  1070. default:
  1071. asm_error(ctx, "No emit format for arch %u",
  1072. ctx->arch);
  1073. }
  1074. switch (cmdargs.outformat) {
  1075. case FMT_B43:
  1076. case FMT_RAW_BE32:
  1077. code = ((code & (uint64_t)0xFFFFFFFF00000000ULL) >> 32) |
  1078. ((code & (uint64_t)0x00000000FFFFFFFFULL) << 32);
  1079. outbuf[0] = (code & (uint64_t)0xFF00000000000000ULL) >> 56;
  1080. outbuf[1] = (code & (uint64_t)0x00FF000000000000ULL) >> 48;
  1081. outbuf[2] = (code & (uint64_t)0x0000FF0000000000ULL) >> 40;
  1082. outbuf[3] = (code & (uint64_t)0x000000FF00000000ULL) >> 32;
  1083. outbuf[4] = (code & (uint64_t)0x00000000FF000000ULL) >> 24;
  1084. outbuf[5] = (code & (uint64_t)0x0000000000FF0000ULL) >> 16;
  1085. outbuf[6] = (code & (uint64_t)0x000000000000FF00ULL) >> 8;
  1086. outbuf[7] = (code & (uint64_t)0x00000000000000FFULL) >> 0;
  1087. break;
  1088. case FMT_RAW_LE32:
  1089. outbuf[7] = (code & (uint64_t)0xFF00000000000000ULL) >> 56;
  1090. outbuf[6] = (code & (uint64_t)0x00FF000000000000ULL) >> 48;
  1091. outbuf[5] = (code & (uint64_t)0x0000FF0000000000ULL) >> 40;
  1092. outbuf[4] = (code & (uint64_t)0x000000FF00000000ULL) >> 32;
  1093. outbuf[3] = (code & (uint64_t)0x00000000FF000000ULL) >> 24;
  1094. outbuf[2] = (code & (uint64_t)0x0000000000FF0000ULL) >> 16;
  1095. outbuf[1] = (code & (uint64_t)0x000000000000FF00ULL) >> 8;
  1096. outbuf[0] = (code & (uint64_t)0x00000000000000FFULL) >> 0;
  1097. break;
  1098. }
  1099. if (fwrite(&outbuf, ARRAY_SIZE(outbuf), 1, fd) != 1) {
  1100. fprintf(stderr, "Could not write microcode outfile\n");
  1101. exit(1);
  1102. }
  1103. break;
  1104. case OUT_LABEL:
  1105. break;
  1106. }
  1107. }
  1108. if (cmdargs.print_sizes) {
  1109. printf("%s: text = %u instructions (%u bytes)\n",
  1110. fn, insn_count,
  1111. (unsigned int)(insn_count * sizeof(uint64_t)));
  1112. }
  1113. fclose(fd);
  1114. }
  1115. static void assemble(void)
  1116. {
  1117. struct assembler_context ctx;
  1118. memset(&ctx, 0, sizeof(ctx));
  1119. INIT_LIST_HEAD(&ctx.output);
  1120. eval_directives(&ctx);
  1121. assemble_instructions(&ctx);
  1122. resolve_labels(&ctx);
  1123. emit_code(&ctx);
  1124. }
  1125. static void initialize(void)
  1126. {
  1127. INIT_LIST_HEAD(&infile.sl);
  1128. INIT_LIST_HEAD(&infile.ivals);
  1129. #if YYDEBUG
  1130. if (IS_INSANE_DEBUG)
  1131. yydebug = 1;
  1132. else
  1133. yydebug = 0;
  1134. #endif /* YYDEBUG */
  1135. }
  1136. int main(int argc, char **argv)
  1137. {
  1138. int err, res = 1;
  1139. err = parse_args(argc, argv);
  1140. if (err < 0)
  1141. goto out;
  1142. if (err > 0) {
  1143. res = 0;
  1144. goto out;
  1145. }
  1146. err = open_input_file();
  1147. if (err)
  1148. goto out;
  1149. initialize();
  1150. yyparse();
  1151. assemble();
  1152. assemble_initvals();
  1153. close_input_file();
  1154. res = 0;
  1155. out:
  1156. /* Lazyman simply leaks all allocated memory. */
  1157. return res;
  1158. }