/* libs/pixelflinger/codeflinger/MIPS64Assembler.cpp
**
** Copyright 2015, The Android Open Source Project
**
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
**
**     http://www.apache.org/licenses/LICENSE-2.0
**
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*/


/* MIPS64 assembler and ARM->MIPS64 assembly translator
**
** The approach is to reuse the MIPSAssembler generator, with an inherited
** MIPS64Assembler that overrides just the MIPS64r6-specific instructions.
** For now, ArmToMips64Assembler is copied over from the ArmToMipsAssembler
** class, with the MIPS64r6-related parts changed.
**
*/

#define LOG_TAG "MIPS64Assembler"

#include <stdio.h>
#include <stdlib.h>

#include <cutils/properties.h>
#include <log/log.h>
#include <private/pixelflinger/ggl_context.h>

#include "MIPS64Assembler.h"
#include "CodeCache.h"
#include "mips64_disassem.h"

#define NOT_IMPLEMENTED() LOG_ALWAYS_FATAL("Arm instruction %s not yet implemented\n", __func__)
#define __unused __attribute__((__unused__))

// ----------------------------------------------------------------------------

namespace android {

// ----------------------------------------------------------------------------
#if 0
#pragma mark -
#pragma mark ArmToMips64Assembler...
#endif

ArmToMips64Assembler::ArmToMips64Assembler(const sp<Assembly>& assembly,
                                           char *abuf, int linesz, int instr_count)
    : ARMAssemblerInterface(),
      mArmDisassemblyBuffer(abuf),
      mArmLineLength(linesz),
      mArmInstrCount(instr_count),
      mInum(0),
      mAssembly(assembly)
{
    mMips = new MIPS64Assembler(assembly, this);
    mArmPC = (uint32_t **) malloc(ARM_MAX_INSTUCTIONS * sizeof(uint32_t *));
    init_conditional_labels();
}

ArmToMips64Assembler::ArmToMips64Assembler(void* assembly)
    : ARMAssemblerInterface(),
      mArmDisassemblyBuffer(NULL),
      mInum(0),
      mAssembly(NULL)
{
    mMips = new MIPS64Assembler(assembly, this);
    mArmPC = (uint32_t **) malloc(ARM_MAX_INSTUCTIONS * sizeof(uint32_t *));
    init_conditional_labels();
}

ArmToMips64Assembler::~ArmToMips64Assembler()
{
    delete mMips;
    free((void *) mArmPC);
}

uint32_t* ArmToMips64Assembler::pc() const
{
    return mMips->pc();
}

uint32_t* ArmToMips64Assembler::base() const
{
    return mMips->base();
}

void ArmToMips64Assembler::reset()
{
    cond.labelnum = 0;
    mInum = 0;
    mMips->reset();
}

int ArmToMips64Assembler::getCodegenArch()
{
    return CODEGEN_ARCH_MIPS64;
}

void ArmToMips64Assembler::comment(const char* string)
{
    mMips->comment(string);
}

void ArmToMips64Assembler::label(const char* theLabel)
{
    mMips->label(theLabel);
}

void ArmToMips64Assembler::disassemble(const char* name)
{
    mMips->disassemble(name);
}

void ArmToMips64Assembler::init_conditional_labels()
{
    int i;
    for (i = 0; i < 99; ++i) {
        sprintf(cond.label[i], "cond_%d", i);
    }
}



#if 0
#pragma mark -
#pragma mark Prolog/Epilog & Generate...
#endif

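// The generated function keeps a 40-byte frame: prolog() saves the MIPS
// callee-saved registers s0-s4 that the translated code uses, and copies the
// pixelflinger context pointer from a0 into v0, which this translator treats
// as ARM r0. epilog() restores the same registers and returns via JR ra.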
void ArmToMips64Assembler::prolog()
{
    mArmPC[mInum++] = pc();  // save starting PC for this instr

    mMips->DADDIU(R_sp, R_sp, -(5 * 8));
    mMips->SD(R_s0, R_sp, 0);
    mMips->SD(R_s1, R_sp, 8);
    mMips->SD(R_s2, R_sp, 16);
    mMips->SD(R_s3, R_sp, 24);
    mMips->SD(R_s4, R_sp, 32);
    mMips->MOVE(R_v0, R_a0);  // move context * passed in a0 to v0 (arm r0)
}

void ArmToMips64Assembler::epilog(uint32_t touched __unused)
{
    mArmPC[mInum++] = pc();  // save starting PC for this instr

    mMips->LD(R_s0, R_sp, 0);
    mMips->LD(R_s1, R_sp, 8);
    mMips->LD(R_s2, R_sp, 16);
    mMips->LD(R_s3, R_sp, 24);
    mMips->LD(R_s4, R_sp, 32);
    mMips->DADDIU(R_sp, R_sp, (5 * 8));
    mMips->JR(R_ra);
}

int ArmToMips64Assembler::generate(const char* name)
{
    return mMips->generate(name);
}

void ArmToMips64Assembler::fix_branches()
{
    mMips->fix_branches();
}

uint32_t* ArmToMips64Assembler::pcForLabel(const char* label)
{
    return mMips->pcForLabel(label);
}

void ArmToMips64Assembler::set_condition(int mode, int R1, int R2) {
    if (mode == 2) {
        cond.type = SBIT_COND;
    } else {
        cond.type = CMP_COND;
    }
    cond.r1 = R1;
    cond.r2 = R2;
}

//----------------------------------------------------------

#if 0
#pragma mark -
#pragma mark Addressing modes & shifters...
#endif


// do not need this for MIPS, but it is in the Interface (virtual)
int ArmToMips64Assembler::buildImmediate(
        uint32_t immediate, uint32_t& rot, uint32_t& imm)
{
    // for MIPS, any 32-bit immediate is OK
    rot = 0;
    imm = immediate;
    return 0;
}

// shifters...

bool ArmToMips64Assembler::isValidImmediate(uint32_t immediate __unused)
{
    // for MIPS, any 32-bit immediate is OK
    return true;
}

uint32_t ArmToMips64Assembler::imm(uint32_t immediate)
{
    amode.value = immediate;
    return AMODE_IMM;
}

uint32_t ArmToMips64Assembler::reg_imm(int Rm, int type, uint32_t shift)
{
    amode.reg = Rm;
    amode.stype = type;
    amode.value = shift;
    return AMODE_REG_IMM;
}

uint32_t ArmToMips64Assembler::reg_rrx(int Rm __unused)
{
    // reg_rrx mode is not used in the GGLAssembler code at this time
    return AMODE_UNSUPPORTED;
}

uint32_t ArmToMips64Assembler::reg_reg(int Rm __unused, int type __unused,
                                       int Rs __unused)
{
    // reg_reg mode is not used in the GGLAssembler code at this time
    return AMODE_UNSUPPORTED;
}


// addressing modes...
// LDR(B)/STR(B)/PLD (immediate and Rm can be negative, which indicates U=0)
uint32_t ArmToMips64Assembler::immed12_pre(int32_t immed12, int W)
{
    LOG_ALWAYS_FATAL_IF(abs(immed12) >= 0x800,
                        "LDR(B)/STR(B)/PLD immediate too big (%08x)",
                        immed12);
    amode.value = immed12;
    amode.writeback = W;
    return AMODE_IMM_12_PRE;
}

uint32_t ArmToMips64Assembler::immed12_post(int32_t immed12)
{
    LOG_ALWAYS_FATAL_IF(abs(immed12) >= 0x800,
                        "LDR(B)/STR(B)/PLD immediate too big (%08x)",
                        immed12);

    amode.value = immed12;
    return AMODE_IMM_12_POST;
}

uint32_t ArmToMips64Assembler::reg_scale_pre(int Rm, int type,
                                             uint32_t shift, int W)
{
    LOG_ALWAYS_FATAL_IF(W | type | shift, "reg_scale_pre adv modes not yet implemented");

    amode.reg = Rm;
    // amode.stype = type;      // more advanced modes not used in GGLAssembler yet
    // amode.value = shift;
    // amode.writeback = W;
    return AMODE_REG_SCALE_PRE;
}

uint32_t ArmToMips64Assembler::reg_scale_post(int Rm __unused, int type __unused,
                                              uint32_t shift __unused)
{
    LOG_ALWAYS_FATAL("adr mode reg_scale_post not yet implemented\n");
    return AMODE_UNSUPPORTED;
}

// LDRH/LDRSB/LDRSH/STRH (immediate and Rm can be negative, which indicates U=0)
uint32_t ArmToMips64Assembler::immed8_pre(int32_t immed8, int W __unused)
{
    LOG_ALWAYS_FATAL("adr mode immed8_pre not yet implemented\n");

    LOG_ALWAYS_FATAL_IF(abs(immed8) >= 0x100,
                        "LDRH/LDRSB/LDRSH/STRH immediate too big (%08x)",
                        immed8);
    return AMODE_IMM_8_PRE;
}

uint32_t ArmToMips64Assembler::immed8_post(int32_t immed8)
{
    LOG_ALWAYS_FATAL_IF(abs(immed8) >= 0x100,
                        "LDRH/LDRSB/LDRSH/STRH immediate too big (%08x)",
                        immed8);
    amode.value = immed8;
    return AMODE_IMM_8_POST;
}

uint32_t ArmToMips64Assembler::reg_pre(int Rm, int W)
{
    LOG_ALWAYS_FATAL_IF(W, "reg_pre writeback not yet implemented");
    amode.reg = Rm;
    return AMODE_REG_PRE;
}

uint32_t ArmToMips64Assembler::reg_post(int Rm __unused)
{
    LOG_ALWAYS_FATAL("adr mode reg_post not yet implemented\n");
    return AMODE_UNSUPPORTED;
}



// ----------------------------------------------------------------------------

#if 0
#pragma mark -
#pragma mark Data Processing...
#endif

// check if the operand registers from a previous CMP or S-bit instruction
// would be overwritten by this instruction. If so, move the value to a
// safe register.
// Note that we cannot tell at _this_ instruction time if a future (conditional)
// instruction will _also_ use this value (a defect of the simple 1-pass, one-
// instruction-at-a-time translation). Therefore we must be conservative and
// save the value before it is overwritten. This costs an extra MOVE instr.

void ArmToMips64Assembler::protectConditionalOperands(int Rd)
{
    if (Rd == cond.r1) {
        mMips->MOVE(R_cmp, cond.r1);
        cond.r1 = R_cmp;
    }
    if (cond.type == CMP_COND && Rd == cond.r2) {
        mMips->MOVE(R_cmp2, cond.r2);
        cond.r2 = R_cmp2;
    }
}


// interprets the addressing mode and generates the common code
// used by the majority of data-processing ops. Many MIPS instructions
// have a register-based form and a different immediate form. See
// opAND below for an example. (this could be inlined)
//
// this works with the imm() and reg_imm() methods above, which are directly
// called by the GGLAssembler.
// note: _signed parameter defaults to false (unsigned)
// note: tmpReg parameter defaults to 1, MIPS register AT
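// example: a 32-bit immediate such as imm(0x12345678) cannot fit in a MIPS
// 16-bit immediate field, so it is materialized into tmpReg as
//     LUI  tmpReg, 0x1234
//     ORI  tmpReg, tmpReg, 0x5678
// and reported as SRC_REG, while imm(0x1234) passes through as SRC_IMM.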
int ArmToMips64Assembler::dataProcAdrModes(int op, int& source, bool _signed, int tmpReg)
{
    if (op < AMODE_REG) {
        source = op;
        return SRC_REG;
    } else if (op == AMODE_IMM) {
        if ((!_signed && amode.value > 0xffff)
                || (_signed && ((int)amode.value < -32768 || (int)amode.value > 32767))) {
            mMips->LUI(tmpReg, (amode.value >> 16));
            if (amode.value & 0x0000ffff) {
                mMips->ORI(tmpReg, tmpReg, (amode.value & 0x0000ffff));
            }
            source = tmpReg;
            return SRC_REG;
        } else {
            source = amode.value;
            return SRC_IMM;
        }
    } else if (op == AMODE_REG_IMM) {
        switch (amode.stype) {
            case LSL: mMips->SLL(tmpReg, amode.reg, amode.value); break;
            case LSR: mMips->SRL(tmpReg, amode.reg, amode.value); break;
            case ASR: mMips->SRA(tmpReg, amode.reg, amode.value); break;
            case ROR: mMips->ROTR(tmpReg, amode.reg, amode.value); break;
        }
        source = tmpReg;
        return SRC_REG;
    } else {    // adr mode RRX is not used in GGL Assembler at this time
        // we are screwed, this should be exception, assert-fail or something
        LOG_ALWAYS_FATAL("adr mode reg_rrx not yet implemented\n");
        return SRC_ERROR;
    }
}

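// ARM conditional execution is handled by branching around the generated
// code: for cc != AL, the inverted condition (cc^1) jumps past this
// instruction to a fresh cond_<n> label, so e.g. ADDNE r0,r1,r2 becomes
//     BEQ  cond.r1, cond.r2, cond_n
//     ADDU r0, r1, r2
// cond_n: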
void ArmToMips64Assembler::dataProcessing(int opcode, int cc,
                                          int s, int Rd, int Rn, uint32_t Op2)
{
    int src;    // src is modified by dataProcAdrModes() - passed as int&

    if (cc != AL) {
        protectConditionalOperands(Rd);
        // the branch tests register(s) set by prev CMP or instr with 'S' bit set
        // invert the condition to jump past this conditional instruction
        ArmToMips64Assembler::B(cc^1, cond.label[++cond.labelnum]);
    } else {
        mArmPC[mInum++] = pc();  // save starting PC for this instr
    }

    switch (opcode) {
        case opAND:
            if (dataProcAdrModes(Op2, src) == SRC_REG) {
                mMips->AND(Rd, Rn, src);
            } else {    // adr mode was SRC_IMM
                mMips->ANDI(Rd, Rn, src);
            }
            break;

        case opADD:
            // set "signed" to true for adr modes
            if (dataProcAdrModes(Op2, src, true) == SRC_REG) {
                mMips->ADDU(Rd, Rn, src);
            } else {    // adr mode was SRC_IMM
                mMips->ADDIU(Rd, Rn, src);
            }
            break;

        case opSUB:
            // set "signed" to true for adr modes
            if (dataProcAdrModes(Op2, src, true) == SRC_REG) {
                mMips->SUBU(Rd, Rn, src);
            } else {    // adr mode was SRC_IMM
                mMips->SUBIU(Rd, Rn, src);
            }
            break;

        case opADD64:
            // set "signed" to true for adr modes
            if (dataProcAdrModes(Op2, src, true) == SRC_REG) {
                mMips->DADDU(Rd, Rn, src);
            } else {    // adr mode was SRC_IMM
                mMips->DADDIU(Rd, Rn, src);
            }
            break;

        case opSUB64:
            // set "signed" to true for adr modes
            if (dataProcAdrModes(Op2, src, true) == SRC_REG) {
                mMips->DSUBU(Rd, Rn, src);
            } else {    // adr mode was SRC_IMM
                mMips->DSUBIU(Rd, Rn, src);
            }
            break;

        case opEOR:
            if (dataProcAdrModes(Op2, src) == SRC_REG) {
                mMips->XOR(Rd, Rn, src);
            } else {    // adr mode was SRC_IMM
                mMips->XORI(Rd, Rn, src);
            }
            break;

        case opORR:
            if (dataProcAdrModes(Op2, src) == SRC_REG) {
                mMips->OR(Rd, Rn, src);
            } else {    // adr mode was SRC_IMM
                mMips->ORI(Rd, Rn, src);
            }
            break;

        case opBIC:
            if (dataProcAdrModes(Op2, src) == SRC_IMM) {
                // if the value is a 16-bit immediate, load it into the AT reg
                mMips->ORI(R_at, 0, src);
                src = R_at;
            }
            mMips->NOT(R_at, src);
            mMips->AND(Rd, Rn, R_at);
            break;

        case opRSB:
            if (dataProcAdrModes(Op2, src) == SRC_IMM) {
                // if the value is a 16-bit immediate, load it into the AT reg
                mMips->ORI(R_at, 0, src);
                src = R_at;
            }
            mMips->SUBU(Rd, src, Rn);  // subu with the parameters reversed
            break;

        case opMOV:
            if (Op2 < AMODE_REG) {  // op2 is reg # in this case
                mMips->MOVE(Rd, Op2);
            } else if (Op2 == AMODE_IMM) {
                if (amode.value > 0xffff) {
                    mMips->LUI(Rd, (amode.value >> 16));
                    if (amode.value & 0x0000ffff) {
                        mMips->ORI(Rd, Rd, (amode.value & 0x0000ffff));
                    }
                } else {
                    mMips->ORI(Rd, 0, amode.value);
                }
            } else if (Op2 == AMODE_REG_IMM) {
                switch (amode.stype) {
                    case LSL: mMips->SLL(Rd, amode.reg, amode.value); break;
                    case LSR: mMips->SRL(Rd, amode.reg, amode.value); break;
                    case ASR: mMips->SRA(Rd, amode.reg, amode.value); break;
                    case ROR: mMips->ROTR(Rd, amode.reg, amode.value); break;
                }
            } else {
                // adr mode RRX is not used in GGL Assembler at this time
                mMips->UNIMPL();
            }
            break;

        case opMVN:     // this is a 1's complement: NOT
            if (Op2 < AMODE_REG) {  // op2 is reg # in this case
                mMips->NOR(Rd, Op2, 0);  // NOT is NOR with 0
                break;
            } else if (Op2 == AMODE_IMM) {
                if (amode.value > 0xffff) {
                    mMips->LUI(Rd, (amode.value >> 16));
                    if (amode.value & 0x0000ffff) {
                        mMips->ORI(Rd, Rd, (amode.value & 0x0000ffff));
                    }
                } else {
                    mMips->ORI(Rd, 0, amode.value);
                }
            } else if (Op2 == AMODE_REG_IMM) {
                switch (amode.stype) {
                    case LSL: mMips->SLL(Rd, amode.reg, amode.value); break;
                    case LSR: mMips->SRL(Rd, amode.reg, amode.value); break;
                    case ASR: mMips->SRA(Rd, amode.reg, amode.value); break;
                    case ROR: mMips->ROTR(Rd, amode.reg, amode.value); break;
                }
            } else {
                // adr mode RRX is not used in GGL Assembler at this time
                mMips->UNIMPL();
            }
            mMips->NOR(Rd, Rd, 0);  // NOT is NOR with 0
            break;

        case opCMP:
            // Either operand of a CMP instr could get overwritten by a subsequent
            // conditional instruction, which is ok, _UNLESS_ there is a _second_
            // conditional instruction. Under MIPS, this requires doing the comparison
            // again (SLT), and the original operands must be available. (and this
            // pattern of multiple conditional instructions from same CMP _is_ used
            // in GGL-Assembler)
            //
            // For now, if a conditional instr overwrites the operands, we will
            // move them to dedicated temp regs. This is ugly, and inefficient,
            // and should be optimized.
            //
            // WARNING: making an _Assumption_ that CMP operand regs will NOT be
            // trashed by intervening NON-conditional instructions. In the general
            // case this is legal, but it is NOT currently done in GGL-Assembler.

            cond.type = CMP_COND;
            cond.r1 = Rn;
            if (dataProcAdrModes(Op2, src, false, R_cmp2) == SRC_REG) {
                cond.r2 = src;
            } else {    // adr mode was SRC_IMM
                mMips->ORI(R_cmp2, R_zero, src);
                cond.r2 = R_cmp2;
            }
            break;

        case opTST:
        case opTEQ:
        case opCMN:
        case opADC:
        case opSBC:
        case opRSC:
            mMips->UNIMPL();  // currently unused in GGL Assembler code
            break;
    }

    if (cc != AL) {
        mMips->label(cond.label[cond.labelnum]);
    }
    if (s && opcode != opCMP) {
        cond.type = SBIT_COND;
        cond.r1 = Rd;
    }
}



#if 0
#pragma mark -
#pragma mark Multiply...
#endif

// multiply, accumulate
void ArmToMips64Assembler::MLA(int cc __unused, int s,
                               int Rd, int Rm, int Rs, int Rn) {

    //ALOGW("MLA");
    mArmPC[mInum++] = pc();  // save starting PC for this instr

    mMips->MUL(R_at, Rm, Rs);
    mMips->ADDU(Rd, R_at, Rn);
    if (s) {
        cond.type = SBIT_COND;
        cond.r1 = Rd;
    }
}

void ArmToMips64Assembler::MUL(int cc __unused, int s,
                               int Rd, int Rm, int Rs) {
    mArmPC[mInum++] = pc();
    mMips->MUL(Rd, Rm, Rs);
    if (s) {
        cond.type = SBIT_COND;
        cond.r1 = Rd;
    }
}

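// MIPS64r6 has no HI/LO result registers: the two halves of a 32x32-bit
// multiply come from the separate MUL (low 32 bits) and MUH (high 32 bits)
// instructions, which together stand in for the ARM long-multiply pair below.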
void ArmToMips64Assembler::UMULL(int cc __unused, int s,
                                 int RdLo, int RdHi, int Rm, int Rs) {
    mArmPC[mInum++] = pc();
    mMips->MUH(RdHi, Rm, Rs);
    mMips->MUL(RdLo, Rm, Rs);

    if (s) {
        cond.type = SBIT_COND;
        cond.r1 = RdHi;  // BUG...
        LOG_ALWAYS_FATAL("Condition on UMULL must be on 64-bit result\n");
    }
}

void ArmToMips64Assembler::UMUAL(int cc __unused, int s,
                                 int RdLo __unused, int RdHi, int Rm __unused, int Rs __unused) {
    LOG_FATAL_IF(RdLo==Rm || RdHi==Rm || RdLo==RdHi,
                 "UMUAL(r%u,r%u,r%u,r%u)", RdLo,RdHi,Rm,Rs);
    // *mPC++ = (cc<<28) | (1<<23) | (1<<21) | (s<<20) |
    //          (RdHi<<16) | (RdLo<<12) | (Rs<<8) | 0x90 | Rm;
    mArmPC[mInum++] = pc();
    mMips->NOP2();
    NOT_IMPLEMENTED();
    if (s) {
        cond.type = SBIT_COND;
        cond.r1 = RdHi;  // BUG...
        LOG_ALWAYS_FATAL("Condition on UMUAL must be on 64-bit result\n");
    }
}

void ArmToMips64Assembler::SMULL(int cc __unused, int s,
                                 int RdLo __unused, int RdHi, int Rm __unused, int Rs __unused) {
    LOG_FATAL_IF(RdLo==Rm || RdHi==Rm || RdLo==RdHi,
                 "SMULL(r%u,r%u,r%u,r%u)", RdLo,RdHi,Rm,Rs);
    // *mPC++ = (cc<<28) | (1<<23) | (1<<22) | (s<<20) |
    //          (RdHi<<16) | (RdLo<<12) | (Rs<<8) | 0x90 | Rm;
    mArmPC[mInum++] = pc();
    mMips->NOP2();
    NOT_IMPLEMENTED();
    if (s) {
        cond.type = SBIT_COND;
        cond.r1 = RdHi;  // BUG...
        LOG_ALWAYS_FATAL("Condition on SMULL must be on 64-bit result\n");
    }
}

void ArmToMips64Assembler::SMUAL(int cc __unused, int s,
                                 int RdLo __unused, int RdHi, int Rm __unused, int Rs __unused) {
    LOG_FATAL_IF(RdLo==Rm || RdHi==Rm || RdLo==RdHi,
                 "SMUAL(r%u,r%u,r%u,r%u)", RdLo,RdHi,Rm,Rs);
    // *mPC++ = (cc<<28) | (1<<23) | (1<<22) | (1<<21) | (s<<20) |
    //          (RdHi<<16) | (RdLo<<12) | (Rs<<8) | 0x90 | Rm;
    mArmPC[mInum++] = pc();
    mMips->NOP2();
    NOT_IMPLEMENTED();
    if (s) {
        cond.type = SBIT_COND;
        cond.r1 = RdHi;  // BUG...
        LOG_ALWAYS_FATAL("Condition on SMUAL must be on 64-bit result\n");
    }
}



#if 0
#pragma mark -
#pragma mark Branches...
#endif

// branches...

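// The ARM condition is resolved here by re-comparing the operands captured
// from the most recent CMP (or S-bit instruction) with a MIPS
// compare-and-branch. For S-bit conditions the result register is simply
// compared against $zero.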
void ArmToMips64Assembler::B(int cc, const char* label)
{
    mArmPC[mInum++] = pc();
    if (cond.type == SBIT_COND) { cond.r2 = R_zero; }

    switch(cc) {
        case EQ: mMips->BEQ(cond.r1, cond.r2, label); break;
        case NE: mMips->BNE(cond.r1, cond.r2, label); break;
        case HS: mMips->BGEU(cond.r1, cond.r2, label); break;
        case LO: mMips->BLTU(cond.r1, cond.r2, label); break;
        case MI: mMips->BLT(cond.r1, cond.r2, label); break;
        case PL: mMips->BGE(cond.r1, cond.r2, label); break;

        case HI: mMips->BGTU(cond.r1, cond.r2, label); break;
        case LS: mMips->BLEU(cond.r1, cond.r2, label); break;
        case GE: mMips->BGE(cond.r1, cond.r2, label); break;
        case LT: mMips->BLT(cond.r1, cond.r2, label); break;
        case GT: mMips->BGT(cond.r1, cond.r2, label); break;
        case LE: mMips->BLE(cond.r1, cond.r2, label); break;
        case AL: mMips->B(label); break;
        case NV: /* B Never - no instruction */ break;

        case VS:
        case VC:
        default:
            LOG_ALWAYS_FATAL("Unsupported cc: %02x\n", cc);
            break;
    }
}

void ArmToMips64Assembler::BL(int cc __unused, const char* label __unused)
{
    LOG_ALWAYS_FATAL("branch-and-link not supported yet\n");
    mArmPC[mInum++] = pc();
}

// no use for Branches with integer PC, but they're in the Interface class ....
void ArmToMips64Assembler::B(int cc __unused, uint32_t* to_pc __unused)
{
    LOG_ALWAYS_FATAL("branch to absolute PC not supported, use Label\n");
    mArmPC[mInum++] = pc();
}

void ArmToMips64Assembler::BL(int cc __unused, uint32_t* to_pc __unused)
{
    LOG_ALWAYS_FATAL("branch to absolute PC not supported, use Label\n");
    mArmPC[mInum++] = pc();
}

void ArmToMips64Assembler::BX(int cc __unused, int Rn __unused)
{
    LOG_ALWAYS_FATAL("branch to absolute PC not supported, use Label\n");
    mArmPC[mInum++] = pc();
}



#if 0
#pragma mark -
#pragma mark Data Transfer...
#endif

// data transfer...
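// Note: the 'offset' argument of the load/store emitters below is not a
// literal offset but one of the AMODE_* tokens returned by the
// addressing-mode methods above; the actual value/register lives in the
// shared 'amode' struct. Anything outside the token range is folded to
// case 0, covering the ARM default address mode of immed12_pre(0).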
void ArmToMips64Assembler::LDR(int cc __unused, int Rd, int Rn, uint32_t offset)
{
    mArmPC[mInum++] = pc();
    // work-around for ARM default address mode of immed12_pre(0)
    if (offset > AMODE_UNSUPPORTED) offset = 0;
    switch (offset) {
        case 0:
            amode.value = 0;
            amode.writeback = 0;
            // fall thru to next case ....
        case AMODE_IMM_12_PRE:
            if (Rn == ARMAssemblerInterface::SP) {
                Rn = R_sp;  // convert LDR via Arm SP to LW via Mips SP
            }
            mMips->LW(Rd, Rn, amode.value);
            if (amode.writeback) {  // OPTIONAL writeback on pre-index mode
                mMips->DADDIU(Rn, Rn, amode.value);
            }
            break;
        case AMODE_IMM_12_POST:
            if (Rn == ARMAssemblerInterface::SP) {
                Rn = R_sp;  // convert LDR thru Arm SP to LW thru Mips SP
            }
            mMips->LW(Rd, Rn, 0);
            mMips->DADDIU(Rn, Rn, amode.value);
            break;
        case AMODE_REG_SCALE_PRE:
            // we only support simple base + index, no advanced modes for this one yet
            mMips->DADDU(R_at, Rn, amode.reg);
            mMips->LW(Rd, R_at, 0);
            break;
    }
}

void ArmToMips64Assembler::LDRB(int cc __unused, int Rd, int Rn, uint32_t offset)
{
    mArmPC[mInum++] = pc();
    // work-around for ARM default address mode of immed12_pre(0)
    if (offset > AMODE_UNSUPPORTED) offset = 0;
    switch (offset) {
        case 0:
            amode.value = 0;
            amode.writeback = 0;
            // fall thru to next case ....
        case AMODE_IMM_12_PRE:
            mMips->LBU(Rd, Rn, amode.value);
            if (amode.writeback) {  // OPTIONAL writeback on pre-index mode
                mMips->DADDIU(Rn, Rn, amode.value);
            }
            break;
        case AMODE_IMM_12_POST:
            mMips->LBU(Rd, Rn, 0);
            mMips->DADDIU(Rn, Rn, amode.value);
            break;
        case AMODE_REG_SCALE_PRE:
            // we only support simple base + index, no advanced modes for this one yet
            mMips->DADDU(R_at, Rn, amode.reg);
            mMips->LBU(Rd, R_at, 0);
            break;
    }
}

void ArmToMips64Assembler::STR(int cc __unused, int Rd, int Rn, uint32_t offset)
{
    mArmPC[mInum++] = pc();
    // work-around for ARM default address mode of immed12_pre(0)
    if (offset > AMODE_UNSUPPORTED) offset = 0;
    switch (offset) {
        case 0:
            amode.value = 0;
            amode.writeback = 0;
            // fall thru to next case ....
        case AMODE_IMM_12_PRE:
            if (Rn == ARMAssemblerInterface::SP) {
                Rn = R_sp;  // convert STR thru Arm SP to SW thru Mips SP
            }
            if (amode.writeback) {  // OPTIONAL writeback on pre-index mode
                // If we will writeback, then update the index reg, then store.
                // This correctly handles stack-push case.
                mMips->DADDIU(Rn, Rn, amode.value);
                mMips->SW(Rd, Rn, 0);
            } else {
                // No writeback so store offset by value
                mMips->SW(Rd, Rn, amode.value);
            }
            break;
        case AMODE_IMM_12_POST:
            mMips->SW(Rd, Rn, 0);
            mMips->DADDIU(Rn, Rn, amode.value);  // post index always writes back
            break;
        case AMODE_REG_SCALE_PRE:
            // we only support simple base + index, no advanced modes for this one yet
            mMips->DADDU(R_at, Rn, amode.reg);
            mMips->SW(Rd, R_at, 0);
            break;
    }
}

void ArmToMips64Assembler::STRB(int cc __unused, int Rd, int Rn, uint32_t offset)
{
    mArmPC[mInum++] = pc();
    // work-around for ARM default address mode of immed12_pre(0)
    if (offset > AMODE_UNSUPPORTED) offset = 0;
    switch (offset) {
        case 0:
            amode.value = 0;
            amode.writeback = 0;
            // fall thru to next case ....
        case AMODE_IMM_12_PRE:
            mMips->SB(Rd, Rn, amode.value);
            if (amode.writeback) {  // OPTIONAL writeback on pre-index mode
                mMips->DADDIU(Rn, Rn, amode.value);
            }
            break;
        case AMODE_IMM_12_POST:
            mMips->SB(Rd, Rn, 0);
            mMips->DADDIU(Rn, Rn, amode.value);
            break;
        case AMODE_REG_SCALE_PRE:
            // we only support simple base + index, no advanced modes for this one yet
            mMips->DADDU(R_at, Rn, amode.reg);
            mMips->SB(Rd, R_at, 0);
            break;
    }
}

void ArmToMips64Assembler::LDRH(int cc __unused, int Rd, int Rn, uint32_t offset)
{
    mArmPC[mInum++] = pc();
    // work-around for ARM default address mode of immed8_pre(0)
    if (offset > AMODE_UNSUPPORTED) offset = 0;
    switch (offset) {
        case 0:
            amode.value = 0;
            // fall thru to next case ....
        case AMODE_IMM_8_PRE:   // no support yet for writeback
            mMips->LHU(Rd, Rn, amode.value);
            break;
        case AMODE_IMM_8_POST:
            mMips->LHU(Rd, Rn, 0);
            mMips->DADDIU(Rn, Rn, amode.value);
            break;
        case AMODE_REG_PRE:
            // we only support simple base +/- index
            if (amode.reg >= 0) {
                mMips->DADDU(R_at, Rn, amode.reg);
            } else {
                mMips->DSUBU(R_at, Rn, abs(amode.reg));
            }
            mMips->LHU(Rd, R_at, 0);
            break;
    }
}

void ArmToMips64Assembler::LDRSB(int cc __unused, int Rd __unused,
                                 int Rn __unused, uint32_t offset __unused)
{
    mArmPC[mInum++] = pc();
    mMips->NOP2();
    NOT_IMPLEMENTED();
}

void ArmToMips64Assembler::LDRSH(int cc __unused, int Rd __unused,
                                 int Rn __unused, uint32_t offset __unused)
{
    mArmPC[mInum++] = pc();
    mMips->NOP2();
    NOT_IMPLEMENTED();
}

void ArmToMips64Assembler::STRH(int cc __unused, int Rd, int Rn, uint32_t offset)
{
    mArmPC[mInum++] = pc();
    // work-around for ARM default address mode of immed8_pre(0)
    if (offset > AMODE_UNSUPPORTED) offset = 0;
    switch (offset) {
        case 0:
            amode.value = 0;
            // fall thru to next case ....
        case AMODE_IMM_8_PRE:   // no support yet for writeback
            mMips->SH(Rd, Rn, amode.value);
            break;
        case AMODE_IMM_8_POST:
            mMips->SH(Rd, Rn, 0);
            mMips->DADDIU(Rn, Rn, amode.value);
            break;
        case AMODE_REG_PRE:
            // we only support simple base +/- index
            if (amode.reg >= 0) {
                mMips->DADDU(R_at, Rn, amode.reg);
            } else {
                mMips->DSUBU(R_at, Rn, abs(amode.reg));
            }
            mMips->SH(Rd, R_at, 0);
            break;
    }
}



#if 0
#pragma mark -
#pragma mark Block Data Transfer...
#endif

// block data transfer...
void ArmToMips64Assembler::LDM(int cc __unused, int dir __unused,
                               int Rn __unused, int W __unused, uint32_t reg_list __unused)
{ //                          ED FD EA FA IB IA DB DA
    // const uint8_t P[8] = { 1, 0, 1, 0, 1, 0, 1, 0 };
    // const uint8_t U[8] = { 1, 1, 0, 0, 1, 1, 0, 0 };
    // *mPC++ = (cc<<28) | (4<<25) | (uint32_t(P[dir])<<24) |
    //          (uint32_t(U[dir])<<23) | (1<<20) | (W<<21) | (Rn<<16) | reg_list;
    mArmPC[mInum++] = pc();
    mMips->NOP2();
    NOT_IMPLEMENTED();
}

void ArmToMips64Assembler::STM(int cc __unused, int dir __unused,
                               int Rn __unused, int W __unused, uint32_t reg_list __unused)
{ //                          FA EA FD ED IB IA DB DA
    // const uint8_t P[8] = { 0, 1, 0, 1, 1, 0, 1, 0 };
    // const uint8_t U[8] = { 0, 0, 1, 1, 1, 1, 0, 0 };
    // *mPC++ = (cc<<28) | (4<<25) | (uint32_t(P[dir])<<24) |
    //          (uint32_t(U[dir])<<23) | (0<<20) | (W<<21) | (Rn<<16) | reg_list;
    mArmPC[mInum++] = pc();
    mMips->NOP2();
    NOT_IMPLEMENTED();
}



#if 0
#pragma mark -
#pragma mark Special...
#endif

// special...
void ArmToMips64Assembler::SWP(int cc __unused, int Rn __unused,
                               int Rd __unused, int Rm __unused) {
    // *mPC++ = (cc<<28) | (2<<23) | (Rn<<16) | (Rd << 12) | 0x90 | Rm;
    mArmPC[mInum++] = pc();
    mMips->NOP2();
    NOT_IMPLEMENTED();
}

void ArmToMips64Assembler::SWPB(int cc __unused, int Rn __unused,
                                int Rd __unused, int Rm __unused) {
    // *mPC++ = (cc<<28) | (2<<23) | (1<<22) | (Rn<<16) | (Rd << 12) | 0x90 | Rm;
    mArmPC[mInum++] = pc();
    mMips->NOP2();
    NOT_IMPLEMENTED();
}

void ArmToMips64Assembler::SWI(int cc __unused, uint32_t comment __unused) {
    // *mPC++ = (cc<<28) | (0xF<<24) | comment;
    mArmPC[mInum++] = pc();
    mMips->NOP2();
    NOT_IMPLEMENTED();
}


#if 0
#pragma mark -
#pragma mark DSP instructions...
#endif

// DSP instructions...
void ArmToMips64Assembler::PLD(int Rn __unused, uint32_t offset) {
    LOG_ALWAYS_FATAL_IF(!((offset&(1<<24)) && !(offset&(1<<21))),
                        "PLD only P=1, W=0");
    // *mPC++ = 0xF550F000 | (Rn<<16) | offset;
    mArmPC[mInum++] = pc();
    mMips->NOP2();
    NOT_IMPLEMENTED();
}

void ArmToMips64Assembler::CLZ(int cc __unused, int Rd, int Rm)
{
    mArmPC[mInum++] = pc();
    mMips->CLZ(Rd, Rm);
}

void ArmToMips64Assembler::QADD(int cc __unused, int Rd __unused,
                                int Rm __unused, int Rn __unused)
{
    // *mPC++ = (cc<<28) | 0x1000050 | (Rn<<16) | (Rd<<12) | Rm;
    mArmPC[mInum++] = pc();
    mMips->NOP2();
    NOT_IMPLEMENTED();
}

void ArmToMips64Assembler::QDADD(int cc __unused, int Rd __unused,
                                 int Rm __unused, int Rn __unused)
{
    // *mPC++ = (cc<<28) | 0x1400050 | (Rn<<16) | (Rd<<12) | Rm;
    mArmPC[mInum++] = pc();
    mMips->NOP2();
    NOT_IMPLEMENTED();
}

void ArmToMips64Assembler::QSUB(int cc __unused, int Rd __unused,
                                int Rm __unused, int Rn __unused)
{
    // *mPC++ = (cc<<28) | 0x1200050 | (Rn<<16) | (Rd<<12) | Rm;
    mArmPC[mInum++] = pc();
    mMips->NOP2();
    NOT_IMPLEMENTED();
}

void ArmToMips64Assembler::QDSUB(int cc __unused, int Rd __unused,
                                 int Rm __unused, int Rn __unused)
{
    // *mPC++ = (cc<<28) | 0x1600050 | (Rn<<16) | (Rd<<12) | Rm;
    mArmPC[mInum++] = pc();
    mMips->NOP2();
    NOT_IMPLEMENTED();
}

// 16 x 16 signed multiply (like SMLAxx without the accumulate)
void ArmToMips64Assembler::SMUL(int cc __unused, int xy,
                                int Rd, int Rm, int Rs)
{
    mArmPC[mInum++] = pc();

    // the 16 bits may be in the top or bottom half of 32-bit source reg,
    // as defined by the codes BB, BT, TB, TT (compressed param xy)
    // where x corresponds to Rm and y to Rs

    // select half-reg for Rm
    if (xy & xyTB) {
        // use top 16-bits
        mMips->SRA(R_at, Rm, 16);
    } else {
        // use bottom 16, but sign-extend to 32
        mMips->SEH(R_at, Rm);
    }
    // select half-reg for Rs
    if (xy & xyBT) {
        // use top 16-bits
        mMips->SRA(R_at2, Rs, 16);
    } else {
        // use bottom 16, but sign-extend to 32
        mMips->SEH(R_at2, Rs);
    }
    mMips->MUL(Rd, R_at, R_at2);
}

// signed 32b x 16b multiply, save top 32-bits of 48-bit result
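// Implementation note: the chosen 16-bit half of Rs ends up in the top half
// of R_at, so MUH computes (Rm * (half << 16)) >> 32, which equals
// (Rm * half) >> 16 -- exactly the top 32 bits of the 48-bit product.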
void ArmToMips64Assembler::SMULW(int cc __unused, int y,
                                 int Rd, int Rm, int Rs)
{
    mArmPC[mInum++] = pc();

    // the selector yT or yB refers to reg Rs
    if (y & yT) {
        // zero the bottom 16-bits, with 2 shifts, it can affect result
        mMips->SRL(R_at, Rs, 16);
        mMips->SLL(R_at, R_at, 16);
    } else {
        // move low 16-bit half, to high half
        mMips->SLL(R_at, Rs, 16);
    }
    mMips->MUH(Rd, Rm, R_at);
}

// 16 x 16 signed multiply, accumulate: Rd = Rm{16} * Rs{16} + Rn
void ArmToMips64Assembler::SMLA(int cc __unused, int xy,
                                int Rd, int Rm, int Rs, int Rn)
{
    mArmPC[mInum++] = pc();

    // the 16 bits may be in the top or bottom half of 32-bit source reg,
    // as defined by the codes BB, BT, TB, TT (compressed param xy)
    // where x corresponds to Rm and y to Rs

    // select half-reg for Rm
    if (xy & xyTB) {
        // use top 16-bits
        mMips->SRA(R_at, Rm, 16);
    } else {
        // use bottom 16, but sign-extend to 32
        mMips->SEH(R_at, Rm);
    }
    // select half-reg for Rs
    if (xy & xyBT) {
        // use top 16-bits
        mMips->SRA(R_at2, Rs, 16);
    } else {
        // use bottom 16, but sign-extend to 32
        mMips->SEH(R_at2, Rs);
    }

    mMips->MUL(R_at, R_at, R_at2);
    mMips->ADDU(Rd, R_at, Rn);
}

void ArmToMips64Assembler::SMLAL(int cc __unused, int xy __unused,
                                 int RdHi __unused, int RdLo __unused,
                                 int Rs __unused, int Rm __unused)
{
    // *mPC++ = (cc<<28) | 0x1400080 | (RdHi<<16) | (RdLo<<12) | (Rs<<8) | (xy<<4) | Rm;
    mArmPC[mInum++] = pc();
    mMips->NOP2();
    NOT_IMPLEMENTED();
}

void ArmToMips64Assembler::SMLAW(int cc __unused, int y __unused,
                                 int Rd __unused, int Rm __unused,
                                 int Rs __unused, int Rn __unused)
{
    // *mPC++ = (cc<<28) | 0x1200080 | (Rd<<16) | (Rn<<12) | (Rs<<8) | (y<<4) | Rm;
    mArmPC[mInum++] = pc();
    mMips->NOP2();
    NOT_IMPLEMENTED();
}

// used by ARMv6 version of GGLAssembler::filter32
void ArmToMips64Assembler::UXTB16(int cc __unused, int Rd, int Rm, int rotate)
{
    mArmPC[mInum++] = pc();

    //Rd[31:16] := ZeroExtend((Rm ROR (8 * sh))[23:16]),
    //Rd[15:0] := ZeroExtend((Rm ROR (8 * sh))[7:0]). sh 0-3.

    mMips->ROTR(R_at2, Rm, rotate * 8);
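    // build the mask 0x00FF00FF in R_at (LUI gives 0x00FF0000, ORI adds
    // 0x00FF); the AND below then keeps byte lanes 0 and 2 of the rotated
    // source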
    mMips->LUI(R_at, 0xFF);
    mMips->ORI(R_at, R_at, 0xFF);
    mMips->AND(Rd, R_at2, R_at);
}

void ArmToMips64Assembler::UBFX(int cc __unused, int Rd __unused, int Rn __unused,
                                int lsb __unused, int width __unused)
{
    /* Placeholder for UBFX */
    mArmPC[mInum++] = pc();

    mMips->NOP2();
    NOT_IMPLEMENTED();
}

// ----------------------------------------------------------------------------
// Address Processing...
// ----------------------------------------------------------------------------

void ArmToMips64Assembler::ADDR_ADD(int cc,
                                    int s, int Rd, int Rn, uint32_t Op2)
{
//    if(cc != AL){ NOT_IMPLEMENTED(); return;} //Not required
//    if(s != 0) { NOT_IMPLEMENTED(); return;} //Not required
    dataProcessing(opADD64, cc, s, Rd, Rn, Op2);
}

void ArmToMips64Assembler::ADDR_SUB(int cc,
                                    int s, int Rd, int Rn, uint32_t Op2)
{
//    if(cc != AL){ NOT_IMPLEMENTED(); return;} //Not required
//    if(s != 0) { NOT_IMPLEMENTED(); return;} //Not required
    dataProcessing(opSUB64, cc, s, Rd, Rn, Op2);
}

void ArmToMips64Assembler::ADDR_LDR(int cc __unused, int Rd,
                                    int Rn, uint32_t offset) {
    mArmPC[mInum++] = pc();
    // work-around for ARM default address mode of immed12_pre(0)
    if (offset > AMODE_UNSUPPORTED) offset = 0;
    switch (offset) {
        case 0:
            amode.value = 0;
            amode.writeback = 0;
            // fall thru to next case ....
        case AMODE_IMM_12_PRE:
            if (Rn == ARMAssemblerInterface::SP) {
                Rn = R_sp;  // convert LDR via Arm SP to LD via Mips SP
            }
            mMips->LD(Rd, Rn, amode.value);
            if (amode.writeback) {  // OPTIONAL writeback on pre-index mode
                mMips->DADDIU(Rn, Rn, amode.value);
            }
            break;
        case AMODE_IMM_12_POST:
            if (Rn == ARMAssemblerInterface::SP) {
                Rn = R_sp;  // convert LDR thru Arm SP to LD thru Mips SP
            }
            mMips->LD(Rd, Rn, 0);
            mMips->DADDIU(Rn, Rn, amode.value);
            break;
        case AMODE_REG_SCALE_PRE:
            // we only support simple base + index, no advanced modes for this one yet
            mMips->DADDU(R_at, Rn, amode.reg);
            mMips->LD(Rd, R_at, 0);
            break;
    }
}

void ArmToMips64Assembler::ADDR_STR(int cc __unused, int Rd,
                                    int Rn, uint32_t offset) {
    mArmPC[mInum++] = pc();
    // work-around for ARM default address mode of immed12_pre(0)
    if (offset > AMODE_UNSUPPORTED) offset = 0;
    switch (offset) {
        case 0:
            amode.value = 0;
            amode.writeback = 0;
            // fall thru to next case ....
        case AMODE_IMM_12_PRE:
            if (Rn == ARMAssemblerInterface::SP) {
                Rn = R_sp;  // convert STR thru Arm SP to SD thru Mips SP
            }
            if (amode.writeback) {  // OPTIONAL writeback on pre-index mode
                // If we will writeback, then update the index reg, then store.
                // This correctly handles stack-push case.
                mMips->DADDIU(Rn, Rn, amode.value);
                mMips->SD(Rd, Rn, 0);
            } else {
                // No writeback so store offset by value
                mMips->SD(Rd, Rn, amode.value);
            }
            break;
        case AMODE_IMM_12_POST:
            mMips->SD(Rd, Rn, 0);
            mMips->DADDIU(Rn, Rn, amode.value);  // post index always writes back
            break;
        case AMODE_REG_SCALE_PRE:
            // we only support simple base + index, no advanced modes for this one yet
            mMips->DADDU(R_at, Rn, amode.reg);
            mMips->SD(Rd, R_at, 0);
            break;
    }
}

#if 0
#pragma mark -
#pragma mark MIPS Assembler...
#endif


//**************************************************************************
//**************************************************************************
//**************************************************************************


/* MIPS64 assembler
** this is a subset of mips64r6, targeted specifically at ARM instruction
** replacement in the pixelflinger/codeflinger code.
**
** This class is derived from the MIPSAssembler class and overrides only
** the MIPS64r6-specific instructions.
*/

MIPS64Assembler::MIPS64Assembler(const sp<Assembly>& assembly, ArmToMips64Assembler *parent)
    : MIPSAssembler::MIPSAssembler(assembly, NULL), mParent(parent)
{
}

MIPS64Assembler::MIPS64Assembler(void* assembly, ArmToMips64Assembler *parent)
    : MIPSAssembler::MIPSAssembler(assembly), mParent(parent)
{
}

MIPS64Assembler::~MIPS64Assembler()
{
}

void MIPS64Assembler::reset()
{
    if (mAssembly != NULL) {
        mBase = mPC = (uint32_t *)mAssembly->base();
    } else {
        mPC = mBase = base();
    }
    mBranchTargets.clear();
    mLabels.clear();
    mLabelsInverseMapping.clear();
    mComments.clear();
}


void MIPS64Assembler::disassemble(const char* name __unused)
{
    char di_buf[140];

    bool arm_disasm_fmt = (mParent->mArmDisassemblyBuffer != NULL);

    typedef char dstr[40];
    dstr *lines = (dstr *)mParent->mArmDisassemblyBuffer;

    if (mParent->mArmDisassemblyBuffer != NULL) {
        for (int i = 0; i < mParent->mArmInstrCount; ++i) {
            string_detab(lines[i]);
        }
    }

    size_t count = pc() - base();
    uint32_t* mipsPC = base();

    while (count--) {
        ssize_t label = mLabelsInverseMapping.indexOfKey(mipsPC);
        if (label >= 0) {
            ALOGW("%s:\n", mLabelsInverseMapping.valueAt(label));
        }
        ssize_t comment = mComments.indexOfKey(mipsPC);
        if (comment >= 0) {
            ALOGW("; %s\n", mComments.valueAt(comment));
        }
        ::mips_disassem(mipsPC, di_buf, arm_disasm_fmt);
        string_detab(di_buf);
        string_pad(di_buf, 30);
        ALOGW("%08lx: %08x %s", uintptr_t(mipsPC), uint32_t(*mipsPC), di_buf);
        mipsPC++;
    }
}

void MIPS64Assembler::fix_branches()
{
    // fixup all the branches
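    // each label address is patched into the low 16 bits of the branch word
    // as a signed word offset relative to the instruction after the branch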
    size_t count = mBranchTargets.size();
    while (count--) {
        const branch_target_t& bt = mBranchTargets[count];
        uint32_t* target_pc = mLabels.valueFor(bt.label);
        LOG_ALWAYS_FATAL_IF(!target_pc,
                            "error resolving branch targets, target_pc is null");
        int32_t offset = int32_t(target_pc - (bt.pc + 1));
        *bt.pc |= offset & 0x00FFFF;
    }
}

void MIPS64Assembler::DADDU(int Rd, int Rs, int Rt)
{
    *mPC++ = (spec_op<<OP_SHF) | (daddu_fn<<FUNC_SHF)
             | (Rs<<RS_SHF) | (Rt<<RT_SHF) | (Rd<<RD_SHF);
}

void MIPS64Assembler::DADDIU(int Rt, int Rs, int16_t imm)
{
    *mPC++ = (daddiu_op<<OP_SHF) | (Rt<<RT_SHF) | (Rs<<RS_SHF) | (imm & MSK_16);
}

void MIPS64Assembler::DSUBU(int Rd, int Rs, int Rt)
{
    *mPC++ = (spec_op<<OP_SHF) | (dsubu_fn<<FUNC_SHF) |
             (Rs<<RS_SHF) | (Rt<<RT_SHF) | (Rd<<RD_SHF);
}

void MIPS64Assembler::DSUBIU(int Rt, int Rs, int16_t imm)  // really daddiu(Rt, Rs, -imm)
{
    *mPC++ = (daddiu_op<<OP_SHF) | (Rt<<RT_SHF) | (Rs<<RS_SHF) | ((-imm) & MSK_16);
}

void MIPS64Assembler::MUL(int Rd, int Rs, int Rt)
{
    *mPC++ = (spec_op<<OP_SHF) | (mul_fn<<RE_SHF) | (sop30_fn<<FUNC_SHF) |
             (Rs<<RS_SHF) | (Rt<<RT_SHF) | (Rd<<RD_SHF);
}

void MIPS64Assembler::MUH(int Rd, int Rs, int Rt)
{
    *mPC++ = (spec_op<<OP_SHF) | (muh_fn<<RE_SHF) | (sop30_fn<<FUNC_SHF) |
             (Rs<<RS_SHF) | (Rt<<RT_SHF) | (Rd<<RD_SHF);
}

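// MIPS r6 re-encoded CLO/CLZ as SPECIAL-format instructions: function
// fields 0x11 (CLO) and 0x10 (CLZ), with a 1 in the sa field.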
void MIPS64Assembler::CLO(int Rd, int Rs)
{
    *mPC++ = (spec_op<<OP_SHF) | (17<<FUNC_SHF) |
             (Rd<<RD_SHF) | (Rs<<RS_SHF) | (1<<RE_SHF);
}

void MIPS64Assembler::CLZ(int Rd, int Rs)
{
    *mPC++ = (spec_op<<OP_SHF) | (16<<FUNC_SHF) |
             (Rd<<RD_SHF) | (Rs<<RS_SHF) | (1<<RE_SHF);
}

void MIPS64Assembler::LD(int Rt, int Rbase, int16_t offset)
{
    *mPC++ = (ld_op<<OP_SHF) | (Rbase<<RS_SHF) | (Rt<<RT_SHF) | (offset & MSK_16);
}

void MIPS64Assembler::SD(int Rt, int Rbase, int16_t offset)
{
    *mPC++ = (sd_op<<OP_SHF) | (Rbase<<RS_SHF) | (Rt<<RT_SHF) | (offset & MSK_16);
}

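// MIPS r6 folds LUI into AUI: LUI rt, imm is AUI rt, $zero, imm, which is
// why the aui_op opcode is used here (the rs field is implicitly 0).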
void MIPS64Assembler::LUI(int Rt, int16_t offset)
{
    *mPC++ = (aui_op<<OP_SHF) | (Rt<<RT_SHF) | (offset & MSK_16);
}

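// MIPS r6 removed the dedicated JR encoding; JALR with rd = $zero (implicit
// here, since no rd field is set) behaves as JR, and the trailing NOP fills
// its delay slot.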
void MIPS64Assembler::JR(int Rs)
{
    *mPC++ = (spec_op<<OP_SHF) | (Rs<<RS_SHF) | (jalr_fn << FUNC_SHF);
    MIPS64Assembler::NOP();
}

}; // namespace android