Lines Matching refs:r9

108 // 	r9    ITBmiss/DTBmiss scratch
262 srl r25, isr_v_hlt, r9 // Get HLT bit
266 blbs r9, sys_halt_interrupt // halt_interrupt if HLT bit set
295 subq r13, 0x1d, r9 // Check for 1d, 1e, 1f
297 cmovge r9, r8, r12 // if .ge. 1d, then take shifted value
323 // This routine can use the PALshadow registers r8, r9, and r10
366 // This routine can use the PALshadow registers r8, r9, and r10
377 mfpr r9, ev5__mm_stat // Get read/write bit. E0.
410 // r9 - original MMstat
574 srl r13, mm_stat_v_opcode, r9 // Shift opcode field to ls bits
582 and r9, mm_stat_m_opcode, r9 // Clean all but opcode
584 cmpeq r9, evx_opc_sync, r9 // Is the opcode fetch/fetchm?
585 bne r9, dfault_fetch_ldr31_err // Yes, dismiss the fault
591 srl r13, mm_stat_v_ra, r9 // Shift rnum to low bits
593 and r9, 0x1F, r9 // isolate rnum
596 cmpeq r9, 0x1F, r9 // Is the rnum r31 or f31?
597 bne r9, dfault_fetch_ldr31_err // Yes, dismiss the fault
904 // r9 - mmstat<opcode> right justified
928 mfpr r9, pal_base
934 subq r9, r8, r8 // pal_base - offset
936 lda r9, pal_itb_ldq-pal_base(r8)
939 beq r9, dfault_do_bugcheck
940 lda r9, pal_dtb_ldq-pal_base(r8)
942 beq r9, dfault_do_bugcheck
1404 srl r9, mm_stat_v_opcode, r25 // shift opc to <0>
1413 blbs r9, invalid_dpte_no_dismiss // mm_stat<0> set on store or fetchm
1416 srl r9, mm_stat_v_ra, r25 // Shift rnum to low bits
1442 and r9, 1, r13 // save r/w flag
1791 SAVE_GPR(r9,CNS_Q_GPR+0x48,r1)
1859 SAVE_SHADOW( r9,CNS_Q_SHADOW+0x08,r1)
1916 ldq_p r9, eiStat(r14) // Unlocks eiAddr, bcTagAddr, fillSyn.
1928 SAVE_SHADOW(r9,CNS_Q_EI_STAT,r1);
2226 RESTORE_SHADOW( r9,CNS_Q_SHADOW+0x08,r1)
2266 RESTORE_GPR(r9,CNS_Q_GPR+0x48,r1)
3867 lda r9, 1(r31) // set enclr flag
3871 bis r31, r31, r9 // clear enclr flag
3901 blbc r9, perfmon_en_noclr0 // if enclr flag set, clear ctr0 field
3919 blbc r9, perfmon_en_noclr1 // if enclr flag set, clear ctr1 field
3937 blbc r9, perfmon_en_noclr2 // if enclr flag set, clear ctr2 field
4044 LDLI(r9, (0xFFFFFFFF)) // ctr2<15:0>,ctr1<15:0> mask
4045 sll r9, pmctr_v_ctr1, r9
4046 or r8, r9, r8 // or ctr2, ctr1, ctr0 mask
4072 and r17, 63, r9
4073 bis r8, r9, r8
4090 xor r17, r16, r9
4091 and r9, 7, r9
4093 bne r9, unaligned
4095 ldq_u r9, 0(r16)
4098 mskql r9, r17, r9
4099 bis r8, r9, r8
4118 ldq_u r9, 0(r16)
4119 mskqh r9, r18, r9
4120 bis r10, r9, r10
4125 cmpule r18, 8, r9
4126 bne r9, unaligned_few_left
4131 ldq_u r9, 7(r17)
4133 extqh r9, r17, r9
4134 bis r8, r9, r12
4150 ldq_u r9, 7(r17)
4153 extqh r9, r17, r13
4161 extql r9, r17, r12
4169 mov r8, r9
4172 extql r9, r17, r9
4174 bis r8, r9, r8
4179 ldq_u r9, -1(r25)
4181 extqh r9, r17, r9
4182 bis r8, r9, r8
4183 insqh r8, r16, r9
4196 and r9, r13, r9
4198 bis r9, r25, r9
4199 stq_u r9, -1(r10)