1/*
2 * Copyright (c) 2003-2006 The Regents of The University of Michigan
3 * Copyright (c) 1992-1995 Hewlett-Packard Development Company
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are
8 * met: redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer;
10 * redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution;
13 * neither the name of the copyright holders nor the names of its
14 * contributors may be used to endorse or promote products derived from
15 * this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
18 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
19 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
20 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
21 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
22 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
23 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
27 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 *
29 * Authors: Ali G. Saidi
30 *          Nathan L. Binkert
31 */
32
// Modified to use the Hudson-style "impure.h" instead of ev5_impure.sdl,
// since we don't have a mechanism to expand the data structures. -- pb Nov/95
#include "ev5_defs.h"
#include "ev5_impure.h"
#include "ev5_alpha_defs.h"
#include "ev5_paldef.h"
#include "ev5_osfalpha_defs.h"
#include "fromHudsonMacros.h"
#include "fromHudsonOsf.h"
#include "dc21164FromGasSources.h"
43
44#define DEBUGSTORE(c) nop
45
46#define DEBUG_EXC_ADDR()\
47        bsr	r25, put_exc_addr; \
48        DEBUGSTORE(13)		; \
49        DEBUGSTORE(10)
50
// This is the fix for user-mode super-page references causing the machine
// to crash: make hw_rei_spe a plain hw_rei.
#define hw_rei_spe	hw_rei
54
#define vmaj 1
#define vmin 18
#define vms_pal 1
#define osf_pal 2
#define pal_type osf_pal
#define osfpal_version_l ((pal_type<<16) | (vmaj<<8) | (vmin<<0))
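// For reference, with pal_type=2 (OSF), vmaj=1 and vmin=18 this evaluates to
// 0x00020112, i.e. <pal_type@16> ! <vmaj@8> ! <vmin@0>, as stored at reset+8
// below.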
61
62
63///////////////////////////
64// PALtemp register usage
65///////////////////////////
66
67//  The EV5 Ibox holds 24 PALtemp registers.  This maps the OSF PAL usage
68//  for these PALtemps:
69//
70//	pt0   local scratch
71//	pt1   local scratch
72//	pt2   entUna					pt_entUna
73//	pt3   CPU specific impure area pointer		pt_impure
74//	pt4   memory management temp
75//	pt5   memory management temp
76//	pt6   memory management temp
77//	pt7   entIF					pt_entIF
78//	pt8   intmask					pt_intmask
79//	pt9   entSys					pt_entSys
80//	pt10
81//	pt11  entInt					pt_entInt
82//	pt12  entArith					pt_entArith
83//	pt13  reserved for system specific PAL
84//	pt14  reserved for system specific PAL
85//	pt15  reserved for system specific PAL
86//	pt16  MISC: scratch ! WHAMI<7:0> ! 0 0 0 MCES<4:0> pt_misc, pt_whami,
87//                pt_mces
88//	pt17  sysval					pt_sysval
89//	pt18  usp					pt_usp
90//	pt19  ksp					pt_ksp
91//	pt20  PTBR					pt_ptbr
92//	pt21  entMM					pt_entMM
93//	pt22  kgp					pt_kgp
94//	pt23  PCBB					pt_pcbb
95//
96//
97
98
99/////////////////////////////
100// PALshadow register usage
101/////////////////////////////
102
103//
104// EV5 shadows R8-R14 and R25 when in PALmode and ICSR<shadow_enable> = 1.
105// This maps the OSF PAL usage of R8 - R14 and R25:
106//
107// 	r8    ITBmiss/DTBmiss scratch
108// 	r9    ITBmiss/DTBmiss scratch
109// 	r10   ITBmiss/DTBmiss scratch
110//	r11   PS
111//	r12   local scratch
112//	r13   local scratch
113//	r14   local scratch
114//	r25   local scratch
115//
116
117
118
119// .sbttl	"PALcode configuration options"
120
// There are a number of options that may be assembled into this version of
// PALcode.  They should be adjusted in a prefix assembly file (i.e. do not
// edit the following); the options chosen tailor the resulting PALcode to the
// desired target system.

// Multiprocessor support can be enabled for a maximum of n processors by
// setting the following to the number of processors in the system.
// Note that this is really the maximum cpuid.
129
#define max_cpuid 1
#ifndef max_cpuid
#define max_cpuid 8
#endif

#define osf_svmin 1
#define osfpal_version_h ((max_cpuid<<16) | (osf_svmin<<0))
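// For reference, with max_cpuid=1 and osf_svmin=1 this evaluates to
// 0x00010001, i.e. <max_cpuid@16> ! <osf_svmin@0>.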
137
138//
139// RESET	-  Reset Trap Entry Point
140//
141// RESET - offset 0000
142// Entry:
143//	Vectored into via hardware trap on reset, or branched to
144//	on swppal.
145//
146//	r0 = whami
147//	r1 = pal_base
148//	r2 = base of scratch area
149//	r3 = halt code
150//
151//
152// Function:
153//
154//
155
156        .text	0
157        . = 0x0000
158        .globl _start
159        .globl Pal_Base
160_start:
161Pal_Base:
162        HDW_VECTOR(PAL_RESET_ENTRY)
163Trap_Reset:
164        nop
165        /*
166         * store into r1
167         */
168        br r1,sys_reset
169
170        // Specify PAL version info as a constant
171        // at a known location (reset + 8).
172
173        .long osfpal_version_l		// <pal_type@16> ! <vmaj@8> ! <vmin@0>
174        .long osfpal_version_h		// <max_cpuid@16> ! <osf_svmin@0>
175        .long 0
176        .long 0
177pal_impure_start:
178        .quad 0
179pal_debug_ptr:
180        .quad 0				// reserved for debug pointer ; 20
181
182
183//
184// IACCVIO - Istream Access Violation Trap Entry Point
185//
186// IACCVIO - offset 0080
187// Entry:
188//	Vectored into via hardware trap on Istream access violation or sign check error on PC.
189//
190// Function:
191//	Build stack frame
192//	a0 <- Faulting VA
193//	a1 <- MMCSR  (1 for ACV)
194//	a2 <- -1 (for ifetch fault)
195//	vector via entMM
196//
197
198        HDW_VECTOR(PAL_IACCVIO_ENTRY)
199Trap_Iaccvio:
200        DEBUGSTORE(0x42)
201        sll	r11, 63-osfps_v_mode, r25 // Shift mode up to MS bit
202        mtpr	r31, ev5__ps		// Set Ibox current mode to kernel
203
204        bis	r11, r31, r12		// Save PS
205        bge	r25, TRAP_IACCVIO_10_		// no stack swap needed if cm=kern
206
207
208        mtpr	r31, ev5__dtb_cm	// Set Mbox current mode to kernel -
209                                        //     no virt ref for next 2 cycles
210        mtpr	r30, pt_usp		// save user stack
211
212        bis	r31, r31, r12		// Set new PS
213        mfpr	r30, pt_ksp
214
215TRAP_IACCVIO_10_:
216        lda	sp, 0-osfsf_c_size(sp)// allocate stack space
217        mfpr	r14, exc_addr		// get pc
218
219        stq	r16, osfsf_a0(sp)	// save regs
220        bic	r14, 3, r16		// pass pc/va as a0
221
222        stq	r17, osfsf_a1(sp)	// a1
223        or	r31, mmcsr_c_acv, r17	// pass mm_csr as a1
224
225        stq	r18, osfsf_a2(sp) 	// a2
226        mfpr	r13, pt_entmm		// get entry point
227
228        stq	r11, osfsf_ps(sp)	// save old ps
229        bis	r12, r31, r11		// update ps
230
231        stq	r16, osfsf_pc(sp)	// save pc
232        stq	r29, osfsf_gp(sp) 	// save gp
233
234        mtpr	r13, exc_addr		// load exc_addr with entMM
235                                        // 1 cycle to hw_rei
236        mfpr	r29, pt_kgp		// get the kgp
237
238        subq	r31, 1, r18		// pass flag of istream, as a2
239        hw_rei_spe
240
241
242//
243// INTERRUPT - Interrupt Trap Entry Point
244//
245// INTERRUPT - offset 0100
246// Entry:
247//	Vectored into via trap on hardware interrupt
248//
249// Function:
250//	check for halt interrupt
251//	check for passive release (current ipl geq requestor)
//	if necessary, switch to kernel mode; push stack frame,
//	update ps (including current mode and ipl copies), sp, and gp
254//	pass the interrupt info to the system module
255//
256//
257        HDW_VECTOR(PAL_INTERRUPT_ENTRY)
258Trap_Interrupt:
        mfpr    r13, ev5__intid         // Fetch level of interrupter
260        mfpr    r25, ev5__isr           // Fetch interrupt summary register
261
262        srl     r25, isr_v_hlt, r9     // Get HLT bit
263        mfpr	r14, ev5__ipl
264
265        mtpr	r31, ev5__dtb_cm	// Set Mbox current mode to kern
266        blbs    r9, sys_halt_interrupt	// halt_interrupt if HLT bit set
267
        cmple   r13, r14, r8            // r8 = 1 if intid <= ipl
        bne     r8, sys_passive_release // Passive release if current interrupt level <= ipl
270
271        and	r11, osfps_m_mode, r10 // get mode bit
272        beq	r10, TRAP_INTERRUPT_10_		// Skip stack swap in kernel
273
274        mtpr	r30, pt_usp		// save user stack
275        mfpr	r30, pt_ksp		// get kern stack
276
277TRAP_INTERRUPT_10_:
278        lda	sp, (0-osfsf_c_size)(sp)// allocate stack space
279        mfpr	r14, exc_addr		// get pc
280
281        stq	r11, osfsf_ps(sp) 	// save ps
282        stq	r14, osfsf_pc(sp) 	// save pc
283
284        stq     r29, osfsf_gp(sp)       // push gp
285        stq	r16, osfsf_a0(sp)	// a0
286
287//	pvc_violate 354			// ps is cleared anyway,  if store to stack faults.
288        mtpr    r31, ev5__ps            // Set Ibox current mode to kernel
289        stq	r17, osfsf_a1(sp)	// a1
290
291        stq	r18, osfsf_a2(sp) 	// a2
292        subq	r13, 0x11, r12		// Start to translate from EV5IPL->OSFIPL
293
294        srl	r12, 1, r8		// 1d, 1e: ipl 6.  1f: ipl 7.
295        subq	r13, 0x1d, r9		// Check for 1d, 1e, 1f
296
297        cmovge	r9, r8, r12		// if .ge. 1d, then take shifted value
298        bis	r12, r31, r11		// set new ps
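        // Informal sketch of the EV5IPL->OSFIPL arithmetic above: intid 0x14
        // yields 0x14-0x11 = 3, while 0x1d/0x1e give (intid-0x11)>>1 = 6 and
        // 0x1f gives 7, matching the "1d,1e: ipl 6.  1f: ipl 7" note above.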
299
300        mfpr	r12, pt_intmask
301        and	r11, osfps_m_ipl, r14	// Isolate just new ipl (not really needed, since all non-ipl bits zeroed already)
302
303        /*
304         * Lance had space problems. We don't.
305         */
306        extbl	r12, r14, r14		// Translate new OSFIPL->EV5IPL
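                                        // pt_intmask packs one EV5 IPL value
                                        // per byte, indexed by OSF IPL (see
                                        // rti_to_kern below), so extbl picks
                                        // the EV5 IPL to load for the new ipl.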
307        mfpr	r29, pt_kgp		// update gp
308        mtpr	r14, ev5__ipl		// load the new IPL into Ibox
309        br	r31, sys_interrupt	// Go handle interrupt
310
311
312
313//
314// ITBMISS - Istream TBmiss Trap Entry Point
315//
316// ITBMISS - offset 0180
317// Entry:
318//	Vectored into via hardware trap on Istream translation buffer miss.
319//
320// Function:
321//       Do a virtual fetch of the PTE, and fill the ITB if the PTE is valid.
322//       Can trap into DTBMISS_DOUBLE.
323//       This routine can use the PALshadow registers r8, r9, and r10
324//
325//
326
327        HDW_VECTOR(PAL_ITB_MISS_ENTRY)
328Trap_Itbmiss:
329        // Real MM mapping
330        nop
331        mfpr	r8, ev5__ifault_va_form // Get virtual address of PTE.
332
333        nop
334        mfpr    r10, exc_addr           // Get PC of faulting instruction in case of DTBmiss.
335
336pal_itb_ldq:
337        ld_vpte r8, 0(r8)             	// Get PTE, traps to DTBMISS_DOUBLE in case of TBmiss
338        mtpr	r10, exc_addr		// Restore exc_address if there was a trap.
339
340        mfpr	r31, ev5__va		// Unlock VA in case there was a double miss
341        nop
342
343        and	r8, osfpte_m_foe, r25 	// Look for FOE set.
344        blbc	r8, invalid_ipte_handler // PTE not valid.
345
346        nop
347        bne	r25, foe_ipte_handler	// FOE is set
348
349        nop
350        mtpr	r8, ev5__itb_pte	// Ibox remembers the VA, load the PTE into the ITB.
351
352        hw_rei_stall			//
353
354
355//
356// DTBMISS_SINGLE - Dstream Single TBmiss Trap Entry Point
357//
358// DTBMISS_SINGLE - offset 0200
359// Entry:
360//	Vectored into via hardware trap on Dstream single translation
361//      buffer miss.
362//
363// Function:
364//	Do a virtual fetch of the PTE, and fill the DTB if the PTE is valid.
365//	Can trap into DTBMISS_DOUBLE.
366//	This routine can use the PALshadow registers r8, r9, and r10
367//
368
369        HDW_VECTOR(PAL_DTB_MISS_ENTRY)
370Trap_Dtbmiss_Single:
371        mfpr	r8, ev5__va_form      	// Get virtual address of PTE - 1 cycle delay.  E0.
372        mfpr    r10, exc_addr           // Get PC of faulting instruction in case of error.  E1.
373
374//	DEBUGSTORE(0x45)
375//	DEBUG_EXC_ADDR()
376                                        // Real MM mapping
377        mfpr    r9, ev5__mm_stat	// Get read/write bit.  E0.
378        mtpr	r10, pt6		// Stash exc_addr away
379
380pal_dtb_ldq:
381        ld_vpte r8, 0(r8)             	// Get PTE, traps to DTBMISS_DOUBLE in case of TBmiss
382        nop				// Pad MF VA
383
384        mfpr	r10, ev5__va            // Get original faulting VA for TB load.  E0.
385        nop
386
387        mtpr    r8, ev5__dtb_pte       	// Write DTB PTE part.   E0.
388        blbc    r8, invalid_dpte_handler    // Handle invalid PTE
389
390        mtpr    r10, ev5__dtb_tag      	// Write DTB TAG part, completes DTB load.  No virt ref for 3 cycles.
391        mfpr	r10, pt6
392
393                                        // Following 2 instructions take 2 cycles
394        mtpr    r10, exc_addr           // Return linkage in case we trapped.  E1.
395        mfpr	r31,  pt0		// Pad the write to dtb_tag
396
397        hw_rei                          // Done, return
398
399
400//
401// DTBMISS_DOUBLE - Dstream Double TBmiss Trap Entry Point
402//
403//
404// DTBMISS_DOUBLE - offset 0280
405// Entry:
406//	Vectored into via hardware trap on Double TBmiss from single
407//      miss flows.
408//
409//	r8   - faulting VA
410//	r9   - original MMstat
411//	r10 - original exc_addr (both itb,dtb miss)
412//	pt6 - original exc_addr (dtb miss flow only)
413//	VA IPR - locked with original faulting VA
414//
415// Function:
416// 	Get PTE, if valid load TB and return.
417//	If not valid then take TNV/ACV exception.
418//
419//	pt4 and pt5 are reserved for this flow.
420//
421//
422//
423
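// A sketch of the arithmetic below, assuming the usual OSF constants
// (page_offset_size_bits = 13, page_seg_size_bits = 10, 8-byte PTEs):
// the faulting VA here is the virtual address of the level 3 PTE, so its
// seg2/seg3 fields hold the original VA's seg1/seg2.  Each sll discards
// everything above the segment being extracted, and the srl by
// 61-page_seg_size_bits drops that 10-bit segment into bits <12:3>
// (i.e. segN*8), ready to add to the page table base for that level.
//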
424        HDW_VECTOR(PAL_DOUBLE_MISS_ENTRY)
425Trap_Dtbmiss_double:
426        mtpr 	r8, pt4			// save r8 to do exc_addr check
427        mfpr	r8, exc_addr
428        blbc	r8, Trap_Dtbmiss_Single	//if not in palmode, should be in the single routine, dummy!
429        mfpr	r8, pt4			// restore r8
430        nop
431        mtpr	r22, pt5		// Get some scratch space. E1.
432                                        // Due to virtual scheme, we can skip the first lookup and go
433                                        // right to fetch of level 2 PTE
434        sll     r8, (64-((2*page_seg_size_bits)+page_offset_size_bits)), r22  // Clean off upper bits of VA
435        mtpr	r21, pt4		// Get some scratch space. E1.
436
437        srl    	r22, 61-page_seg_size_bits, r22 // Get Va<seg1>*8
438        mfpr	r21, pt_ptbr		// Get physical address of the page table.
439
440        nop
441        addq    r21, r22, r21           // Index into page table for level 2 PTE.
442
443        sll    	r8, (64-((1*page_seg_size_bits)+page_offset_size_bits)), r22  // Clean off upper bits of VA
444        ldq_p   	r21, 0(r21)            	// Get level 2 PTE (addr<2:0> ignored)
445
        srl    	r22, 61-page_seg_size_bits, r22	// Get Va<seg2>*8
447        blbc 	r21, double_pte_inv		// Check for Invalid PTE.
448
449        srl    	r21, 32, r21			// extract PFN from PTE
450        sll     r21, page_offset_size_bits, r21	// get PFN * 2^13 for add to <seg3>*8
451
452        addq    r21, r22, r21           // Index into page table for level 3 PTE.
453        nop
454
455        ldq_p   	r21, 0(r21)            	// Get level 3 PTE (addr<2:0> ignored)
456        blbc	r21, double_pte_inv	// Check for invalid PTE.
457
458        mtpr	r21, ev5__dtb_pte	// Write the PTE.  E0.
459        mfpr	r22, pt5		// Restore scratch register
460
461        mtpr	r8, ev5__dtb_tag	// Write the TAG. E0.  No virtual references in subsequent 3 cycles.
462        mfpr	r21, pt4		// Restore scratch register
463
464        nop				// Pad write to tag.
465        nop
466
467        nop				// Pad write to tag.
468        nop
469
470        hw_rei
471
472
473
474//
475// UNALIGN -- Dstream unalign trap
476//
477// UNALIGN - offset 0300
478// Entry:
479//	Vectored into via hardware trap on unaligned Dstream reference.
480//
481// Function:
482//	Build stack frame
483//	a0 <- Faulting VA
484//	a1 <- Opcode
485//	a2 <- src/dst register number
486//	vector via entUna
487//
488
489        HDW_VECTOR(PAL_UNALIGN_ENTRY)
490Trap_Unalign:
491/*	DEBUGSTORE(0x47)*/
492        sll	r11, 63-osfps_v_mode, r25 // Shift mode up to MS bit
493        mtpr	r31, ev5__ps		// Set Ibox current mode to kernel
494
495        mfpr	r8, ev5__mm_stat	// Get mmstat --ok to use r8, no tbmiss
496        mfpr	r14, exc_addr		// get pc
497
498        srl	r8, mm_stat_v_ra, r13	// Shift Ra field to ls bits
499        blbs	r14, pal_pal_bug_check  // Bugcheck if unaligned in PAL
500
501        blbs	r8, UNALIGN_NO_DISMISS // lsb only set on store or fetch_m
502                                        // not set, must be a load
503        and	r13, 0x1F, r8		// isolate ra
504
505        cmpeq   r8, 0x1F, r8		// check for r31/F31
        bne     r8, dfault_fetch_ldr31_err // if it's a load to r31 or f31 -- dismiss the fault
507
508UNALIGN_NO_DISMISS:
509        bis	r11, r31, r12		// Save PS
510        bge	r25, UNALIGN_NO_DISMISS_10_		// no stack swap needed if cm=kern
511
512
513        mtpr	r31, ev5__dtb_cm	// Set Mbox current mode to kernel -
514                                        //     no virt ref for next 2 cycles
515        mtpr	r30, pt_usp		// save user stack
516
517        bis	r31, r31, r12		// Set new PS
518        mfpr	r30, pt_ksp
519
520UNALIGN_NO_DISMISS_10_:
521        mfpr	r25, ev5__va		// Unlock VA
522        lda	sp, 0-osfsf_c_size(sp)// allocate stack space
523
524        mtpr	r25, pt0		// Stash VA
525        stq	r18, osfsf_a2(sp) 	// a2
526
527        stq	r11, osfsf_ps(sp)	// save old ps
528        srl	r13, mm_stat_v_opcode-mm_stat_v_ra, r25// Isolate opcode
529
530        stq	r29, osfsf_gp(sp) 	// save gp
531        addq	r14, 4, r14		// inc PC past the ld/st
532
533        stq	r17, osfsf_a1(sp)	// a1
        and	r25, mm_stat_m_opcode, r17	// Clean opcode for a1
535
536        stq	r16, osfsf_a0(sp)	// save regs
537        mfpr	r16, pt0		// a0 <- va/unlock
538
539        stq	r14, osfsf_pc(sp)	// save pc
540        mfpr	r25, pt_entuna		// get entry point
541
542
543        bis	r12, r31, r11		// update ps
544        br 	r31, unalign_trap_cont
545
546
547//
548// DFAULT	- Dstream Fault Trap Entry Point
549//
550// DFAULT - offset 0380
551// Entry:
552//	Vectored into via hardware trap on dstream fault or sign check
553//      error on DVA.
554//
555// Function:
556//	Ignore faults on FETCH/FETCH_M
557//	Check for DFAULT in PAL
558//	Build stack frame
559//	a0 <- Faulting VA
560//	a1 <- MMCSR (1 for ACV, 2 for FOR, 4 for FOW)
561//	a2 <- R/W
562//	vector via entMM
563//
564//
565        HDW_VECTOR(PAL_D_FAULT_ENTRY)
566Trap_Dfault:
567//	DEBUGSTORE(0x48)
568        sll	r11, 63-osfps_v_mode, r25 // Shift mode up to MS bit
569        mtpr	r31, ev5__ps		// Set Ibox current mode to kernel
570
571        mfpr	r13, ev5__mm_stat	// Get mmstat
572        mfpr	r8, exc_addr		// get pc, preserve r14
573
574        srl	r13, mm_stat_v_opcode, r9 // Shift opcode field to ls bits
575        blbs	r8, dfault_in_pal
576
577        bis	r8, r31, r14		// move exc_addr to correct place
578        bis	r11, r31, r12		// Save PS
579
580        mtpr	r31, ev5__dtb_cm	// Set Mbox current mode to kernel -
581                                        //     no virt ref for next 2 cycles
582        and	r9, mm_stat_m_opcode, r9 // Clean all but opcode
583
584        cmpeq   r9, evx_opc_sync, r9 	// Is the opcode fetch/fetchm?
585        bne     r9, dfault_fetch_ldr31_err   // Yes, dismiss the fault
586
587        //dismiss exception if load to r31/f31
588        blbs	r13, dfault_no_dismiss	// mm_stat<0> set on store or fetchm
589
590                                        // not a store or fetch, must be a load
591        srl	r13, mm_stat_v_ra, r9	// Shift rnum to low bits
592
593        and	r9, 0x1F, r9		// isolate rnum
594        nop
595
596        cmpeq   r9, 0x1F, r9   	// Is the rnum r31 or f31?
597        bne     r9, dfault_fetch_ldr31_err    // Yes, dismiss the fault
598
599dfault_no_dismiss:
600        and	r13, 0xf, r13	// Clean extra bits in mm_stat
601        bge	r25, dfault_trap_cont	// no stack swap needed if cm=kern
602
603
604        mtpr	r30, pt_usp		// save user stack
605        bis	r31, r31, r12		// Set new PS
606
607        mfpr	r30, pt_ksp
608        br	r31, dfault_trap_cont
609
610
611//
612// MCHK	-  Machine Check Trap Entry Point
613//
614// MCHK - offset 0400
615// Entry:
616//	Vectored into via hardware trap on machine check.
617//
618// Function:
619//
620//
621
622        HDW_VECTOR(PAL_MCHK_ENTRY)
623Trap_Mchk:
624        DEBUGSTORE(0x49)
625        mtpr    r31, ic_flush_ctl       // Flush the Icache
626        br      r31, sys_machine_check
627
628
629//
630// OPCDEC	-  Illegal Opcode Trap Entry Point
631//
632// OPCDEC - offset 0480
633// Entry:
634//	Vectored into via hardware trap on illegal opcode.
635//
636//	Build stack frame
637//	a0 <- code
638//	a1 <- unpred
639//	a2 <- unpred
640//	vector via entIF
641//
642//
643
644        HDW_VECTOR(PAL_OPCDEC_ENTRY)
645Trap_Opcdec:
646        DEBUGSTORE(0x4a)
647//simos	DEBUG_EXC_ADDR()
648        sll	r11, 63-osfps_v_mode, r25 // Shift mode up to MS bit
649        mtpr	r31, ev5__ps		// Set Ibox current mode to kernel
650
651        mfpr	r14, exc_addr		// get pc
652        blbs	r14, pal_pal_bug_check	// check opcdec in palmode
653
654        bis	r11, r31, r12		// Save PS
655        bge	r25, TRAP_OPCDEC_10_		// no stack swap needed if cm=kern
656
657
658        mtpr	r31, ev5__dtb_cm	// Set Mbox current mode to kernel -
659                                        //     no virt ref for next 2 cycles
660        mtpr	r30, pt_usp		// save user stack
661
662        bis	r31, r31, r12		// Set new PS
663        mfpr	r30, pt_ksp
664
665TRAP_OPCDEC_10_:
666        lda	sp, 0-osfsf_c_size(sp)// allocate stack space
667        addq	r14, 4, r14		// inc pc
668
669        stq	r16, osfsf_a0(sp)	// save regs
670        bis	r31, osf_a0_opdec, r16	// set a0
671
672        stq	r11, osfsf_ps(sp)	// save old ps
673        mfpr	r13, pt_entif		// get entry point
674
675        stq	r18, osfsf_a2(sp) 	// a2
676        stq	r17, osfsf_a1(sp)	// a1
677
678        stq	r29, osfsf_gp(sp) 	// save gp
679        stq	r14, osfsf_pc(sp)	// save pc
680
681        bis	r12, r31, r11		// update ps
682        mtpr	r13, exc_addr		// load exc_addr with entIF
683                                        // 1 cycle to hw_rei, E1
684
685        mfpr	r29, pt_kgp		// get the kgp, E1
686
687        hw_rei_spe			// done, E1
688
689
690//
691// ARITH	-  Arithmetic Exception Trap Entry Point
692//
693// ARITH - offset 0500
694// Entry:
//	Vectored into via hardware trap on arithmetic exception.
696//
697// Function:
698//	Build stack frame
699//	a0 <- exc_sum
700//	a1 <- exc_mask
701//	a2 <- unpred
702//	vector via entArith
703//
704//
705        HDW_VECTOR(PAL_ARITH_ENTRY)
706Trap_Arith:
707        DEBUGSTORE(0x4b)
708        and	r11, osfps_m_mode, r12 // get mode bit
709        mfpr	r31, ev5__va		// unlock mbox
710
711        bis	r11, r31, r25		// save ps
712        mfpr	r14, exc_addr		// get pc
713
714        nop
715        blbs	r14, pal_pal_bug_check	// arith trap from PAL
716
717        mtpr    r31, ev5__dtb_cm        // Set Mbox current mode to kernel -
718                                        //     no virt ref for next 2 cycles
719        beq	r12, TRAP_ARITH_10_		// if zero we are in kern now
720
721        bis	r31, r31, r25		// set the new ps
722        mtpr	r30, pt_usp		// save user stack
723
724        nop
725        mfpr	r30, pt_ksp		// get kern stack
726
727TRAP_ARITH_10_: 	lda	sp, 0-osfsf_c_size(sp)	// allocate stack space
728        mtpr	r31, ev5__ps		// Set Ibox current mode to kernel
729
730        nop				// Pad current mode write and stq
731        mfpr	r13, ev5__exc_sum	// get the exc_sum
732
733        mfpr	r12, pt_entarith
734        stq	r14, osfsf_pc(sp)	// save pc
735
736        stq	r17, osfsf_a1(sp)
737        mfpr    r17, ev5__exc_mask      // Get exception register mask IPR - no mtpr exc_sum in next cycle
738
739        stq	r11, osfsf_ps(sp)	// save ps
740        bis	r25, r31, r11		// set new ps
741
742        stq	r16, osfsf_a0(sp)	// save regs
743        srl	r13, exc_sum_v_swc, r16	// shift data to correct position
744
745        stq	r18, osfsf_a2(sp)
746//	pvc_violate 354			// ok, but make sure reads of exc_mask/sum are not in same trap shadow
747        mtpr	r31, ev5__exc_sum	// Unlock exc_sum and exc_mask
748
749        stq	r29, osfsf_gp(sp)
750        mtpr	r12, exc_addr		// Set new PC - 1 bubble to hw_rei - E1
751
752        mfpr	r29, pt_kgp		// get the kern gp - E1
753        hw_rei_spe			// done - E1
754
755
756//
757// FEN	-  Illegal Floating Point Operation Trap Entry Point
758//
759// FEN - offset 0580
760// Entry:
761//	Vectored into via hardware trap on illegal FP op.
762//
763// Function:
764//	Build stack frame
765//	a0 <- code
766//	a1 <- unpred
767//	a2 <- unpred
768//	vector via entIF
769//
770//
771
772        HDW_VECTOR(PAL_FEN_ENTRY)
773Trap_Fen:
774        sll	r11, 63-osfps_v_mode, r25 // Shift mode up to MS bit
775        mtpr	r31, ev5__ps		// Set Ibox current mode to kernel
776
777        mfpr	r14, exc_addr		// get pc
778        blbs	r14, pal_pal_bug_check	// check opcdec in palmode
779
780        mfpr	r13, ev5__icsr
781        nop
782
783        bis	r11, r31, r12		// Save PS
784        bge	r25, TRAP_FEN_10_		// no stack swap needed if cm=kern
785
786        mtpr	r31, ev5__dtb_cm	// Set Mbox current mode to kernel -
787                                        //     no virt ref for next 2 cycles
788        mtpr	r30, pt_usp		// save user stack
789
790        bis	r31, r31, r12		// Set new PS
791        mfpr	r30, pt_ksp
792
793TRAP_FEN_10_:
794        lda	sp, 0-osfsf_c_size(sp)// allocate stack space
795        srl     r13, icsr_v_fpe, r25   // Shift FP enable to bit 0
796
797
798        stq	r16, osfsf_a0(sp)	// save regs
799        mfpr	r13, pt_entif		// get entry point
800
801        stq	r18, osfsf_a2(sp) 	// a2
802        stq	r11, osfsf_ps(sp)	// save old ps
803
804        stq	r29, osfsf_gp(sp) 	// save gp
805        bis	r12, r31, r11		// set new ps
806
807        stq	r17, osfsf_a1(sp)	// a1
808        blbs	r25,fen_to_opcdec	// If FP is enabled, this is really OPCDEC.
809
810        bis	r31, osf_a0_fen, r16	// set a0
811        stq	r14, osfsf_pc(sp)	// save pc
812
813        mtpr	r13, exc_addr		// load exc_addr with entIF
814                                        // 1 cycle to hw_rei -E1
815
816        mfpr	r29, pt_kgp		// get the kgp -E1
817
818        hw_rei_spe			// done -E1
819
820//	FEN trap was taken, but the fault is really opcdec.
821        ALIGN_BRANCH
822fen_to_opcdec:
823        addq	r14, 4, r14		// save PC+4
824        bis	r31, osf_a0_opdec, r16	// set a0
825
826        stq	r14, osfsf_pc(sp)	// save pc
827        mtpr	r13, exc_addr		// load exc_addr with entIF
828                                        // 1 cycle to hw_rei
829
830        mfpr	r29, pt_kgp		// get the kgp
831        hw_rei_spe			// done
832
833
834
835//////////////////////////////////////////////////////////////////////////////
836// Misc handlers - Start area for misc code.
837//////////////////////////////////////////////////////////////////////////////
838
839//
840// dfault_trap_cont
841//	A dfault trap has been taken.  The sp has been updated if necessary.
//	Push a stack frame and vector via entMM.
843//
844//	Current state:
845//		r12 - new PS
846//		r13 - MMstat
847//		VA - locked
848//
849//
850        ALIGN_BLOCK
851dfault_trap_cont:
852        lda	sp, 0-osfsf_c_size(sp)// allocate stack space
853        mfpr	r25, ev5__va		// Fetch VA/unlock
854
855        stq	r18, osfsf_a2(sp) 	// a2
856        and	r13, 1, r18		// Clean r/w bit for a2
857
858        stq	r16, osfsf_a0(sp)	// save regs
859        bis	r25, r31, r16		// a0 <- va
860
861        stq	r17, osfsf_a1(sp)	// a1
862        srl	r13, 1, r17		// shift fault bits to right position
863
864        stq	r11, osfsf_ps(sp)	// save old ps
865        bis	r12, r31, r11		// update ps
866
867        stq	r14, osfsf_pc(sp)	// save pc
868        mfpr	r25, pt_entmm		// get entry point
869
870        stq	r29, osfsf_gp(sp) 	// save gp
        cmovlbs	r17, 1, r17		// a1: ACV overrides FOR/FOW.
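                                        // With the EV5 MM_STAT layout
                                        // <3:0> = FOW,FOR,ACV,WR (assumed
                                        // here), the srl above leaves
                                        // {FOW,FOR,ACV} in a1, and this cmov
                                        // forces a1=1 when ACV is set, giving
                                        // 1=ACV, 2=FOR, 4=FOW as documented
                                        // in the DFAULT entry comment.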
872
873        mtpr	r25, exc_addr		// load exc_addr with entMM
874                                        // 1 cycle to hw_rei
875        mfpr	r29, pt_kgp		// get the kgp
876
877        hw_rei_spe			// done
878
879//
880//unalign_trap_cont
881//	An unalign trap has been taken.  Just need to finish up a few things.
882//
883//	Current state:
884//		r25 - entUna
885//		r13 - shifted MMstat
886//
887//
888        ALIGN_BLOCK
889unalign_trap_cont:
890        mtpr	r25, exc_addr		// load exc_addr with entUna
891                                        // 1 cycle to hw_rei
892
893
894        mfpr	r29, pt_kgp		// get the kgp
895        and	r13, mm_stat_m_ra, r18	// Clean Ra for a2
896
897        hw_rei_spe			// done
898
899
900
901//
902// dfault_in_pal
903//	Dfault trap was taken, exc_addr points to a PAL PC.
904//	r9 - mmstat<opcode> right justified
905//	r8 - exception address
906//
907//	These are the cases:
908//		opcode was STQ -- from a stack builder, KSP not valid halt
909//			r14 - original exc_addr
910//			r11 - original PS
911//		opcode was STL_C  -- rti or retsys clear lock_flag by stack write,
912//					KSP not valid halt
913//			r11 - original PS
914//			r14 - original exc_addr
915//		opcode was LDQ -- retsys or rti stack read, KSP not valid halt
916//			r11 - original PS
917//			r14 - original exc_addr
918//		opcode was HW_LD -- itbmiss or dtbmiss, bugcheck due to fault on page tables
919//			r10 - original exc_addr
920//			r11 - original PS
921//
922//
923//
924        ALIGN_BLOCK
925dfault_in_pal:
926        DEBUGSTORE(0x50)
927        bic     r8, 3, r8            // Clean PC
928        mfpr	r9, pal_base
929
930        mfpr	r31, va			// unlock VA
931
932        // if not real_mm, should never get here from miss flows
933
934        subq    r9, r8, r8            // pal_base - offset
935
936        lda     r9, pal_itb_ldq-pal_base(r8)
937        nop
938
939        beq 	r9, dfault_do_bugcheck
940        lda     r9, pal_dtb_ldq-pal_base(r8)
941
942        beq 	r9, dfault_do_bugcheck
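        // Displacement trick: r8 = pal_base - PC, so each lda above computes
        // label - PC and the beq hits exactly when the faulting PC is the
        // pal_itb_ldq or pal_dtb_ldq vpte fetch (page-table fault during a
        // miss flow -> bugcheck).  Any other PAL dstream fault falls through
        // to the KSP-invalid halt below.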
943
944//
945// KSP invalid halt case --
946ksp_inval_halt:
947        DEBUGSTORE(76)
948        bic	r11, osfps_m_mode, r11	// set ps to kernel mode
949        mtpr    r0, pt0
950
951        mtpr	r31, dtb_cm		// Make sure that the CM IPRs are all kernel mode
952        mtpr	r31, ips
953
954        mtpr	r14, exc_addr		// Set PC to instruction that caused trouble
955        bsr     r0, pal_update_pcb      // update the pcb
956
        lda     r0, hlt_c_ksp_inval(r31)  // set halt code to KSP invalid
958        br      r31, sys_enter_console  // enter the console
959
960        ALIGN_BRANCH
961dfault_do_bugcheck:
962        bis	r10, r31, r14		// bugcheck expects exc_addr in r14
963        br	r31, pal_pal_bug_check
964
965
966//
967// dfault_fetch_ldr31_err - ignore faults on fetch(m) and loads to r31/f31
968//	On entry -
969//		r14 - exc_addr
970//		VA is locked
971//
972//
973        ALIGN_BLOCK
974dfault_fetch_ldr31_err:
975        mtpr	r11, ev5__dtb_cm
976        mtpr	r11, ev5__ps		// Make sure ps hasn't changed
977
978        mfpr	r31, va			// unlock the mbox
979        addq	r14, 4, r14		// inc the pc to skip the fetch
980
981        mtpr	r14, exc_addr		// give ibox new PC
982        mfpr	r31, pt0		// pad exc_addr write
983
984        hw_rei
985
986
987
988        ALIGN_BLOCK
989//
990// sys_from_kern
991//	callsys from kernel mode - OS bugcheck machine check
992//
993//
994sys_from_kern:
995        mfpr	r14, exc_addr			// PC points to call_pal
996        subq	r14, 4, r14
997
998        lda	r25, mchk_c_os_bugcheck(r31)    // fetch mchk code
999        br      r31, pal_pal_mchk
1000
1001
1002// Continuation of long call_pal flows
1003//
1004// wrent_tbl
//	Dispatch table for wrent: writes the ent* vectors into the PALtemps.
1006//	4 instructions/entry
1007//	r16 has new value
1008//
1009//
1010        ALIGN_BLOCK
1011wrent_tbl:
1012//orig	pvc_jsr	wrent, dest=1
1013        nop
1014        mtpr	r16, pt_entint
1015
1016        mfpr	r31, pt0		// Pad for mt->mf paltemp rule
1017        hw_rei
1018
1019
1020//orig	pvc_jsr	wrent, dest=1
1021        nop
1022        mtpr	r16, pt_entarith
1023
1024        mfpr    r31, pt0                // Pad for mt->mf paltemp rule
1025        hw_rei
1026
1027
1028//orig	pvc_jsr	wrent, dest=1
1029        nop
1030        mtpr	r16, pt_entmm
1031
1032        mfpr    r31, pt0                // Pad for mt->mf paltemp rule
1033        hw_rei
1034
1035
1036//orig	pvc_jsr	wrent, dest=1
1037        nop
1038        mtpr	r16, pt_entif
1039
1040        mfpr    r31, pt0                // Pad for mt->mf paltemp rule
1041        hw_rei
1042
1043
1044//orig	pvc_jsr	wrent, dest=1
1045        nop
1046        mtpr	r16, pt_entuna
1047
1048        mfpr    r31, pt0                // Pad for mt->mf paltemp rule
1049        hw_rei
1050
1051
1052//orig	pvc_jsr	wrent, dest=1
1053        nop
1054        mtpr	r16, pt_entsys
1055
1056        mfpr    r31, pt0                // Pad for mt->mf paltemp rule
1057        hw_rei
1058
1059        ALIGN_BLOCK
1060//
1061// tbi_tbl
1062//	Table to do tbi instructions
1063//	4 instructions per entry
1064//
1065tbi_tbl:
1066        // -2 tbia
1067//orig	pvc_jsr tbi, dest=1
1068        mtpr	r31, ev5__dtb_ia	// Flush DTB
1069        mtpr	r31, ev5__itb_ia	// Flush ITB
1070
1071        hw_rei_stall
1072
1073        nop				// Pad table
1074
1075        // -1 tbiap
1076//orig	pvc_jsr tbi, dest=1
1077        mtpr	r31, ev5__dtb_iap	// Flush DTB
1078        mtpr	r31, ev5__itb_iap	// Flush ITB
1079
1080        hw_rei_stall
1081
1082        nop				// Pad table
1083
1084
1085        // 0 unused
1086//orig	pvc_jsr tbi, dest=1
1087        hw_rei				// Pad table
1088        nop
1089        nop
1090        nop
1091
1092
1093        // 1 tbisi
1094//orig	pvc_jsr tbi, dest=1
1095
1096        nop
1097        nop
1098        mtpr	r17, ev5__itb_is	// Flush ITB
1099        hw_rei_stall
1100
1101        // 2 tbisd
1102//orig	pvc_jsr tbi, dest=1
1103        mtpr	r17, ev5__dtb_is	// Flush DTB.
1104        nop
1105
1106        nop
1107        hw_rei_stall
1108
1109
1110        // 3 tbis
1111//orig	pvc_jsr tbi, dest=1
1112        mtpr	r17, ev5__dtb_is	// Flush DTB
1113        br	r31, tbi_finish
1114        ALIGN_BRANCH
1115tbi_finish:
1116        mtpr	r17, ev5__itb_is	// Flush ITB
1117        hw_rei_stall
1118
1119
1120
1121        ALIGN_BLOCK
1122//
1123// bpt_bchk_common:
1124//	Finish up the bpt/bchk instructions
1125//
1126bpt_bchk_common:
1127        stq	r18, osfsf_a2(sp) 	// a2
1128        mfpr	r13, pt_entif		// get entry point
1129
1130        stq	r12, osfsf_ps(sp)	// save old ps
1131        stq	r14, osfsf_pc(sp)	// save pc
1132
1133        stq	r29, osfsf_gp(sp) 	// save gp
1134        mtpr	r13, exc_addr		// load exc_addr with entIF
1135                                        // 1 cycle to hw_rei
1136
1137        mfpr	r29, pt_kgp		// get the kgp
1138
1139
1140        hw_rei_spe			// done
1141
1142
1143        ALIGN_BLOCK
1144//
1145// rti_to_user
1146//	Finish up the rti instruction
1147//
1148rti_to_user:
1149        mtpr	r11, ev5__dtb_cm	// set Mbox current mode - no virt ref for 2 cycles
1150        mtpr	r11, ev5__ps		// set Ibox current mode - 2 bubble to hw_rei
1151
1152        mtpr	r31, ev5__ipl		// set the ipl. No hw_rei for 2 cycles
        mtpr	r25, pt_ksp		// save off in case RTI to user
1154
1155        mfpr	r30, pt_usp
1156        hw_rei_spe			// and back
1157
1158
1159        ALIGN_BLOCK
1160//
1161// rti_to_kern
1162//	Finish up the rti instruction
1163//
1164rti_to_kern:
1165        and	r12, osfps_m_ipl, r11	// clean ps
1166        mfpr	r12, pt_intmask		// get int mask
1167
1168        extbl	r12, r11, r12		// get mask for this ipl
        mtpr	r25, pt_ksp		// save off in case RTI to user
1170
1171        mtpr	r12, ev5__ipl		// set the new ipl.
1172        or	r25, r31, sp		// sp
1173
1174//	pvc_violate 217			// possible hidden mt->mf ipl not a problem in callpals
1175        hw_rei
1176
1177        ALIGN_BLOCK
1178//
1179// swpctx_cont
1180//	Finish up the swpctx instruction
1181//
1182
1183swpctx_cont:
1184
1185        bic	r25, r24, r25		// clean icsr<FPE,PMP>
1186        sll	r12, icsr_v_fpe, r12	// shift new fen to pos
1187
1188        ldq_p	r14, osfpcb_q_mmptr(r16)// get new mmptr
1189        srl	r22, osfpcb_v_pme, r22	// get pme down to bit 0
1190
1191        or	r25, r12, r25		// icsr with new fen
1192        srl	r23, 32, r24		// move asn to low asn pos
1193
1194        and	r22, 1, r22
1195        sll	r24, itb_asn_v_asn, r12
1196
1197        sll	r22, icsr_v_pmp, r22
1198        nop
1199
1200        or	r25, r22, r25		// icsr with new pme
1201
1202        sll	r24, dtb_asn_v_asn, r24
1203
1204        subl	r23, r13, r13		// gen new cc offset
1205        mtpr	r12, itb_asn		// no hw_rei_stall in 0,1,2,3,4
1206
1207        mtpr	r24, dtb_asn		// Load up new ASN
1208        mtpr	r25, icsr		// write the icsr
1209
1210        sll	r14, page_offset_size_bits, r14 // Move PTBR into internal position.
1211        ldq_p	r25, osfpcb_q_usp(r16)	// get new usp
1212
        insll	r13, 4, r13		// << 32: position offset in CC<63:32>
1214//	pvc_violate 379			// ldq_p can't trap except replay.  only problem if mf same ipr in same shadow
1215        mtpr	r14, pt_ptbr		// load the new ptbr
1216
1217        mtpr	r13, cc			// set new offset
1218        ldq_p	r30, osfpcb_q_ksp(r16)	// get new ksp
1219
1220//	pvc_violate 379			// ldq_p can't trap except replay.  only problem if mf same ipr in same shadow
1221        mtpr	r25, pt_usp		// save usp
1222
1223no_pm_change_10_:	hw_rei_stall			// back we go
1224
1225        ALIGN_BLOCK
1226//
1227// swppal_cont - finish up the swppal call_pal
1228//
1229
1230swppal_cont:
1231        mfpr	r2, pt_misc		// get misc bits
1232        sll	r0, pt_misc_v_switch, r0 // get the "I've switched" bit
1233        or	r2, r0, r2		// set the bit
1234        mtpr	r31, ev5__alt_mode	// ensure alt_mode set to 0 (kernel)
1235        mtpr	r2, pt_misc		// update the chip
1236
1237        or	r3, r31, r4
1238        mfpr	r3, pt_impure		// pass pointer to the impure area in r3
1239//orig	fix_impure_ipr	r3		// adjust impure pointer for ipr read
1240//orig	restore_reg1	bc_ctl, r1, r3, ipr=1		// pass cns_bc_ctl in r1
1241//orig	restore_reg1	bc_config, r2, r3, ipr=1	// pass cns_bc_config in r2
1242//orig	unfix_impure_ipr r3		// restore impure pointer
1243        lda	r3, CNS_Q_IPR(r3)
1244        RESTORE_SHADOW(r1,CNS_Q_BC_CTL,r3);
1245        RESTORE_SHADOW(r1,CNS_Q_BC_CFG,r3);
1246        lda	r3, -CNS_Q_IPR(r3)
1247
1248        or	r31, r31, r0		// set status to success
1249//	pvc_violate	1007
1250        jmp	r31, (r4)		// and call our friend, it's her problem now
1251
1252
1253swppal_fail:
1254        addq	r0, 1, r0		// set unknown pal or not loaded
1255        hw_rei				// and return
1256
1257
1258// .sbttl	"Memory management"
1259
1260        ALIGN_BLOCK
1261//
1262//foe_ipte_handler
1263// IFOE detected on level 3 pte, sort out FOE vs ACV
1264//
1265// on entry:
1266//	with
1267//	R8	 = pte
1268//	R10	 = pc
1269//
1270// Function
1271//	Determine TNV vs ACV vs FOE. Build stack and dispatch
1272//	Will not be here if TNV.
1273//
1274
1275foe_ipte_handler:
1276        sll	r11, 63-osfps_v_mode, r25 // Shift mode up to MS bit
1277        mtpr	r31, ev5__ps		// Set Ibox current mode to kernel
1278
1279        bis	r11, r31, r12		// Save PS for stack write
1280        bge	r25, foe_ipte_handler_10_		// no stack swap needed if cm=kern
1281
1282
1283        mtpr	r31, ev5__dtb_cm	// Set Mbox current mode to kernel -
1284                                        //     no virt ref for next 2 cycles
1285        mtpr	r30, pt_usp		// save user stack
1286
1287        bis	r31, r31, r11		// Set new PS
1288        mfpr	r30, pt_ksp
1289
1290        srl	r8, osfpte_v_ure-osfpte_v_kre, r8 // move pte user bits to kern
1291        nop
1292
1293foe_ipte_handler_10_:	srl	r8, osfpte_v_kre, r25	// get kre to <0>
1294        lda	sp, 0-osfsf_c_size(sp)// allocate stack space
1295
1296        or	r10, r31, r14		// Save pc/va in case TBmiss or fault on stack
1297        mfpr	r13, pt_entmm		// get entry point
1298
1299        stq	r16, osfsf_a0(sp)	// a0
1300        or	r14, r31, r16		// pass pc/va as a0
1301
1302        stq	r17, osfsf_a1(sp)	// a1
1303        nop
1304
1305        stq	r18, osfsf_a2(sp) 	// a2
1306        lda	r17, mmcsr_c_acv(r31)	// assume ACV
1307
1308        stq	r16, osfsf_pc(sp)	// save pc
1309        cmovlbs r25, mmcsr_c_foe, r17	// otherwise FOE
1310
1311        stq	r12, osfsf_ps(sp)	// save ps
1312        subq	r31, 1, r18		// pass flag of istream as a2
1313
1314        stq	r29, osfsf_gp(sp)
1315        mtpr	r13, exc_addr		// set vector address
1316
1317        mfpr	r29, pt_kgp		// load kgp
1318        hw_rei_spe			// out to exec
1319
1320        ALIGN_BLOCK
1321//
1322//invalid_ipte_handler
1323// TNV detected on level 3 pte, sort out TNV vs ACV
1324//
1325// on entry:
1326//	with
1327//	R8	 = pte
1328//	R10	 = pc
1329//
1330// Function
1331//	Determine TNV vs ACV. Build stack and dispatch.
1332//
1333
1334invalid_ipte_handler:
1335        sll	r11, 63-osfps_v_mode, r25 // Shift mode up to MS bit
1336        mtpr	r31, ev5__ps		// Set Ibox current mode to kernel
1337
1338        bis	r11, r31, r12		// Save PS for stack write
1339        bge	r25, invalid_ipte_handler_10_		// no stack swap needed if cm=kern
1340
1341
1342        mtpr	r31, ev5__dtb_cm	// Set Mbox current mode to kernel -
1343                                        //     no virt ref for next 2 cycles
1344        mtpr	r30, pt_usp		// save user stack
1345
1346        bis	r31, r31, r11		// Set new PS
1347        mfpr	r30, pt_ksp
1348
1349        srl	r8, osfpte_v_ure-osfpte_v_kre, r8 // move pte user bits to kern
1350        nop
1351
1352invalid_ipte_handler_10_:	srl	r8, osfpte_v_kre, r25	// get kre to <0>
1353        lda	sp, 0-osfsf_c_size(sp)// allocate stack space
1354
1355        or	r10, r31, r14		// Save pc/va in case TBmiss on stack
1356        mfpr	r13, pt_entmm		// get entry point
1357
1358        stq	r16, osfsf_a0(sp)	// a0
1359        or	r14, r31, r16		// pass pc/va as a0
1360
1361        stq	r17, osfsf_a1(sp)	// a1
1362        nop
1363
1364        stq	r18, osfsf_a2(sp) 	// a2
1365        and	r25, 1, r17		// Isolate kre
1366
1367        stq	r16, osfsf_pc(sp)	// save pc
1368        xor	r17, 1, r17		// map to acv/tnv as a1
1369
1370        stq	r12, osfsf_ps(sp)	// save ps
1371        subq	r31, 1, r18		// pass flag of istream as a2
1372
1373        stq	r29, osfsf_gp(sp)
1374        mtpr	r13, exc_addr		// set vector address
1375
1376        mfpr	r29, pt_kgp		// load kgp
1377        hw_rei_spe			// out to exec
1378
1379
1380
1381
1382        ALIGN_BLOCK
1383//
1384//invalid_dpte_handler
1385// INVALID detected on level 3 pte, sort out TNV vs ACV
1386//
1387// on entry:
1388//	with
1389//	R10	 = va
1390//	R8	 = pte
1391//	R9	 = mm_stat
1392//	PT6	 = pc
1393//
1394// Function
1395//	Determine TNV vs ACV. Build stack and dispatch
1396//
1397
1398
1399invalid_dpte_handler:
1400        mfpr	r12, pt6
1401        blbs	r12, tnv_in_pal		// Special handler if original faulting reference was in PALmode
1402
1403        bis	r12, r31, r14		// save PC in case of tbmiss or fault
1404        srl	r9, mm_stat_v_opcode, r25	// shift opc to <0>
1405
1406        mtpr	r11, pt0		// Save PS for stack write
1407        and 	r25, mm_stat_m_opcode, r25	// isolate opcode
1408
1409        cmpeq	r25, evx_opc_sync, r25	// is it FETCH/FETCH_M?
1410        blbs	r25, nmiss_fetch_ldr31_err	// yes
1411
1412        //dismiss exception if load to r31/f31
1413        blbs	r9, invalid_dpte_no_dismiss	// mm_stat<0> set on store or fetchm
1414
1415                                        // not a store or fetch, must be a load
1416        srl	r9, mm_stat_v_ra, r25	// Shift rnum to low bits
1417
1418        and	r25, 0x1F, r25		// isolate rnum
1419        nop
1420
1421        cmpeq   r25, 0x1F, r25  	// Is the rnum r31 or f31?
1422        bne     r25, nmiss_fetch_ldr31_err    // Yes, dismiss the fault
1423
1424invalid_dpte_no_dismiss:
1425        sll	r11, 63-osfps_v_mode, r25 // Shift mode up to MS bit
1426        mtpr	r31, ev5__ps		// Set Ibox current mode to kernel
1427
1428        mtpr	r31, ev5__dtb_cm	// Set Mbox current mode to kernel -
1429                                        //     no virt ref for next 2 cycles
1430        bge	r25, invalid_dpte_no_dismiss_10_		// no stack swap needed if cm=kern
1431
1432        srl	r8, osfpte_v_ure-osfpte_v_kre, r8 // move pte user bits to kern
1433        mtpr	r30, pt_usp		// save user stack
1434
1435        bis	r31, r31, r11		// Set new PS
1436        mfpr	r30, pt_ksp
1437
1438invalid_dpte_no_dismiss_10_:	srl	r8, osfpte_v_kre, r12	// get kre to <0>
1439        lda	sp, 0-osfsf_c_size(sp)// allocate stack space
1440
1441        or	r10, r31, r25		// Save va in case TBmiss on stack
1442        and	r9, 1, r13		// save r/w flag
1443
1444        stq	r16, osfsf_a0(sp)	// a0
1445        or	r25, r31, r16		// pass va as a0
1446
1447        stq	r17, osfsf_a1(sp)	// a1
1448        or	r31, mmcsr_c_acv, r17 	// assume acv
1449
1450        srl	r12, osfpte_v_kwe-osfpte_v_kre, r25 // get write enable to <0>
1451        stq	r29, osfsf_gp(sp)
1452
1453        stq	r18, osfsf_a2(sp) 	// a2
        cmovlbs r13, r25, r12		// on a write, substitute the write-enable bit for the ACV/TNV test
1455
1456        or	r13, r31, r18		// pass flag of dstream access and read vs write
1457        mfpr	r25, pt0		// get ps
1458
1459        stq	r14, osfsf_pc(sp)	// save pc
1460        mfpr	r13, pt_entmm		// get entry point
1461
1462        stq	r25, osfsf_ps(sp)	// save ps
1463        mtpr	r13, exc_addr		// set vector address
1464
1465        mfpr	r29, pt_kgp		// load kgp
        cmovlbs	r12, mmcsr_c_tnv, r17 	// make a1 be TNV if access ok, else ACV
1467
1468        hw_rei_spe			// out to exec
1469
1470//
1471//
// We come here if we take an error on a dtb_miss and the instruction is a
// fetch, fetch_m, or a load to r31/f31.
// The PC is incremented, and we return to the program,
// essentially ignoring the instruction and the error.
1476//
1477//
1478        ALIGN_BLOCK
1479nmiss_fetch_ldr31_err:
1480        mfpr	r12, pt6
1481        addq	r12, 4, r12		// bump pc to pc+4
1482
1483        mtpr	r12, exc_addr		// and set entry point
1484        mfpr	r31, pt0		// pad exc_addr write
1485
1486        hw_rei				//
1487
1488        ALIGN_BLOCK
1489//
1490// double_pte_inv
1491//	We had a single tbmiss which turned into a double tbmiss which found
1492//	an invalid PTE.  Return to single miss with a fake pte, and the invalid
1493//	single miss flow will report the error.
1494//
1495// on entry:
1496//	r21  	PTE
1497//	r22	available
1498//	VA IPR	locked with original fault VA
1499//       pt4  	saved r21
1500//	pt5  	saved r22
1501//	pt6	original exc_addr
1502//
1503// on return to tbmiss flow:
1504//	r8	fake PTE
1505//
1506//
1507//
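//	A note on the fake PTE (an informal sketch): osfpte_m_prot sets the
//	xRE/xWE protection bits but leaves the valid bit clear, so the
//	single-miss flow takes its invalid-PTE path; with KRE set it reports
//	TNV, and if the intermediate PTE had KRE clear the fake PTE is zeroed
//	so that path reports ACV instead.
//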
1508double_pte_inv:
1509        srl	r21, osfpte_v_kre, r21	// get the kre bit to <0>
1510        mfpr	r22, exc_addr		// get the pc
1511
1512        lda	r22, 4(r22)		// inc the pc
1513        lda	r8, osfpte_m_prot(r31)	 // make a fake pte with xre and xwe set
1514
1515        cmovlbc r21, r31, r8		// set to all 0 for acv if pte<kre> is 0
1516        mtpr	r22, exc_addr		// set for rei
1517
1518        mfpr	r21, pt4		// restore regs
1519        mfpr	r22, pt5		// restore regs
1520
1521        hw_rei				// back to tb miss
1522
1523        ALIGN_BLOCK
1524//
1525//tnv_in_pal
// 	The only places in PAL that load or store are the
// 	stack builders, rti, or retsys.  Any of these means we
//	need to take a KSP-not-valid halt.
1529//
1530//
1531tnv_in_pal:
1532
1533
1534        br	r31, ksp_inval_halt
1535
1536
1537// .sbttl	"Icache flush routines"
1538
1539        ALIGN_BLOCK
1540//
1541// Common Icache flush routine.
1542//
1543//
1544//
1545pal_ic_flush:
1546        nop
1547        mtpr	r31, ev5__ic_flush_ctl		// Icache flush - E1
1548        nop
1549        nop
1550
1551// Now, do 44 NOPs.  3RFB prefetches (24) + IC buffer,IB,slot,issue (20)
1552        nop
1553        nop
1554        nop
1555        nop
1556
1557        nop
1558        nop
1559        nop
1560        nop
1561
1562        nop
1563        nop		// 10
1564
1565        nop
1566        nop
1567        nop
1568        nop
1569
1570        nop
1571        nop
1572        nop
1573        nop
1574
1575        nop
1576        nop		// 20
1577
1578        nop
1579        nop
1580        nop
1581        nop
1582
1583        nop
1584        nop
1585        nop
1586        nop
1587
1588        nop
1589        nop		// 30
1590        nop
1591        nop
1592        nop
1593        nop
1594
1595        nop
1596        nop
1597        nop
1598        nop
1599
1600        nop
1601        nop		// 40
1602
1603        nop
1604        nop
1605
1606one_cycle_and_hw_rei:
1607        nop
1608        nop
1609
1610        hw_rei_stall
1611
1612        ALIGN_BLOCK
1613//
1614//osfpal_calpal_opcdec
1615//  Here for all opcdec CALL_PALs
1616//
1617//	Build stack frame
1618//	a0 <- code
1619//	a1 <- unpred
1620//	a2 <- unpred
1621//	vector via entIF
1622//
1623//
1624
1625osfpal_calpal_opcdec:
1626        sll	r11, 63-osfps_v_mode, r25 // Shift mode up to MS bit
1627        mtpr	r31, ev5__ps		// Set Ibox current mode to kernel
1628
1629        mfpr	r14, exc_addr		// get pc
1630        nop
1631
1632        bis	r11, r31, r12		// Save PS for stack write
1633        bge	r25, osfpal_calpal_opcdec_10_		// no stack swap needed if cm=kern
1634
1635
1636        mtpr	r31, ev5__dtb_cm	// Set Mbox current mode to kernel -
1637                                        //     no virt ref for next 2 cycles
1638        mtpr	r30, pt_usp		// save user stack
1639
1640        bis	r31, r31, r11		// Set new PS
1641        mfpr	r30, pt_ksp
1642
1643osfpal_calpal_opcdec_10_:
1644        lda	sp, 0-osfsf_c_size(sp)// allocate stack space
1645        nop
1646
1647        stq	r16, osfsf_a0(sp)	// save regs
1648        bis	r31, osf_a0_opdec, r16	// set a0
1649
1650        stq	r18, osfsf_a2(sp) 	// a2
1651        mfpr	r13, pt_entif		// get entry point
1652
1653        stq	r12, osfsf_ps(sp)	// save old ps
1654        stq	r17, osfsf_a1(sp)	// a1
1655
1656        stq	r14, osfsf_pc(sp)	// save pc
1657        nop
1658
1659        stq	r29, osfsf_gp(sp) 	// save gp
1660        mtpr	r13, exc_addr		// load exc_addr with entIF
1661                                        // 1 cycle to hw_rei
1662
1663        mfpr	r29, pt_kgp		// get the kgp
1664
1665
1666        hw_rei_spe			// done
1667
1668
1669
1670
1671
1672//
1673//pal_update_pcb
1674//	Update the PCB with the current SP, AST, and CC info
1675//
1676//	r0 - return linkage
1677//
1678        ALIGN_BLOCK
1679
1680pal_update_pcb:
1681        mfpr	r12, pt_pcbb		// get pcbb
1682        and	r11, osfps_m_mode, r25	// get mode
1683        beq	r25, pal_update_pcb_10_		// in kern? no need to update user sp
1684        mtpr	r30, pt_usp		// save user stack
1685        stq_p	r30, osfpcb_q_usp(r12)	// store usp
1686        br	r31, pal_update_pcb_20_		// join common
1687pal_update_pcb_10_:	stq_p	r30, osfpcb_q_ksp(r12)	// store ksp
1688pal_update_pcb_20_:	rpcc	r13			// get cyccounter
1689        srl	r13, 32, r14		// move offset
1690        addl	r13, r14, r14		// merge for new time
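        // rpcc returns the cycle count in <31:0> and the per-process offset
        // in <63:32>; the srl/addl above fold the offset into the low half
        // (mod 2^32 via addl) to form the process cycle count stored below.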
1691        stl_p	r14, osfpcb_l_cc(r12)	// save time
1692
1693//orig	pvc_jsr	updpcb, bsr=1, dest=1
1694        ret	r31, (r0)
1695
1696
1697//
1698// pal_save_state
1699//
1700//	Function
//		All chip state saved: all PTs, SRs, FRs, IPRs
1702//
1703//
// Registers on entry...
1705//
1706//	R0 	= halt code
1707//	pt0	= r0
1708//	R1	= pointer to impure
1709//	pt4	= r1
1710//	R3	= return addr
1711//	pt5	= r3
1712//
1713//	register usage:
1714//		r0 = halt_code
1715//		r1 = addr of impure area
1716//		r3 = return_address
1717//		r4 = scratch
1718//
1719//
1720
1721        ALIGN_BLOCK
1722        .globl pal_save_state
1723pal_save_state:
1724//
1725//
1726// start of implementation independent save routine
1727//
// 		the impure area is larger than the addressability of hw_ld and hw_st
1729//		therefore, we need to play some games:  The impure area
1730//		is informally divided into the "machine independent" part and the
1731//		"machine dependent" part.  The state that will be saved in the
1732//    		"machine independent" part are gpr's, fpr's, hlt, flag, mchkflag (use  (un)fix_impure_gpr macros).
1733//		All others will be in the "machine dependent" part (use (un)fix_impure_ipr macros).
1734//		The impure pointer will need to be adjusted by a different offset for each.  The store/restore_reg
1735//		macros will automagically adjust the offset correctly.
1736//
1737
1738// The distributed code is commented out and followed by corresponding SRC code.
1739// Beware: SAVE_IPR and RESTORE_IPR blow away r0(v0)
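// Rough map of the saves below (offsets come from ev5_impure.h, so treat the
// names as illustrative): r1 is first biased by 0x200 into the GPR part of
// the per-CPU segment (CNS_Q_FLAG, CNS_Q_HALT, CNS_Q_GPR+8*n), then rewound
// and re-biased by CNS_Q_IPR for the PALtemp, shadow, and IPR saves
// (CNS_Q_PT+8*n, CNS_Q_SHADOW+8*n, CNS_Q_<ipr>).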
1740
1741//orig	fix_impure_gpr	r1		// adjust impure area pointer for stores to "gpr" part of impure area
1742        lda	r1, 0x200(r1)		// Point to center of CPU segment
1743//orig	store_reg1 flag, r31, r1, ipr=1	// clear dump area flag
1744        SAVE_GPR(r31,CNS_Q_FLAG,r1)	// Clear the valid flag
1745//orig	store_reg1 hlt, r0, r1, ipr=1
1746        SAVE_GPR(r0,CNS_Q_HALT,r1)	// Save the halt code
1747
1748        mfpr	r0, pt0			// get r0 back			//orig
1749//orig	store_reg1 0, r0, r1		// save r0
1750        SAVE_GPR(r0,CNS_Q_GPR+0x00,r1)	// Save r0
1751
1752        mfpr	r0, pt4			// get r1 back			//orig
1753//orig	store_reg1 1, r0, r1		// save r1
1754        SAVE_GPR(r0,CNS_Q_GPR+0x08,r1)	// Save r1
1755
1756//orig	store_reg 2			// save r2
1757        SAVE_GPR(r2,CNS_Q_GPR+0x10,r1)	// Save r2
1758
1759        mfpr	r0, pt5			// get r3 back			//orig
1760//orig	store_reg1 3, r0, r1		// save r3
1761        SAVE_GPR(r0,CNS_Q_GPR+0x18,r1)	// Save r3
1762
1763        // reason code has been saved
1764        // r0 has been saved
1765        // r1 has been saved
1766        // r2 has been saved
1767        // r3 has been saved
1768        // pt0, pt4, pt5 have been lost
1769
1770        //
1771        // Get out of shadow mode
1772        //
1773
1774        mfpr	r2, icsr		// Get icsr
1775        ldah	r0, (1<<(icsr_v_sde-16))(r31)
1776        bic	r2, r0, r0		// ICSR with SDE clear
1777        mtpr	r0, icsr		// Turn off SDE
1778
1779        mfpr	r31, pt0		// SDE bubble cycle 1
1780        mfpr	r31, pt0		// SDE bubble cycle 2
1781        mfpr	r31, pt0		// SDE bubble cycle 3
1782        nop
1783
1784
1785        // save integer regs R4-r31
1786        SAVE_GPR(r4,CNS_Q_GPR+0x20,r1)
1787        SAVE_GPR(r5,CNS_Q_GPR+0x28,r1)
1788        SAVE_GPR(r6,CNS_Q_GPR+0x30,r1)
1789        SAVE_GPR(r7,CNS_Q_GPR+0x38,r1)
1790        SAVE_GPR(r8,CNS_Q_GPR+0x40,r1)
1791        SAVE_GPR(r9,CNS_Q_GPR+0x48,r1)
1792        SAVE_GPR(r10,CNS_Q_GPR+0x50,r1)
1793        SAVE_GPR(r11,CNS_Q_GPR+0x58,r1)
1794        SAVE_GPR(r12,CNS_Q_GPR+0x60,r1)
1795        SAVE_GPR(r13,CNS_Q_GPR+0x68,r1)
1796        SAVE_GPR(r14,CNS_Q_GPR+0x70,r1)
1797        SAVE_GPR(r15,CNS_Q_GPR+0x78,r1)
1798        SAVE_GPR(r16,CNS_Q_GPR+0x80,r1)
1799        SAVE_GPR(r17,CNS_Q_GPR+0x88,r1)
1800        SAVE_GPR(r18,CNS_Q_GPR+0x90,r1)
1801        SAVE_GPR(r19,CNS_Q_GPR+0x98,r1)
1802        SAVE_GPR(r20,CNS_Q_GPR+0xA0,r1)
1803        SAVE_GPR(r21,CNS_Q_GPR+0xA8,r1)
1804        SAVE_GPR(r22,CNS_Q_GPR+0xB0,r1)
1805        SAVE_GPR(r23,CNS_Q_GPR+0xB8,r1)
1806        SAVE_GPR(r24,CNS_Q_GPR+0xC0,r1)
1807        SAVE_GPR(r25,CNS_Q_GPR+0xC8,r1)
1808        SAVE_GPR(r26,CNS_Q_GPR+0xD0,r1)
1809        SAVE_GPR(r27,CNS_Q_GPR+0xD8,r1)
1810        SAVE_GPR(r28,CNS_Q_GPR+0xE0,r1)
1811        SAVE_GPR(r29,CNS_Q_GPR+0xE8,r1)
1812        SAVE_GPR(r30,CNS_Q_GPR+0xF0,r1)
1813        SAVE_GPR(r31,CNS_Q_GPR+0xF8,r1)
1814
1815        // save all paltemp regs except pt0
1816
1817//orig	unfix_impure_gpr	r1		// adjust impure area pointer for gpr stores
1818//orig	fix_impure_ipr	r1			// adjust impure area pointer for pt stores
1819
1820        lda	r1, -0x200(r1)		// Restore the impure base address.
1821        lda	r1, CNS_Q_IPR(r1)	// Point to the base of IPR area.
1822        SAVE_IPR(pt0,CNS_Q_PT+0x00,r1)		// the osf code didn't save/restore palTemp 0 ?? pboyle
1823        SAVE_IPR(pt1,CNS_Q_PT+0x08,r1)
1824        SAVE_IPR(pt2,CNS_Q_PT+0x10,r1)
1825        SAVE_IPR(pt3,CNS_Q_PT+0x18,r1)
1826        SAVE_IPR(pt4,CNS_Q_PT+0x20,r1)
1827        SAVE_IPR(pt5,CNS_Q_PT+0x28,r1)
1828        SAVE_IPR(pt6,CNS_Q_PT+0x30,r1)
1829        SAVE_IPR(pt7,CNS_Q_PT+0x38,r1)
1830        SAVE_IPR(pt8,CNS_Q_PT+0x40,r1)
1831        SAVE_IPR(pt9,CNS_Q_PT+0x48,r1)
1832        SAVE_IPR(pt10,CNS_Q_PT+0x50,r1)
1833        SAVE_IPR(pt11,CNS_Q_PT+0x58,r1)
1834        SAVE_IPR(pt12,CNS_Q_PT+0x60,r1)
1835        SAVE_IPR(pt13,CNS_Q_PT+0x68,r1)
1836        SAVE_IPR(pt14,CNS_Q_PT+0x70,r1)
1837        SAVE_IPR(pt15,CNS_Q_PT+0x78,r1)
1838        SAVE_IPR(pt16,CNS_Q_PT+0x80,r1)
1839        SAVE_IPR(pt17,CNS_Q_PT+0x88,r1)
1840        SAVE_IPR(pt18,CNS_Q_PT+0x90,r1)
1841        SAVE_IPR(pt19,CNS_Q_PT+0x98,r1)
1842        SAVE_IPR(pt20,CNS_Q_PT+0xA0,r1)
1843        SAVE_IPR(pt21,CNS_Q_PT+0xA8,r1)
1844        SAVE_IPR(pt22,CNS_Q_PT+0xB0,r1)
1845        SAVE_IPR(pt23,CNS_Q_PT+0xB8,r1)
1846
1847        // Restore shadow mode
1848        mfpr	r31, pt0		// pad write to icsr out of shadow of store (trap does not abort write)
1849        mfpr	r31, pt0
1850        mtpr	r2, icsr		// Restore original ICSR
1851
1852        mfpr	r31, pt0		// SDE bubble cycle 1
1853        mfpr	r31, pt0		// SDE bubble cycle 2
1854        mfpr	r31, pt0		// SDE bubble cycle 3
1855        nop
1856
1857        // save all integer shadow regs
1858        SAVE_SHADOW( r8,CNS_Q_SHADOW+0x00,r1)	// also called p0...p7 in the Hudson code
1859        SAVE_SHADOW( r9,CNS_Q_SHADOW+0x08,r1)
1860        SAVE_SHADOW(r10,CNS_Q_SHADOW+0x10,r1)
1861        SAVE_SHADOW(r11,CNS_Q_SHADOW+0x18,r1)
1862        SAVE_SHADOW(r12,CNS_Q_SHADOW+0x20,r1)
1863        SAVE_SHADOW(r13,CNS_Q_SHADOW+0x28,r1)
1864        SAVE_SHADOW(r14,CNS_Q_SHADOW+0x30,r1)
1865        SAVE_SHADOW(r25,CNS_Q_SHADOW+0x38,r1)
1866
1867        SAVE_IPR(excAddr,CNS_Q_EXC_ADDR,r1)
1868        SAVE_IPR(palBase,CNS_Q_PAL_BASE,r1)
1869        SAVE_IPR(mmStat,CNS_Q_MM_STAT,r1)
1870        SAVE_IPR(va,CNS_Q_VA,r1)
1871        SAVE_IPR(icsr,CNS_Q_ICSR,r1)
1872        SAVE_IPR(ipl,CNS_Q_IPL,r1)
1873        SAVE_IPR(ips,CNS_Q_IPS,r1)
1874        SAVE_IPR(itbAsn,CNS_Q_ITB_ASN,r1)
1875        SAVE_IPR(aster,CNS_Q_ASTER,r1)
1876        SAVE_IPR(astrr,CNS_Q_ASTRR,r1)
1877        SAVE_IPR(sirr,CNS_Q_SIRR,r1)
1878        SAVE_IPR(isr,CNS_Q_ISR,r1)
1879        SAVE_IPR(iVptBr,CNS_Q_IVPTBR,r1)
1880        SAVE_IPR(mcsr,CNS_Q_MCSR,r1)
1881        SAVE_IPR(dcMode,CNS_Q_DC_MODE,r1)
1882
1883//orig	pvc_violate 379			// mf maf_mode after a store ok (pvc doesn't distinguish ld from st)
1884//orig	store_reg maf_mode,	ipr=1	// save ipr -- no mbox instructions for
1885//orig                                  // PVC violation applies only to
1886pvc$osf35$379:				    // loads. HW_ST ok here, so ignore
1887        SAVE_IPR(mafMode,CNS_Q_MAF_MODE,r1) // MBOX INST->MF MAF_MODE IN 0,1,2
1888
1889
        // the following IPRs are informational only -- will not be restored
1891
1892        SAVE_IPR(icPerr,CNS_Q_ICPERR_STAT,r1)
1893        SAVE_IPR(PmCtr,CNS_Q_PM_CTR,r1)
1894        SAVE_IPR(intId,CNS_Q_INT_ID,r1)
1895        SAVE_IPR(excSum,CNS_Q_EXC_SUM,r1)
1896        SAVE_IPR(excMask,CNS_Q_EXC_MASK,r1)
1897        ldah	r14, 0xFFF0(zero)
1898        zap	r14, 0xE0, r14		// Get base address of CBOX IPRs
1899        NOP				// Pad mfpr dcPerr out of shadow of
1900        NOP				// last store
1901        NOP
1902        SAVE_IPR(dcPerr,CNS_Q_DCPERR_STAT,r1)
1903
1904        // read cbox ipr state
1905
1906        mb
1907        ldq_p	r2, scCtl(r14)
1908        ldq_p	r13, ldLock(r14)
1909        ldq_p	r4, scAddr(r14)
1910        ldq_p	r5, eiAddr(r14)
1911        ldq_p	r6, bcTagAddr(r14)
1912        ldq_p	r7, fillSyn(r14)
1913        bis	r5, r4, zero		// Make sure all loads complete before
1914        bis	r7, r6, zero		// reading registers that unlock them.
1915        ldq_p	r8, scStat(r14)		// Unlocks scAddr.
1916        ldq_p	r9, eiStat(r14)		// Unlocks eiAddr, bcTagAddr, fillSyn.
1917        ldq_p	zero, eiStat(r14)	// Make sure it is really unlocked.
1918        mb
1919
1920        // save cbox ipr state
1921        SAVE_SHADOW(r2,CNS_Q_SC_CTL,r1);
1922        SAVE_SHADOW(r13,CNS_Q_LD_LOCK,r1);
1923        SAVE_SHADOW(r4,CNS_Q_SC_ADDR,r1);
1924        SAVE_SHADOW(r5,CNS_Q_EI_ADDR,r1);
1925        SAVE_SHADOW(r6,CNS_Q_BC_TAG_ADDR,r1);
1926        SAVE_SHADOW(r7,CNS_Q_FILL_SYN,r1);
1927        SAVE_SHADOW(r8,CNS_Q_SC_STAT,r1);
1928        SAVE_SHADOW(r9,CNS_Q_EI_STAT,r1);
1929        //bc_config? sl_rcv?
1930
1931// restore impure base
1932//orig	unfix_impure_ipr r1
1933        lda	r1, -CNS_Q_IPR(r1)
1934
1935// save all floating regs
1936        mfpr	r0, icsr		// get icsr
1937        or	r31, 1, r2		// get a one
1938        sll	r2, icsr_v_fpe, r2	// Shift it into ICSR<FPE> position
1939        or	r2, r0, r0		// set FEN on
1940        mtpr	r0, icsr		// write to icsr, enabling FEN
1941
1942// map the save area virtually
1943        mtpr	r31, dtbIa		// Clear all DTB entries
1944        srl	r1, va_s_off, r0	// Clean off byte-within-page offset
1945        sll	r0, pte_v_pfn, r0	// Shift to form PFN
1946        lda	r0, pte_m_prot(r0)	// Set all read/write enable bits
1947        mtpr	r0, dtbPte		// Load the PTE and set valid
1948        mtpr	r1, dtbTag		// Write the PTE and tag into the DTB
1949
1950
1951// map the next page too - in case the impure area crosses a page boundary
1952        lda	r4, (1<<va_s_off)(r1)	// Generate address for next page
1953        srl	r4, va_s_off, r0	// Clean off byte-within-page offset
1954        sll	r0, pte_v_pfn, r0	// Shift to form PFN
1955        lda	r0, pte_m_prot(r0)	// Set all read/write enable bits
1956        mtpr	r0, dtbPte		// Load the PTE and set valid
1957        mtpr	r4, dtbTag		// Write the PTE and tag into the DTB
1958
1959        sll	r31, 0, r31		// stall cycle 1
1960        sll	r31, 0, r31		// stall cycle 2
1961        sll	r31, 0, r31		// stall cycle 3
1962        nop
1963
1964// add offset for saving fpr regs
1965//orig	fix_impure_gpr r1
1966        lda	r1, 0x200(r1)		// Point to center of CPU segment
1967
1968// now save the regs - F0-F31
1969        mf_fpcr  f0			// original
1970
1971        SAVE_FPR(f0,CNS_Q_FPR+0x00,r1)
1972        SAVE_FPR(f1,CNS_Q_FPR+0x08,r1)
1973        SAVE_FPR(f2,CNS_Q_FPR+0x10,r1)
1974        SAVE_FPR(f3,CNS_Q_FPR+0x18,r1)
1975        SAVE_FPR(f4,CNS_Q_FPR+0x20,r1)
1976        SAVE_FPR(f5,CNS_Q_FPR+0x28,r1)
1977        SAVE_FPR(f6,CNS_Q_FPR+0x30,r1)
1978        SAVE_FPR(f7,CNS_Q_FPR+0x38,r1)
1979        SAVE_FPR(f8,CNS_Q_FPR+0x40,r1)
1980        SAVE_FPR(f9,CNS_Q_FPR+0x48,r1)
1981        SAVE_FPR(f10,CNS_Q_FPR+0x50,r1)
1982        SAVE_FPR(f11,CNS_Q_FPR+0x58,r1)
1983        SAVE_FPR(f12,CNS_Q_FPR+0x60,r1)
1984        SAVE_FPR(f13,CNS_Q_FPR+0x68,r1)
1985        SAVE_FPR(f14,CNS_Q_FPR+0x70,r1)
1986        SAVE_FPR(f15,CNS_Q_FPR+0x78,r1)
1987        SAVE_FPR(f16,CNS_Q_FPR+0x80,r1)
1988        SAVE_FPR(f17,CNS_Q_FPR+0x88,r1)
1989        SAVE_FPR(f18,CNS_Q_FPR+0x90,r1)
1990        SAVE_FPR(f19,CNS_Q_FPR+0x98,r1)
1991        SAVE_FPR(f20,CNS_Q_FPR+0xA0,r1)
1992        SAVE_FPR(f21,CNS_Q_FPR+0xA8,r1)
1993        SAVE_FPR(f22,CNS_Q_FPR+0xB0,r1)
1994        SAVE_FPR(f23,CNS_Q_FPR+0xB8,r1)
1995        SAVE_FPR(f24,CNS_Q_FPR+0xC0,r1)
1996        SAVE_FPR(f25,CNS_Q_FPR+0xC8,r1)
1997        SAVE_FPR(f26,CNS_Q_FPR+0xD0,r1)
1998        SAVE_FPR(f27,CNS_Q_FPR+0xD8,r1)
1999        SAVE_FPR(f28,CNS_Q_FPR+0xE0,r1)
2000        SAVE_FPR(f29,CNS_Q_FPR+0xE8,r1)
2001        SAVE_FPR(f30,CNS_Q_FPR+0xF0,r1)
2002        SAVE_FPR(f31,CNS_Q_FPR+0xF8,r1)
2003
// switch impure offset from gpr to ipr ---
2005//orig	unfix_impure_gpr	r1
2006//orig	fix_impure_ipr	r1
2007//orig	store_reg1 fpcsr, f0, r1, fpcsr=1
2008
        SAVE_FPR(f0,CNS_Q_FPCSR,r1)	// fpcsr was loaded into f0 above -- can the offset reach? pb
2010        lda	r1, -0x200(r1)		// Restore the impure base address
2011
2012// and back to gpr ---
2013//orig	unfix_impure_ipr	r1
2014//orig	fix_impure_gpr	r1
2015
2016//orig	lda	r0, cns_mchksize(r31)	// get size of mchk area
2017//orig	store_reg1 mchkflag, r0, r1, ipr=1
2018//orig	mb
2019
2020        lda	r1, CNS_Q_IPR(r1)	// Point to base of IPR area again
        // save this using the IPR base (it is closer), not the GPR base as they used...pb
2022        lda	r0, MACHINE_CHECK_SIZE(r31)	// get size of mchk area
2023        SAVE_SHADOW(r0,CNS_Q_MCHK,r1);
2024        mb
2025
2026//orig	or	r31, 1, r0		// get a one
2027//orig	store_reg1 flag, r0, r1, ipr=1	// set dump area flag
2028//orig	mb
2029
2030        lda	r1, -CNS_Q_IPR(r1)	// back to the base
2031        lda	r1, 0x200(r1)		// Point to center of CPU segment
2032        or	r31, 1, r0		// get a one
        SAVE_GPR(r0,CNS_Q_FLAG,r1)	// set dump area valid flag
2034        mb
2035
2036        // restore impure area base
2037//orig	unfix_impure_gpr r1
2038        lda	r1, -0x200(r1)		// Point to center of CPU segment
2039
2040        mtpr	r31, dtb_ia		// clear the dtb
2041        mtpr	r31, itb_ia		// clear the itb
2042
2043//orig	pvc_jsr	savsta, bsr=1, dest=1
2044        ret	r31, (r3)		// and back we go
2045
2046
2047
2048// .sbttl  "PAL_RESTORE_STATE"
2049//
2050//
2051//	Pal_restore_state
2052//
2053//
2054//	register usage:
2055//		r1 = addr of impure area
2056//		r3 = return_address
2057//		all other regs are scratchable, as they are about to
2058//		be reloaded from ram.
2059//
2060//	Function:
2061//		All chip state restored, all SRs, FRs, PTs, IPRs
2062//					*** except R1, R3, PT0, PT4, PT5 ***
2063//
2064//
2065        ALIGN_BLOCK
2066pal_restore_state:
2067
// need to restore sc_ctl, bc_ctl, bc_config??? If so, need to figure out a safe way to do so.
2069
// map the impure (save) area virtually
2071        mtpr	r31, dtbIa		// Clear all DTB entries
2072        srl	r1, va_s_off, r0	// Clean off byte-within-page offset
2073        sll	r0, pte_v_pfn, r0	// Shift to form PFN
2074        lda	r0, pte_m_prot(r0)	// Set all read/write enable bits
2075        mtpr	r0, dtbPte		// Load the PTE and set valid
2076        mtpr	r1, dtbTag		// Write the PTE and tag into the DTB
2077
2078
2079// map the next page too, in case impure area crosses page boundary
2080        lda	r4, (1<<VA_S_OFF)(r1)	// Generate address for next page
2081        srl	r4, va_s_off, r0	// Clean off byte-within-page offset
2082        sll	r0, pte_v_pfn, r0	// Shift to form PFN
2083        lda	r0, pte_m_prot(r0)	// Set all read/write enable bits
2084        mtpr	r0, dtbPte		// Load the PTE and set valid
2085        mtpr	r4, dtbTag		// Write the PTE and tag into the DTB
2086
// restore all floating regs -- first enable FPE (and SDE)
2088        mfpr	r0, icsr		// Get current ICSR
2089        bis	zero, 1, r2		// Get a '1'
2090        or	r2, (1<<(icsr_v_sde-icsr_v_fpe)), r2
2091        sll	r2, icsr_v_fpe, r2	// Shift bits into position
2092        bis	r2, r2, r0		// Set ICSR<SDE> and ICSR<FPE>
2093        mtpr	r0, icsr		// Update the chip
2094
2095        mfpr	r31, pt0		// FPE bubble cycle 1		//orig
2096        mfpr	r31, pt0		// FPE bubble cycle 2		//orig
2097        mfpr	r31, pt0		// FPE bubble cycle 3		//orig
2098
2099//orig	fix_impure_ipr r1
2100//orig	restore_reg1 fpcsr, f0, r1, fpcsr=1
2101//orig	mt_fpcr  f0
2102//orig
2103//orig	unfix_impure_ipr r1
2104//orig	fix_impure_gpr r1		// adjust impure pointer offset for gpr access
2105        lda	r1, 200(r1)	// Point to base of IPR area again
2106        RESTORE_FPR(f0,CNS_Q_FPCSR,r1)		// can it reach?? pb
2107        mt_fpcr  f0			// original
2108
2109        lda	r1, 0x200(r1)		// point to center of CPU segment
2110
2111// restore all floating regs
2112        RESTORE_FPR(f0,CNS_Q_FPR+0x00,r1)
2113        RESTORE_FPR(f1,CNS_Q_FPR+0x08,r1)
2114        RESTORE_FPR(f2,CNS_Q_FPR+0x10,r1)
2115        RESTORE_FPR(f3,CNS_Q_FPR+0x18,r1)
2116        RESTORE_FPR(f4,CNS_Q_FPR+0x20,r1)
2117        RESTORE_FPR(f5,CNS_Q_FPR+0x28,r1)
2118        RESTORE_FPR(f6,CNS_Q_FPR+0x30,r1)
2119        RESTORE_FPR(f7,CNS_Q_FPR+0x38,r1)
2120        RESTORE_FPR(f8,CNS_Q_FPR+0x40,r1)
2121        RESTORE_FPR(f9,CNS_Q_FPR+0x48,r1)
2122        RESTORE_FPR(f10,CNS_Q_FPR+0x50,r1)
2123        RESTORE_FPR(f11,CNS_Q_FPR+0x58,r1)
2124        RESTORE_FPR(f12,CNS_Q_FPR+0x60,r1)
2125        RESTORE_FPR(f13,CNS_Q_FPR+0x68,r1)
2126        RESTORE_FPR(f14,CNS_Q_FPR+0x70,r1)
2127        RESTORE_FPR(f15,CNS_Q_FPR+0x78,r1)
2128        RESTORE_FPR(f16,CNS_Q_FPR+0x80,r1)
2129        RESTORE_FPR(f17,CNS_Q_FPR+0x88,r1)
2130        RESTORE_FPR(f18,CNS_Q_FPR+0x90,r1)
2131        RESTORE_FPR(f19,CNS_Q_FPR+0x98,r1)
2132        RESTORE_FPR(f20,CNS_Q_FPR+0xA0,r1)
2133        RESTORE_FPR(f21,CNS_Q_FPR+0xA8,r1)
2134        RESTORE_FPR(f22,CNS_Q_FPR+0xB0,r1)
2135        RESTORE_FPR(f23,CNS_Q_FPR+0xB8,r1)
2136        RESTORE_FPR(f24,CNS_Q_FPR+0xC0,r1)
2137        RESTORE_FPR(f25,CNS_Q_FPR+0xC8,r1)
2138        RESTORE_FPR(f26,CNS_Q_FPR+0xD0,r1)
2139        RESTORE_FPR(f27,CNS_Q_FPR+0xD8,r1)
2140        RESTORE_FPR(f28,CNS_Q_FPR+0xE0,r1)
2141        RESTORE_FPR(f29,CNS_Q_FPR+0xE8,r1)
2142        RESTORE_FPR(f30,CNS_Q_FPR+0xF0,r1)
2143        RESTORE_FPR(f31,CNS_Q_FPR+0xF8,r1)
2144
2145// switch impure pointer from gpr to ipr area --
2146//orig	unfix_impure_gpr r1
2147//orig	fix_impure_ipr r1
2148        lda	r1, -0x200(r1)		// Restore base address of impure area.
2149        lda	r1, CNS_Q_IPR(r1)	// Point to base of IPR area.
2150
2151// restore all pal regs
2152        RESTORE_IPR(pt0,CNS_Q_PT+0x00,r1)		// the osf code didn't save/restore palTemp 0 ?? pboyle
2153        RESTORE_IPR(pt1,CNS_Q_PT+0x08,r1)
2154        RESTORE_IPR(pt2,CNS_Q_PT+0x10,r1)
2155        RESTORE_IPR(pt3,CNS_Q_PT+0x18,r1)
2156        RESTORE_IPR(pt4,CNS_Q_PT+0x20,r1)
2157        RESTORE_IPR(pt5,CNS_Q_PT+0x28,r1)
2158        RESTORE_IPR(pt6,CNS_Q_PT+0x30,r1)
2159        RESTORE_IPR(pt7,CNS_Q_PT+0x38,r1)
2160        RESTORE_IPR(pt8,CNS_Q_PT+0x40,r1)
2161        RESTORE_IPR(pt9,CNS_Q_PT+0x48,r1)
2162        RESTORE_IPR(pt10,CNS_Q_PT+0x50,r1)
2163        RESTORE_IPR(pt11,CNS_Q_PT+0x58,r1)
2164        RESTORE_IPR(pt12,CNS_Q_PT+0x60,r1)
2165        RESTORE_IPR(pt13,CNS_Q_PT+0x68,r1)
2166        RESTORE_IPR(pt14,CNS_Q_PT+0x70,r1)
2167        RESTORE_IPR(pt15,CNS_Q_PT+0x78,r1)
2168        RESTORE_IPR(pt16,CNS_Q_PT+0x80,r1)
2169        RESTORE_IPR(pt17,CNS_Q_PT+0x88,r1)
2170        RESTORE_IPR(pt18,CNS_Q_PT+0x90,r1)
2171        RESTORE_IPR(pt19,CNS_Q_PT+0x98,r1)
2172        RESTORE_IPR(pt20,CNS_Q_PT+0xA0,r1)
2173        RESTORE_IPR(pt21,CNS_Q_PT+0xA8,r1)
2174        RESTORE_IPR(pt22,CNS_Q_PT+0xB0,r1)
2175        RESTORE_IPR(pt23,CNS_Q_PT+0xB8,r1)
2176
2177
2178//orig	restore_reg exc_addr,	ipr=1	// restore ipr
2179//orig	restore_reg pal_base,	ipr=1	// restore ipr
2180//orig	restore_reg ipl,	ipr=1	// restore ipr
2181//orig	restore_reg ps,		ipr=1	// restore ipr
2182//orig	mtpr	r0, dtb_cm		// set current mode in mbox too
2183//orig	restore_reg itb_asn,	ipr=1
2184//orig	srl	r0, itb_asn_v_asn, r0
2185//orig	sll	r0, dtb_asn_v_asn, r0
2186//orig	mtpr	r0, dtb_asn		// set ASN in Mbox too
2187//orig	restore_reg ivptbr,	ipr=1
2188//orig	mtpr	r0, mvptbr			// use ivptbr value to restore mvptbr
2189//orig	restore_reg mcsr,	ipr=1
2190//orig	restore_reg aster,	ipr=1
2191//orig	restore_reg astrr,	ipr=1
2192//orig	restore_reg sirr,	ipr=1
2193//orig	restore_reg maf_mode, 	ipr=1		// no mbox instruction for 3 cycles
2194//orig	mfpr	r31, pt0			// (may issue with mt maf_mode)
2195//orig	mfpr	r31, pt0			// bubble cycle 1
2196//orig	mfpr	r31, pt0                        // bubble cycle 2
2197//orig	mfpr	r31, pt0                        // bubble cycle 3
2198//orig	mfpr	r31, pt0			// (may issue with following ld)
2199
        // RESTORE_IPR leaves the restored value in r0; the code below relies on that side effect (gag)
2201        RESTORE_IPR(excAddr,CNS_Q_EXC_ADDR,r1)
2202        RESTORE_IPR(palBase,CNS_Q_PAL_BASE,r1)
2203        RESTORE_IPR(ipl,CNS_Q_IPL,r1)
2204        RESTORE_IPR(ips,CNS_Q_IPS,r1)
2205        mtpr	r0, dtbCm			// Set Mbox current mode too.
2206        RESTORE_IPR(itbAsn,CNS_Q_ITB_ASN,r1)
        srl	r0, 4, r0			// extract ITB_ASN<ASN> (itb_asn_v_asn)
        sll	r0, 57, r0			// shift into DTB_ASN<ASN> position (dtb_asn_v_asn)
2209        mtpr	r0, dtbAsn			// Set Mbox ASN too
2210        RESTORE_IPR(iVptBr,CNS_Q_IVPTBR,r1)
2211        mtpr	r0, mVptBr			// Set Mbox VptBr too
2212        RESTORE_IPR(mcsr,CNS_Q_MCSR,r1)
2213        RESTORE_IPR(aster,CNS_Q_ASTER,r1)
2214        RESTORE_IPR(astrr,CNS_Q_ASTRR,r1)
2215        RESTORE_IPR(sirr,CNS_Q_SIRR,r1)
2216        RESTORE_IPR(mafMode,CNS_Q_MAF_MODE,r1)
2217        STALL
2218        STALL
2219        STALL
2220        STALL
2221        STALL
2222
2223
2224        // restore all integer shadow regs
2225        RESTORE_SHADOW( r8,CNS_Q_SHADOW+0x00,r1)	// also called p0...p7 in the Hudson code
2226        RESTORE_SHADOW( r9,CNS_Q_SHADOW+0x08,r1)
2227        RESTORE_SHADOW(r10,CNS_Q_SHADOW+0x10,r1)
2228        RESTORE_SHADOW(r11,CNS_Q_SHADOW+0x18,r1)
2229        RESTORE_SHADOW(r12,CNS_Q_SHADOW+0x20,r1)
2230        RESTORE_SHADOW(r13,CNS_Q_SHADOW+0x28,r1)
2231        RESTORE_SHADOW(r14,CNS_Q_SHADOW+0x30,r1)
2232        RESTORE_SHADOW(r25,CNS_Q_SHADOW+0x38,r1)
2233        RESTORE_IPR(dcMode,CNS_Q_DC_MODE,r1)
2234
2235        //
2236        // Get out of shadow mode
2237        //
2238
2239        mfpr	r31, pt0		// pad last load to icsr write (in case of replay, icsr will be written anyway)
2240        mfpr	r31, pt0		// ""
2241        mfpr	r0, icsr		// Get icsr
2242        ldah	r2,  (1<<(ICSR_V_SDE-16))(r31)	// Get a one in SHADOW_ENABLE bit location
2243        bic	r0, r2, r2		// ICSR with SDE clear
2244        mtpr	r2, icsr		// Turn off SDE - no palshadow rd/wr for 3 bubble cycles
2245
2246        mfpr	r31, pt0		// SDE bubble cycle 1
2247        mfpr	r31, pt0		// SDE bubble cycle 2
2248        mfpr	r31, pt0		// SDE bubble cycle 3
2249        nop
2250
2251// switch impure pointer from ipr to gpr area --
2252//orig	unfix_impure_ipr	r1
2253//orig	fix_impure_gpr	r1
2254
2255// Restore GPRs (r0, r2 are restored later, r1 and r3 are trashed) ...
2256
2257        lda	r1, -CNS_Q_IPR(r1)	// Restore base address of impure area
2258        lda	r1, 0x200(r1)		// Point to center of CPU segment
2259
2260        // restore all integer regs
2261        RESTORE_GPR(r4,CNS_Q_GPR+0x20,r1)
2262        RESTORE_GPR(r5,CNS_Q_GPR+0x28,r1)
2263        RESTORE_GPR(r6,CNS_Q_GPR+0x30,r1)
2264        RESTORE_GPR(r7,CNS_Q_GPR+0x38,r1)
2265        RESTORE_GPR(r8,CNS_Q_GPR+0x40,r1)
2266        RESTORE_GPR(r9,CNS_Q_GPR+0x48,r1)
2267        RESTORE_GPR(r10,CNS_Q_GPR+0x50,r1)
2268        RESTORE_GPR(r11,CNS_Q_GPR+0x58,r1)
2269        RESTORE_GPR(r12,CNS_Q_GPR+0x60,r1)
2270        RESTORE_GPR(r13,CNS_Q_GPR+0x68,r1)
2271        RESTORE_GPR(r14,CNS_Q_GPR+0x70,r1)
2272        RESTORE_GPR(r15,CNS_Q_GPR+0x78,r1)
2273        RESTORE_GPR(r16,CNS_Q_GPR+0x80,r1)
2274        RESTORE_GPR(r17,CNS_Q_GPR+0x88,r1)
2275        RESTORE_GPR(r18,CNS_Q_GPR+0x90,r1)
2276        RESTORE_GPR(r19,CNS_Q_GPR+0x98,r1)
2277        RESTORE_GPR(r20,CNS_Q_GPR+0xA0,r1)
2278        RESTORE_GPR(r21,CNS_Q_GPR+0xA8,r1)
2279        RESTORE_GPR(r22,CNS_Q_GPR+0xB0,r1)
2280        RESTORE_GPR(r23,CNS_Q_GPR+0xB8,r1)
2281        RESTORE_GPR(r24,CNS_Q_GPR+0xC0,r1)
2282        RESTORE_GPR(r25,CNS_Q_GPR+0xC8,r1)
2283        RESTORE_GPR(r26,CNS_Q_GPR+0xD0,r1)
2284        RESTORE_GPR(r27,CNS_Q_GPR+0xD8,r1)
2285        RESTORE_GPR(r28,CNS_Q_GPR+0xE0,r1)
2286        RESTORE_GPR(r29,CNS_Q_GPR+0xE8,r1)
2287        RESTORE_GPR(r30,CNS_Q_GPR+0xF0,r1)
2288        RESTORE_GPR(r31,CNS_Q_GPR+0xF8,r1)
2289
2290//orig	// switch impure pointer from gpr to ipr area --
2291//orig	unfix_impure_gpr	r1
2292//orig	fix_impure_ipr	r1
2293//orig	restore_reg icsr, ipr=1		// restore original icsr- 4 bubbles to hw_rei
2294
        lda	r1, -0x200(r1)		// Restore base address of impure area.
        lda	r1, CNS_Q_IPR(r1)	// Point to base of IPR area again.
2297        RESTORE_IPR(icsr,CNS_Q_ICSR,r1)
2298
2299//orig	// and back again --
2300//orig	unfix_impure_ipr	r1
2301//orig	fix_impure_gpr	r1
2302//orig	store_reg1 	flag, r31, r1, ipr=1 // clear dump area valid flag
2303//orig	mb
2304
        lda	r1, -CNS_Q_IPR(r1)	// Back to base of impure area again,
        lda	r1, 0x200(r1)		// and back to center of CPU segment
2307        SAVE_GPR(r31,CNS_Q_FLAG,r1)	// Clear the dump area valid flag
2308        mb
2309
2310//orig	// and back we go
2311//orig//	restore_reg 3
2312//orig	restore_reg 2
2313//orig//	restore_reg 1
2314//orig	restore_reg 0
2315//orig	// restore impure area base
2316//orig	unfix_impure_gpr r1
2317
2318        RESTORE_GPR(r2,CNS_Q_GPR+0x10,r1)
2319        RESTORE_GPR(r0,CNS_Q_GPR+0x00,r1)
2320        lda	r1, -0x200(r1)		// Restore impure base address
2321
2322        mfpr	r31, pt0		// stall for ldq_p above		//orig
2323
2324        mtpr	r31, dtb_ia		// clear the tb			//orig
2325        mtpr	r31, itb_ia		// clear the itb		//orig
2326
2327//orig	pvc_jsr	rststa, bsr=1, dest=1
2328        ret	r31, (r3)		// back we go			//orig
2329
2330
2331//
2332// pal_pal_bug_check -- code has found a bugcheck situation.
2333//	Set things up and join common machine check flow.
2334//
2335// Input:
2336//	r14 	- exc_addr
2337//
2338// On exit:
2339//	pt0	- saved r0
2340//	pt1	- saved	r1
2341//	pt4	- saved r4
2342//	pt5	- saved r5
2343//	pt6	- saved r6
2344//	pt10	- saved exc_addr
2345//       pt_misc<47:32> - mchk code
2346//       pt_misc<31:16> - scb vector
2347//	r14	- base of Cbox IPRs in IO space
2348//	MCES<mchk> is set
2349//
2350
2351                ALIGN_BLOCK
2352        .globl pal_pal_bug_check_from_int
2353pal_pal_bug_check_from_int:
2354        DEBUGSTORE(0x79)
2355//simos	DEBUG_EXC_ADDR()
2356        DEBUGSTORE(0x20)
2357//simos	bsr	r25, put_hex
2358        lda	r25, mchk_c_bugcheck(r31)
2359        addq	r25, 1, r25			// set flag indicating we came from interrupt and stack is already pushed
2360        br	r31, pal_pal_mchk
2361        nop
2362
2363pal_pal_bug_check:
2364        lda     r25, mchk_c_bugcheck(r31)
2365
2366pal_pal_mchk:
2367        sll	r25, 32, r25			// Move mchk code to position
2368
2369        mtpr	r14, pt10			// Stash exc_addr
2370        mtpr	r14, exc_addr
2371
2372        mfpr	r12, pt_misc			// Get MCES and scratch
        zap	r12, 0x3c, r12			// Clear old mchk code<47:32> and SCB vector<31:16>
2374
2375        or	r12, r25, r12			// Combine mchk code
2376        lda	r25, scb_v_procmchk(r31)	// Get SCB vector
2377
2378        sll	r25, 16, r25			// Move SCBv to position
2379        or	r12, r25, r25			// Combine SCBv
2380
2381        mtpr	r0, pt0				// Stash for scratch
2382        bis	r25, mces_m_mchk, r25	// Set MCES<MCHK> bit
2383
2384        mtpr	r25, pt_misc			// Save mchk code!scbv!whami!mces
2385        ldah	r14, 0xfff0(r31)
2386
2387        mtpr	r1, pt1				// Stash for scratch
2388        zap	r14, 0xE0, r14			// Get Cbox IPR base
2389
2390        mtpr	r4, pt4
2391        mtpr	r5, pt5
2392
2393        mtpr	r6, pt6
2394        blbs	r12, sys_double_machine_check   // MCHK halt if double machine check
2395
2396        br	r31, sys_mchk_collect_iprs	// Join common machine check flow
2397
2398
2399
2400//	align_to_call_pal_section
//      Align to address of first call_pal entry point (0x2000)
2402
2403//
2404// HALT	- PALcode for HALT instruction
2405//
2406// Entry:
2407//	Vectored into via hardware PALcode instruction dispatch.
2408//
2409// Function:
2410//	GO to console code
2411//
2412//
2413
2414        .text	1
2415//	. = 0x2000
2416       CALL_PAL_PRIV(PAL_HALT_ENTRY)
2417call_pal_halt:
2418        mfpr	r31, pt0		// Pad exc_addr read
2419        mfpr	r31, pt0
2420
2421        mfpr	r12, exc_addr		// get PC
2422        subq	r12, 4, r12		// Point to the HALT
2423
2424        mtpr	r12, exc_addr
2425        mtpr	r0, pt0
2426
2427//orig	pvc_jsr updpcb, bsr=1
2428        bsr    r0, pal_update_pcb      	// update the pcb
2429        lda    r0, hlt_c_sw_halt(r31)  	// set halt code to sw halt
2430        br     r31, sys_enter_console  	// enter the console
2431
2432//
2433// CFLUSH - PALcode for CFLUSH instruction
2434//
2435// Entry:
2436//	Vectored into via hardware PALcode instruction dispatch.
2437//
2438//	R16 - contains the PFN of the page to be flushed
2439//
2440// Function:
2441//	Flush all Dstream caches of 1 entire page
2442//	The CFLUSH routine is in the system specific module.
2443//
2444//
2445
2446        CALL_PAL_PRIV(PAL_CFLUSH_ENTRY)
2447Call_Pal_Cflush:
2448        br	r31, sys_cflush
2449
2450//
2451// DRAINA	- PALcode for DRAINA instruction
2452//
2453// Entry:
2454//	Vectored into via hardware PALcode instruction dispatch.
2455//	Implicit TRAPB performed by hardware.
2456//
2457// Function:
2458//	Stall instruction issue until all prior instructions are guaranteed to
2459//	complete without incurring aborts.  For the EV5 implementation, this
2460//	means waiting until all pending DREADS are returned.
2461//
2462//
2463
2464        CALL_PAL_PRIV(PAL_DRAINA_ENTRY)
2465Call_Pal_Draina:
2466        ldah	r14, 0x100(r31)		// Init counter.  Value?
2467        nop
2468
2469DRAINA_LOOP:
2470        subq	r14, 1, r14		// Decrement counter
2471        mfpr	r13, ev5__maf_mode	// Fetch status bit
2472
2473        srl	r13, maf_mode_v_dread_pending, r13
2474        ble	r14, DRAINA_LOOP_TOO_LONG
2475
2476        nop
2477        blbs	r13, DRAINA_LOOP	// Wait until all DREADS clear
2478
2479        hw_rei
2480
2481DRAINA_LOOP_TOO_LONG:
2482        br	r31, call_pal_halt
2483
2484// CALL_PAL OPCDECs
2485
2486        CALL_PAL_PRIV(0x0003)
2487CallPal_OpcDec03:
2488        br	r31, osfpal_calpal_opcdec
2489
2490        CALL_PAL_PRIV(0x0004)
2491CallPal_OpcDec04:
2492        br	r31, osfpal_calpal_opcdec
2493
2494        CALL_PAL_PRIV(0x0005)
2495CallPal_OpcDec05:
2496        br	r31, osfpal_calpal_opcdec
2497
2498        CALL_PAL_PRIV(0x0006)
2499CallPal_OpcDec06:
2500        br	r31, osfpal_calpal_opcdec
2501
2502        CALL_PAL_PRIV(0x0007)
2503CallPal_OpcDec07:
2504        br	r31, osfpal_calpal_opcdec
2505
2506        CALL_PAL_PRIV(0x0008)
2507CallPal_OpcDec08:
2508        br	r31, osfpal_calpal_opcdec
2509
2510//
2511// CSERVE - PALcode for CSERVE instruction
2512//
2513// Entry:
2514//	Vectored into via hardware PALcode instruction dispatch.
2515//
2516// Function:
2517//       Various functions for private use of console software
2518//
2519//       option selector in r0
2520//       arguments in r16....
2521//	The CSERVE routine is in the system specific module.
2522//
2523//
2524
2525        CALL_PAL_PRIV(PAL_CSERVE_ENTRY)
2526Call_Pal_Cserve:
2527        br	r31, sys_cserve
2528
2529//
2530// swppal - PALcode for swppal instruction
2531//
2532// Entry:
2533//	Vectored into via hardware PALcode instruction dispatch.
2535//               R16 contains the new PAL identifier
2536//               R17:R21 contain implementation-specific entry parameters
2537//
2538//               R0  receives status:
2539//                0 success (PAL was switched)
2540//                1 unknown PAL variant
2541//                2 known PAL variant, but PAL not loaded
2542//
2543//
2544// Function:
2545//       Swap control to another PAL.
2546//
2547
2548        CALL_PAL_PRIV(PAL_SWPPAL_ENTRY)
2549Call_Pal_Swppal:
        cmpule	r16, 255, r0		// see if a kibble (small PAL variant code) was passed
2551        cmoveq  r16, r16, r0            // if r16=0 then a valid address (ECO 59)
2552
        or	r16, r31, r3		// set r3 in case this is an address
2554        blbc	r0, swppal_cont		// nope, try it as an address
2555
2556        cmpeq	r16, 2, r0		// is it our friend OSF?
2557        blbc	r0, swppal_fail		// nope, don't know this fellow
2558
2559        br	r2, CALL_PAL_SWPPAL_10_			// tis our buddy OSF
2560
2561//	.global	osfpal_hw_entry_reset
2562//	.weak	osfpal_hw_entry_reset
2563//	.long	<osfpal_hw_entry_reset-pal_start>
2564//orig	halt				// don't know how to get the address here - kludge ok, load pal at 0
2565        .long	0			// ?? hack upon hack...pb
2566
2567CALL_PAL_SWPPAL_10_: 	ldl_p	r3, 0(r2)		// fetch target addr
2568//	ble	r3, swppal_fail		; if OSF not linked in say not loaded.
2569        mfpr	r2, pal_base		// fetch pal base
2570
2571        addq	r2, r3, r3		// add pal base
2572        lda	r2, 0x3FFF(r31)		// get pal base checker mask
2573
2574        and	r3, r2, r2		// any funky bits set?
2575        cmpeq	r2, 0, r0		//
2576
2577        blbc	r0, swppal_fail		// return unknown if bad bit set.
2578        br	r31, swppal_cont
2579
2580// .sbttl	"CALL_PAL OPCDECs"
2581
2582        CALL_PAL_PRIV(0x000B)
2583CallPal_OpcDec0B:
2584        br	r31, osfpal_calpal_opcdec
2585
2586        CALL_PAL_PRIV(0x000C)
2587CallPal_OpcDec0C:
2588        br	r31, osfpal_calpal_opcdec
2589
2590//
2591// wripir - PALcode for wripir instruction
2592//
2593// Entry:
2594//	Vectored into via hardware PALcode instruction dispatch.
2595//	r16 = processor number to interrupt
2596//
2597// Function:
2598//	IPIR	<- R16
2599//	Handled in system-specific code
2600//
2601// Exit:
2602//	interprocessor interrupt is recorded on the target processor
2603//	and is initiated when the proper enabling conditions are present.
2604//
2605
2606        CALL_PAL_PRIV(PAL_WRIPIR_ENTRY)
2607Call_Pal_Wrpir:
2608        br	r31, sys_wripir
2609
2610// .sbttl	"CALL_PAL OPCDECs"
2611
2612        CALL_PAL_PRIV(0x000E)
2613CallPal_OpcDec0E:
2614        br	r31, osfpal_calpal_opcdec
2615
2616        CALL_PAL_PRIV(0x000F)
2617CallPal_OpcDec0F:
2618        br	r31, osfpal_calpal_opcdec
2619
2620//
2621// rdmces - PALcode for rdmces instruction
2622//
2623// Entry:
2624//	Vectored into via hardware PALcode instruction dispatch.
2625//
2626// Function:
2627//	R0 <- ZEXT(MCES)
2628//
2629
2630        CALL_PAL_PRIV(PAL_RDMCES_ENTRY)
2631Call_Pal_Rdmces:
2632        mfpr	r0, pt_mces		// Read from PALtemp
2633        and	r0, mces_m_all, r0	// Clear other bits
2634
2635        hw_rei
2636
2637//
2638// wrmces - PALcode for wrmces instruction
2639//
2640// Entry:
2641//	Vectored into via hardware PALcode instruction dispatch.
2642//
2643// Function:
2644//	If {R16<0> EQ 1} then MCES<0> <- 0 (MCHK)
2645//	If {R16<1> EQ 1} then MCES<1> <- 0 (SCE)
2646//	If {R16<2> EQ 1} then MCES<2> <- 0 (PCE)
2647//	MCES<3> <- R16<3>		   (DPC)
2648//	MCES<4> <- R16<4>		   (DSC)
2649//
2650//
2651
2652        CALL_PAL_PRIV(PAL_WRMCES_ENTRY)
2653Call_Pal_Wrmces:
2654        and	r16, ((1<<mces_v_mchk) | (1<<mces_v_sce) | (1<<mces_v_pce)), r13	// Isolate MCHK, SCE, PCE
2655        mfpr	r14, pt_mces		// Get current value
2656
2657        ornot	r31, r13, r13		// Flip all the bits
2658        and	r16, ((1<<mces_v_dpc) | (1<<mces_v_dsc)), r17
2659
2660        and	r14, r13, r1		// Update MCHK, SCE, PCE
2661        bic	r1, ((1<<mces_v_dpc) | (1<<mces_v_dsc)), r1	// Clear old DPC, DSC
2662
2663        or	r1, r17, r1		// Update DPC and DSC
2664        mtpr	r1, pt_mces		// Write MCES back
2665
2666        nop				// Pad to fix PT write->read restriction
2667
2668        nop
2669        hw_rei
2670
2671
2672
2673// CALL_PAL OPCDECs
2674
2675        CALL_PAL_PRIV(0x0012)
2676CallPal_OpcDec12:
2677        br	r31, osfpal_calpal_opcdec
2678
2679        CALL_PAL_PRIV(0x0013)
2680CallPal_OpcDec13:
2681        br	r31, osfpal_calpal_opcdec
2682
2683        CALL_PAL_PRIV(0x0014)
2684CallPal_OpcDec14:
2685        br	r31, osfpal_calpal_opcdec
2686
2687        CALL_PAL_PRIV(0x0015)
2688CallPal_OpcDec15:
2689        br	r31, osfpal_calpal_opcdec
2690
2691        CALL_PAL_PRIV(0x0016)
2692CallPal_OpcDec16:
2693        br	r31, osfpal_calpal_opcdec
2694
2695        CALL_PAL_PRIV(0x0017)
2696CallPal_OpcDec17:
2697        br	r31, osfpal_calpal_opcdec
2698
2699        CALL_PAL_PRIV(0x0018)
2700CallPal_OpcDec18:
2701        br	r31, osfpal_calpal_opcdec
2702
2703        CALL_PAL_PRIV(0x0019)
2704CallPal_OpcDec19:
2705        br	r31, osfpal_calpal_opcdec
2706
2707        CALL_PAL_PRIV(0x001A)
2708CallPal_OpcDec1A:
2709        br	r31, osfpal_calpal_opcdec
2710
2711        CALL_PAL_PRIV(0x001B)
2712CallPal_OpcDec1B:
2713        br	r31, osfpal_calpal_opcdec
2714
2715        CALL_PAL_PRIV(0x001C)
2716CallPal_OpcDec1C:
2717        br	r31, osfpal_calpal_opcdec
2718
2719        CALL_PAL_PRIV(0x001D)
2720CallPal_OpcDec1D:
2721        br	r31, osfpal_calpal_opcdec
2722
2723        CALL_PAL_PRIV(0x001E)
2724CallPal_OpcDec1E:
2725        br	r31, osfpal_calpal_opcdec
2726
2727        CALL_PAL_PRIV(0x001F)
2728CallPal_OpcDec1F:
2729        br	r31, osfpal_calpal_opcdec
2730
2731        CALL_PAL_PRIV(0x0020)
2732CallPal_OpcDec20:
2733        br	r31, osfpal_calpal_opcdec
2734
2735        CALL_PAL_PRIV(0x0021)
2736CallPal_OpcDec21:
2737        br	r31, osfpal_calpal_opcdec
2738
2739        CALL_PAL_PRIV(0x0022)
2740CallPal_OpcDec22:
2741        br	r31, osfpal_calpal_opcdec
2742
2743        CALL_PAL_PRIV(0x0023)
2744CallPal_OpcDec23:
2745        br	r31, osfpal_calpal_opcdec
2746
2747        CALL_PAL_PRIV(0x0024)
2748CallPal_OpcDec24:
2749        br	r31, osfpal_calpal_opcdec
2750
2751        CALL_PAL_PRIV(0x0025)
2752CallPal_OpcDec25:
2753        br	r31, osfpal_calpal_opcdec
2754
2755        CALL_PAL_PRIV(0x0026)
2756CallPal_OpcDec26:
2757        br	r31, osfpal_calpal_opcdec
2758
2759        CALL_PAL_PRIV(0x0027)
2760CallPal_OpcDec27:
2761        br	r31, osfpal_calpal_opcdec
2762
2763        CALL_PAL_PRIV(0x0028)
2764CallPal_OpcDec28:
2765        br	r31, osfpal_calpal_opcdec
2766
2767        CALL_PAL_PRIV(0x0029)
2768CallPal_OpcDec29:
2769        br	r31, osfpal_calpal_opcdec
2770
2771        CALL_PAL_PRIV(0x002A)
2772CallPal_OpcDec2A:
2773        br	r31, osfpal_calpal_opcdec
2774
2775//
2776// wrfen - PALcode for wrfen instruction
2777//
2778// Entry:
2779//	Vectored into via hardware PALcode instruction dispatch.
2780//
2781// Function:
2782//	a0<0> -> ICSR<FPE>
2783//	Store new FEN in PCB
2784//	Final value of t0 (r1), t8..t10 (r22..r24) and a0 (r16)
2785//          are UNPREDICTABLE
2786//
2787// Issue: What about pending FP loads when FEN goes from on->off????
2788//
2789
2790        CALL_PAL_PRIV(PAL_WRFEN_ENTRY)
2791Call_Pal_Wrfen:
2792        or	r31, 1, r13		// Get a one
2793        mfpr	r1, ev5__icsr		// Get current FPE
2794
2795        sll	r13, icsr_v_fpe, r13	// shift 1 to icsr<fpe> spot, e0
2796        and	r16, 1, r16		// clean new fen
2797
2798        sll	r16, icsr_v_fpe, r12	// shift new fen to correct bit position
2799        bic	r1, r13, r1		// zero icsr<fpe>
2800
2801        or	r1, r12, r1		// Or new FEN into ICSR
2802        mfpr	r12, pt_pcbb		// Get PCBB - E1
2803
2804        mtpr	r1, ev5__icsr		// write new ICSR.  3 Bubble cycles to HW_REI
2805        stl_p	r16, osfpcb_q_fen(r12)	// Store FEN in PCB.
2806
2807        mfpr	r31, pt0		// Pad ICSR<FPE> write.
2808        mfpr	r31, pt0
2809
2810        mfpr	r31, pt0
2811//	pvc_violate 	225		// cuz PVC can't distinguish which bits changed
2812        hw_rei
2813
2814
2815        CALL_PAL_PRIV(0x002C)
2816CallPal_OpcDec2C:
2817        br	r31, osfpal_calpal_opcdec
2818
2819//
// wrvptptr - PALcode for wrvptptr instruction
2821//
2822// Entry:
2823//	Vectored into via hardware PALcode instruction dispatch.
2824//
2825// Function:
2826//	vptptr <- a0 (r16)
2827//
2828
2829        CALL_PAL_PRIV(PAL_WRVPTPTR_ENTRY)
2830Call_Pal_Wrvptptr:
2831        mtpr    r16, ev5__mvptbr                // Load Mbox copy
2832        mtpr    r16, ev5__ivptbr                // Load Ibox copy
2833        nop                                     // Pad IPR write
2834        nop
2835        hw_rei
2836
2837        CALL_PAL_PRIV(0x002E)
2838CallPal_OpcDec2E:
2839        br	r31, osfpal_calpal_opcdec
2840
2841        CALL_PAL_PRIV(0x002F)
2842CallPal_OpcDec2F:
2843        br	r31, osfpal_calpal_opcdec
2844
2845
2846//
2847// swpctx - PALcode for swpctx instruction
2848//
2849// Entry:
2850//       hardware dispatch via callPal instruction
2851//       R16 -> new pcb
2852//
2853// Function:
2854//       dynamic state moved to old pcb
2855//       new state loaded from new pcb
2856//       pcbb pointer set
2857//       old pcbb returned in R0
2858//
2859//  Note: need to add perf monitor stuff
2860//
2861
2862        CALL_PAL_PRIV(PAL_SWPCTX_ENTRY)
2863Call_Pal_Swpctx:
2864        rpcc	r13			// get cyccounter
2865        mfpr	r0, pt_pcbb		// get pcbb
2866
2867        ldq_p	r22, osfpcb_q_fen(r16)	// get new fen/pme
        ldq_p	r23, osfpcb_l_cc(r16)	// get new cc (low half) / asn (high half)
2869
2870        srl	r13, 32, r25		// move offset
2871        mfpr	r24, pt_usp		// get usp
2872
2873        stq_p	r30, osfpcb_q_ksp(r0)	// store old ksp
2874//	pvc_violate 379			// stq_p can't trap except replay.  only problem if mf same ipr in same shadow.
2875        mtpr	r16, pt_pcbb		// set new pcbb
2876
2877        stq_p	r24, osfpcb_q_usp(r0)	// store usp
2878        addl	r13, r25, r25		// merge for new time
2879
2880        stl_p	r25, osfpcb_l_cc(r0)	// save time
2881        ldah	r24, (1<<(icsr_v_fpe-16))(r31)
2882
2883        and	r22, 1, r12		// isolate fen
2884        mfpr	r25, icsr		// get current icsr
2885
2886        lda	r24, (1<<icsr_v_pmp)(r24)
2887        br	r31, swpctx_cont
2888
2889//
2890// wrval - PALcode for wrval instruction
2891//
2892// Entry:
2893//	Vectored into via hardware PALcode instruction dispatch.
2894//
2895// Function:
2896//	sysvalue <- a0 (r16)
2897//
2898
2899        CALL_PAL_PRIV(PAL_WRVAL_ENTRY)
2900Call_Pal_Wrval:
2901        nop
2902        mtpr	r16, pt_sysval		// Pad paltemp write
2903        nop
2904        nop
2905        hw_rei
2906
2907//
2908// rdval - PALcode for rdval instruction
2909//
2910// Entry:
2911//	Vectored into via hardware PALcode instruction dispatch.
2912//
2913// Function:
2914//	v0 (r0) <- sysvalue
2915//
2916
2917        CALL_PAL_PRIV(PAL_RDVAL_ENTRY)
2918Call_Pal_Rdval:
2919        nop
2920        mfpr	r0, pt_sysval
2921        nop
2922        hw_rei
2923
2924//
2925// tbi - PALcode for tbi instruction
2926//
2927// Entry:
2928//	Vectored into via hardware PALcode instruction dispatch.
2929//
2930// Function:
2931//	TB invalidate
2932//       r16/a0 = TBI type
2933//       r17/a1 = Va for TBISx instructions
2934//
2935
2936        CALL_PAL_PRIV(PAL_TBI_ENTRY)
2937Call_Pal_Tbi:
        addq	r16, 2, r16			// bias TBI type from -2..3 to 0..5
2939        br	r23, CALL_PAL_tbi_10_		// get our address
2940
2941CALL_PAL_tbi_10_: cmpult	r16, 6, r22		// see if in range
2942        lda	r23, tbi_tbl-CALL_PAL_tbi_10_(r23)	// set base to start of table
2943        sll	r16, 4, r16		// * 16
2944        blbc	r22, CALL_PAL_tbi_30_		// go rei, if not
2945
2946        addq	r23, r16, r23		// addr of our code
2947//orig	pvc_jsr	tbi
2948        jmp	r31, (r23)		// and go do it
2949
2950CALL_PAL_tbi_30_:
2951        hw_rei
2952        nop
2953
2954//
2955// wrent - PALcode for wrent instruction
2956//
2957// Entry:
2958//	Vectored into via hardware PALcode instruction dispatch.
2959//
2960// Function:
2961//	Update ent* in paltemps
2962//       r16/a0 = Address of entry routine
2963//       r17/a1 = Entry Number 0..5
2964//
2965//       r22, r23 trashed
2966//
2967
2968        CALL_PAL_PRIV(PAL_WRENT_ENTRY)
2969Call_Pal_Wrent:
2970        cmpult	r17, 6, r22			// see if in range
2971        br	r23, CALL_PAL_wrent_10_		// get our address
2972
2973CALL_PAL_wrent_10_:	bic	r16, 3, r16	// clean pc
2974        blbc	r22, CALL_PAL_wrent_30_		// go rei, if not in range
2975
2976        lda	r23, wrent_tbl-CALL_PAL_wrent_10_(r23)	// set base to start of table
2977        sll	r17, 4, r17				// *16
2978
2979        addq  	r17, r23, r23		// Get address in table
2980//orig	pvc_jsr	wrent
2981        jmp	r31, (r23)		// and go do it
2982
2983CALL_PAL_wrent_30_:
2984        hw_rei				// out of range, just return
2985
2986//
2987// swpipl - PALcode for swpipl instruction
2988//
2989// Entry:
2990//	Vectored into via hardware PALcode instruction dispatch.
2991//
2992// Function:
2993//	v0 (r0)  <- PS<IPL>
2994//	PS<IPL>  <- a0<2:0>  (r16)
2995//
2996//	t8 (r22) is scratch
2997//
2998
2999        CALL_PAL_PRIV(PAL_SWPIPL_ENTRY)
3000Call_Pal_Swpipl:
3001        and	r16, osfps_m_ipl, r16	// clean New ipl
3002        mfpr	r22, pt_intmask		// get int mask
3003
3004        extbl	r22, r16, r22		// get mask for this ipl
3005        bis	r11, r31, r0		// return old ipl
3006
3007        bis	r16, r31, r11		// set new ps
3008        mtpr	r22, ev5__ipl		// set new mask
3009
3010        mfpr	r31, pt0		// pad ipl write
3011        mfpr	r31, pt0		// pad ipl write
3012
3013        hw_rei				// back
3014
3015//
3016// rdps - PALcode for rdps instruction
3017//
3018// Entry:
3019//	Vectored into via hardware PALcode instruction dispatch.
3020//
3021// Function:
3022//	v0 (r0) <- ps
3023//
3024
3025        CALL_PAL_PRIV(PAL_RDPS_ENTRY)
3026Call_Pal_Rdps:
3027        bis	r11, r31, r0		// Fetch PALshadow PS
3028        nop				// Must be 2 cycles long
3029        hw_rei
3030
3031//
3032// wrkgp - PALcode for wrkgp instruction
3033//
3034// Entry:
3035//	Vectored into via hardware PALcode instruction dispatch.
3036//
3037// Function:
3038//	kgp <- a0 (r16)
3039//
3040
3041        CALL_PAL_PRIV(PAL_WRKGP_ENTRY)
3042Call_Pal_Wrkgp:
3043        nop
3044        mtpr	r16, pt_kgp
3045        nop				// Pad for pt write->read restriction
3046        nop
3047        hw_rei
3048
3049//
3050// wrusp - PALcode for wrusp instruction
3051//
3052// Entry:
3053//	Vectored into via hardware PALcode instruction dispatch.
3054//
3055// Function:
3056//       usp <- a0 (r16)
3057//
3058
3059        CALL_PAL_PRIV(PAL_WRUSP_ENTRY)
3060Call_Pal_Wrusp:
3061        nop
3062        mtpr	r16, pt_usp
3063        nop				// Pad possible pt write->read restriction
3064        nop
3065        hw_rei
3066
3067//
3068// wrperfmon - PALcode for wrperfmon instruction
3069//
3070// Entry:
3071//	Vectored into via hardware PALcode instruction dispatch.
3072//
3073//
3074// Function:
3075//	Various control functions for the onchip performance counters
3076//
3077//	option selector in r16
3078//	option argument in r17
3079//	returned status in r0
3080//
3081//
3082//	r16 = 0	Disable performance monitoring for one or more cpu's
3083//	  r17 = 0		disable no counters
3084//	  r17 = bitmask		disable counters specified in bit mask (1=disable)
3085//
3086//	r16 = 1	Enable performance monitoring for one or more cpu's
3087//	  r17 = 0		enable no counters
3088//	  r17 = bitmask		enable counters specified in bit mask (1=enable)
3089//
3090//	r16 = 2	Mux select for one or more cpu's
3091//	  r17 = Mux selection (cpu specific)
3092//    		<24:19>  	 bc_ctl<pm_mux_sel> field (see spec)
3093//		<31>,<7:4>,<3:0> pmctr <sel0>,<sel1>,<sel2> fields (see spec)
3094//
3095//	r16 = 3	Options
3096//	  r17 = (cpu specific)
3097//		<0> = 0 	log all processes
3098//		<0> = 1		log only selected processes
3099//		<30,9,8> 		mode select - ku,kp,kk
3100//
3101//	r16 = 4	Interrupt frequency select
3102//	  r17 = (cpu specific)	indicates interrupt frequencies desired for each
3103//				counter, with "zero interrupts" being an option
3104//				frequency info in r17 bits as defined by PMCTR_CTL<FRQx> below
3105//
3106//	r16 = 5	Read Counters
3107//	  r17 = na
3108//	  r0  = value (same format as ev5 pmctr)
3109//	        <0> = 0		Read failed
3110//	        <0> = 1		Read succeeded
3111//
3112//	r16 = 6	Write Counters
3113//	  r17 = value (same format as ev5 pmctr; all counters written simultaneously)
3114//
3115//	r16 = 7	Enable performance monitoring for one or more cpu's and reset counter to 0
3116//	  r17 = 0		enable no counters
3117//	  r17 = bitmask		enable & clear counters specified in bit mask (1=enable & clear)
3118//
3119//=============================================================================
3120//Assumptions:
3121//PMCTR_CTL:
3122//
3123//       <15:14>         CTL0 -- encoded frequency select and enable - CTR0
3124//       <13:12>         CTL1 --			"		   - CTR1
3125//       <11:10>         CTL2 --			"		   - CTR2
3126//
3127//       <9:8>           FRQ0 -- frequency select for CTR0 (no enable info)
3128//       <7:6>           FRQ1 -- frequency select for CTR1
3129//       <5:4>           FRQ2 -- frequency select for CTR2
3130//
3131//       <0>		all vs. select processes (0=all,1=select)
3132//
3133//     where
3134//	FRQx<1:0>
3135//	     0 1	disable interrupt
3136//	     1 0	frequency = 65536 (16384 for ctr2)
3137//	     1 1	frequency = 256
3138//	note:  FRQx<1:0> = 00 will keep counters from ever being enabled.
3139//
3140//=============================================================================
3141//
3142        CALL_PAL_PRIV(0x0039)
3143// unsupported in Hudson code .. pboyle Nov/95
3144CALL_PAL_Wrperfmon:
3145        // "real" performance monitoring code
3146        cmpeq	r16, 1, r0		// check for enable
3147        bne	r0, perfmon_en		// br if requested to enable
3148
3149        cmpeq	r16, 2, r0		// check for mux ctl
3150        bne	r0, perfmon_muxctl	// br if request to set mux controls
3151
3152        cmpeq	r16, 3, r0		// check for options
3153        bne	r0, perfmon_ctl		// br if request to set options
3154
3155        cmpeq	r16, 4, r0		// check for interrupt frequency select
3156        bne	r0, perfmon_freq	// br if request to change frequency select
3157
3158        cmpeq	r16, 5, r0		// check for counter read request
3159        bne	r0, perfmon_rd		// br if request to read counters
3160
3161        cmpeq	r16, 6, r0		// check for counter write request
3162        bne	r0, perfmon_wr		// br if request to write counters
3163
3164        cmpeq	r16, 7, r0		// check for counter clear/enable request
3165        bne	r0, perfmon_enclr	// br if request to clear/enable counters
3166
3167        beq	r16, perfmon_dis	// br if requested to disable (r16=0)
3168        br	r31, perfmon_unknown	// br if unknown request
3169
3170//
3171// rdusp - PALcode for rdusp instruction
3172//
3173// Entry:
3174//	Vectored into via hardware PALcode instruction dispatch.
3175//
3176// Function:
3177//	v0 (r0) <- usp
3178//
3179
3180        CALL_PAL_PRIV(PAL_RDUSP_ENTRY)
3181Call_Pal_Rdusp:
3182        nop
3183        mfpr	r0, pt_usp
3184        hw_rei
3185
3186
3187        CALL_PAL_PRIV(0x003B)
3188CallPal_OpcDec3B:
3189        br	r31, osfpal_calpal_opcdec
3190
3191//
3192// whami - PALcode for whami instruction
3193//
3194// Entry:
3195//	Vectored into via hardware PALcode instruction dispatch.
3196//
3197// Function:
3198//	v0 (r0) <- whami
3199//
3200        CALL_PAL_PRIV(PAL_WHAMI_ENTRY)
3201Call_Pal_Whami:
3202        nop
3203        mfpr    r0, pt_whami            // Get Whami
3204        extbl	r0, 1, r0		// Isolate just whami bits
3205        hw_rei
3206
3207//
3208// retsys - PALcode for retsys instruction
3209//
3210// Entry:
3211//	Vectored into via hardware PALcode instruction dispatch.
3212//       00(sp) contains return pc
3213//       08(sp) contains r29
3214//
3215// Function:
3216//	Return from system call.
3217//       mode switched from kern to user.
3218//       stacks swapped, ugp, upc restored.
3219//       r23, r25 junked
3220//
3221
3222        CALL_PAL_PRIV(PAL_RETSYS_ENTRY)
3223Call_Pal_Retsys:
3224        lda	r25, osfsf_c_size(sp) 	// pop stack
3225        bis	r25, r31, r14		// touch r25 & r14 to stall mf exc_addr
3226
3227        mfpr	r14, exc_addr		// save exc_addr in case of fault
3228        ldq	r23, osfsf_pc(sp) 	// get pc
3229
3230        ldq	r29, osfsf_gp(sp) 	// get gp
3231        stl_c	r31, -4(sp)		// clear lock_flag
3232
        lda	r11, 1<<osfps_v_mode(r31)	// new PS:mode=user
3234        mfpr	r30, pt_usp		// get users stack
3235
3236        bic	r23, 3, r23		// clean return pc
3237        mtpr	r31, ev5__ipl		// zero ibox IPL - 2 bubbles to hw_rei
3238
3239        mtpr	r11, ev5__dtb_cm	// set Mbox current mode - no virt ref for 2 cycles
3240        mtpr	r11, ev5__ps		// set Ibox current mode - 2 bubble to hw_rei
3241
3242        mtpr	r23, exc_addr		// set return address - 1 bubble to hw_rei
3243        mtpr	r25, pt_ksp		// save kern stack
3244
3245        rc	r31			// clear inter_flag
3246//	pvc_violate 248			// possible hidden mt->mf pt violation ok in callpal
3247        hw_rei_spe			// and back
3248
3249
3250        CALL_PAL_PRIV(0x003E)
3251CallPal_OpcDec3E:
3252        br	r31, osfpal_calpal_opcdec
3253
3254//
3255// rti - PALcode for rti instruction
3256//
3257// Entry:
3258//	Vectored into via hardware PALcode instruction dispatch.
3259//
3260// Function:
3261//	00(sp) -> ps
3262//	08(sp) -> pc
3263//	16(sp) -> r29 (gp)
3264//	24(sp) -> r16 (a0)
3265//	32(sp) -> r17 (a1)
3266//	40(sp) -> r18 (a3)
3267//
3268
3269        CALL_PAL_PRIV(PAL_RTI_ENTRY)
3270        /* called once by platform_tlaser */
3271        .globl Call_Pal_Rti
3272Call_Pal_Rti:
3273        lda	r25, osfsf_c_size(sp)	// get updated sp
3274        bis	r25, r31, r14		// touch r14,r25 to stall mf exc_addr
3275
3276        mfpr	r14, exc_addr		// save PC in case of fault
3277        rc	r31			// clear intr_flag
3278
3279        ldq	r12, -6*8(r25)		// get ps
3280        ldq	r13, -5*8(r25)		// pc
3281
3282        ldq	r18, -1*8(r25)		// a2
3283        ldq	r17, -2*8(r25)		// a1
3284
3285        ldq	r16, -3*8(r25)		// a0
3286        ldq	r29, -4*8(r25)		// gp
3287
3288        bic	r13, 3, r13		// clean return pc
3289        stl_c	r31, -4(r25)		// clear lock_flag
3290
3291        and	r12, osfps_m_mode, r11	// get mode
3292        mtpr	r13, exc_addr		// set return address
3293
3294        beq	r11, rti_to_kern	// br if rti to Kern
3295        br	r31, rti_to_user	// out of call_pal space
3296
3297
3298///////////////////////////////////////////////////
3299// Start the Unprivileged CALL_PAL Entry Points
3300///////////////////////////////////////////////////
3301
3302//
3303// bpt - PALcode for bpt instruction
3304//
3305// Entry:
3306//	Vectored into via hardware PALcode instruction dispatch.
3307//
3308// Function:
3309//	Build stack frame
3310//	a0 <- code
3311//	a1 <- unpred
3312//	a2 <- unpred
3313//	vector via entIF
3314//
3315//
3316//
3317        .text	1
3318//	. = 0x3000
3319        CALL_PAL_UNPRIV(PAL_BPT_ENTRY)
3320Call_Pal_Bpt:
3321        sll	r11, 63-osfps_v_mode, r25 // Shift mode up to MS bit
3322        mtpr	r31, ev5__ps		// Set Ibox current mode to kernel
3323
3324        bis	r11, r31, r12		// Save PS for stack write
3325        bge	r25, CALL_PAL_bpt_10_		// no stack swap needed if cm=kern
3326
3327        mtpr	r31, ev5__dtb_cm	// Set Mbox current mode to kernel -
3328                                        //     no virt ref for next 2 cycles
3329        mtpr	r30, pt_usp		// save user stack
3330
3331        bis	r31, r31, r11		// Set new PS
3332        mfpr	r30, pt_ksp
3333
3334CALL_PAL_bpt_10_:
        lda	sp, 0-osfsf_c_size(sp)	// allocate stack space
3336        mfpr	r14, exc_addr		// get pc
3337
3338        stq	r16, osfsf_a0(sp)	// save regs
3339        bis	r31, osf_a0_bpt, r16	// set a0
3340
3341        stq	r17, osfsf_a1(sp)	// a1
3342        br	r31, bpt_bchk_common	// out of call_pal space
3343
3344
3345//
3346// bugchk - PALcode for bugchk instruction
3347//
3348// Entry:
3349//	Vectored into via hardware PALcode instruction dispatch.
3350//
3351// Function:
3352//	Build stack frame
3353//	a0 <- code
3354//	a1 <- unpred
3355//	a2 <- unpred
3356//	vector via entIF
3357//
3358//
3359//
3360        CALL_PAL_UNPRIV(PAL_BUGCHK_ENTRY)
3361Call_Pal_Bugchk:
3362        sll	r11, 63-osfps_v_mode, r25 // Shift mode up to MS bit
3363        mtpr	r31, ev5__ps		// Set Ibox current mode to kernel
3364
3365        bis	r11, r31, r12		// Save PS for stack write
3366        bge	r25, CALL_PAL_bugchk_10_		// no stack swap needed if cm=kern
3367
3368        mtpr	r31, ev5__dtb_cm	// Set Mbox current mode to kernel -
3369                                        //     no virt ref for next 2 cycles
3370        mtpr	r30, pt_usp		// save user stack
3371
3372        bis	r31, r31, r11		// Set new PS
3373        mfpr	r30, pt_ksp
3374
3375CALL_PAL_bugchk_10_:
        lda	sp, 0-osfsf_c_size(sp)	// allocate stack space
3377        mfpr	r14, exc_addr		// get pc
3378
3379        stq	r16, osfsf_a0(sp)	// save regs
3380        bis	r31, osf_a0_bugchk, r16	// set a0
3381
3382        stq	r17, osfsf_a1(sp)	// a1
3383        br	r31, bpt_bchk_common	// out of call_pal space
3384
3385
3386        CALL_PAL_UNPRIV(0x0082)
3387CallPal_OpcDec82:
3388        br	r31, osfpal_calpal_opcdec
3389
3390//
3391// callsys - PALcode for callsys instruction
3392//
3393// Entry:
3394//	Vectored into via hardware PALcode instruction dispatch.
3395//
3396// Function:
3397// 	Switch mode to kernel and build a callsys stack frame.
3398//       sp = ksp
3399//       gp = kgp
3400//	t8 - t10 (r22-r24) trashed
3401//
3402//
3403//
3404        CALL_PAL_UNPRIV(PAL_CALLSYS_ENTRY)
3405Call_Pal_Callsys:
3406
3407        and	r11, osfps_m_mode, r24	// get mode
3408        mfpr	r22, pt_ksp		// get ksp
3409
3410        beq	r24, sys_from_kern 	// sysCall from kern is not allowed
3411        mfpr	r12, pt_entsys		// get address of callSys routine
3412
3413//
3414// from here on we know we are in user going to Kern
3415//
3416        mtpr	r31, ev5__dtb_cm	// set Mbox current mode - no virt ref for 2 cycles
3417        mtpr	r31, ev5__ps		// set Ibox current mode - 2 bubble to hw_rei
3418
3419        bis	r31, r31, r11		// PS=0 (mode=kern)
3420        mfpr	r23, exc_addr		// get pc
3421
3422        mtpr	r30, pt_usp		// save usp
        lda	sp, 0-osfsf_c_size(r22)	// set new sp
3424
3425        stq	r29, osfsf_gp(sp)	// save user gp/r29
3426        stq	r24, osfsf_ps(sp)	// save ps
3427
3428        stq	r23, osfsf_pc(sp)	// save pc
3429        mtpr	r12, exc_addr		// set address
3430                                        // 1 cycle to hw_rei
3431
3432        mfpr	r29, pt_kgp		// get the kern gp/r29
3433
3434        hw_rei_spe			// and off we go!
3435
3436
3437        CALL_PAL_UNPRIV(0x0084)
3438CallPal_OpcDec84:
3439        br	r31, osfpal_calpal_opcdec
3440
3441        CALL_PAL_UNPRIV(0x0085)
3442CallPal_OpcDec85:
3443        br	r31, osfpal_calpal_opcdec
3444
3445//
3446// imb - PALcode for imb instruction
3447//
3448// Entry:
3449//	Vectored into via hardware PALcode instruction dispatch.
3450//
3451// Function:
3452//       Flush the writebuffer and flush the Icache
3453//
3454//
3455//
3456        CALL_PAL_UNPRIV(PAL_IMB_ENTRY)
3457Call_Pal_Imb:
3458        mb                              // Clear the writebuffer
3459        mfpr    r31, ev5__mcsr          // Sync with clear
3460        nop
3461        nop
3462        br      r31, pal_ic_flush           // Flush Icache
3463
3464
3465// CALL_PAL OPCDECs
3466
3467        CALL_PAL_UNPRIV(0x0087)
3468CallPal_OpcDec87:
3469        br	r31, osfpal_calpal_opcdec
3470
3471        CALL_PAL_UNPRIV(0x0088)
3472CallPal_OpcDec88:
3473        br	r31, osfpal_calpal_opcdec
3474
3475        CALL_PAL_UNPRIV(0x0089)
3476CallPal_OpcDec89:
3477        br	r31, osfpal_calpal_opcdec
3478
3479        CALL_PAL_UNPRIV(0x008A)
3480CallPal_OpcDec8A:
3481        br	r31, osfpal_calpal_opcdec
3482
3483        CALL_PAL_UNPRIV(0x008B)
3484CallPal_OpcDec8B:
3485        br	r31, osfpal_calpal_opcdec
3486
3487        CALL_PAL_UNPRIV(0x008C)
3488CallPal_OpcDec8C:
3489        br	r31, osfpal_calpal_opcdec
3490
3491        CALL_PAL_UNPRIV(0x008D)
3492CallPal_OpcDec8D:
3493        br	r31, osfpal_calpal_opcdec
3494
3495        CALL_PAL_UNPRIV(0x008E)
3496CallPal_OpcDec8E:
3497        br	r31, osfpal_calpal_opcdec
3498
3499        CALL_PAL_UNPRIV(0x008F)
3500CallPal_OpcDec8F:
3501        br	r31, osfpal_calpal_opcdec
3502
3503        CALL_PAL_UNPRIV(0x0090)
3504CallPal_OpcDec90:
3505        br	r31, osfpal_calpal_opcdec
3506
3507        CALL_PAL_UNPRIV(0x0091)
3508CallPal_OpcDec91:
3509        br	r31, osfpal_calpal_opcdec
3510
3511        CALL_PAL_UNPRIV(0x0092)
3512CallPal_OpcDec92:
3513        br	r31, osfpal_calpal_opcdec
3514
3515        CALL_PAL_UNPRIV(0x0093)
3516CallPal_OpcDec93:
3517        br	r31, osfpal_calpal_opcdec
3518
3519        CALL_PAL_UNPRIV(0x0094)
3520CallPal_OpcDec94:
3521        br	r31, osfpal_calpal_opcdec
3522
3523        CALL_PAL_UNPRIV(0x0095)
3524CallPal_OpcDec95:
3525        br	r31, osfpal_calpal_opcdec
3526
3527        CALL_PAL_UNPRIV(0x0096)
3528CallPal_OpcDec96:
3529        br	r31, osfpal_calpal_opcdec
3530
3531        CALL_PAL_UNPRIV(0x0097)
3532CallPal_OpcDec97:
3533        br	r31, osfpal_calpal_opcdec
3534
3535        CALL_PAL_UNPRIV(0x0098)
3536CallPal_OpcDec98:
3537        br	r31, osfpal_calpal_opcdec
3538
3539        CALL_PAL_UNPRIV(0x0099)
3540CallPal_OpcDec99:
3541        br	r31, osfpal_calpal_opcdec
3542
3543        CALL_PAL_UNPRIV(0x009A)
3544CallPal_OpcDec9A:
3545        br	r31, osfpal_calpal_opcdec
3546
3547        CALL_PAL_UNPRIV(0x009B)
3548CallPal_OpcDec9B:
3549        br	r31, osfpal_calpal_opcdec
3550
3551        CALL_PAL_UNPRIV(0x009C)
3552CallPal_OpcDec9C:
3553        br	r31, osfpal_calpal_opcdec
3554
3555        CALL_PAL_UNPRIV(0x009D)
3556CallPal_OpcDec9D:
3557        br	r31, osfpal_calpal_opcdec
3558
3559//
3560// rdunique - PALcode for rdunique instruction
3561//
3562// Entry:
3563//	Vectored into via hardware PALcode instruction dispatch.
3564//
3565// Function:
3566//	v0 (r0) <- unique
3567//
3568//
3569//
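// Hedged C sketch of the operation (struct and field names are illustrative,
// inferred from the osfpcb_q_unique offset used below, not from a real header):
//
//     struct osfpcb { /* ... */ unsigned long unique; /* ... */ };
//
//     unsigned long pal_rdunique(struct osfpcb *pcb)   /* pcb from pt_pcbb */
//     {
//         return pcb->unique;          /* returned to the caller in v0/r0 */
//     }
//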
3570        CALL_PAL_UNPRIV(PAL_RDUNIQUE_ENTRY)
3571CALL_PALrdunique_:
3572        mfpr	r0, pt_pcbb		// get pcb pointer
3573        ldq_p	r0, osfpcb_q_unique(r0) // read current unique value
3574
3575        hw_rei
3576
3577//
3578// wrunique - PALcode for wrunique instruction
3579//
3580// Entry:
3581//	Vectored into via hardware PALcode instruction dispatch.
3582//
3583// Function:
3584//	unique <- a0 (r16)
3585//
3586//
3587//
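// The matching C sketch for the write side (same illustrative names as above):
//
//     void pal_wrunique(struct osfpcb *pcb, unsigned long a0)
//     {
//         pcb->unique = a0;            /* new value taken from a0/r16 */
//     }
//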
3588        CALL_PAL_UNPRIV(PAL_WRUNIQUE_ENTRY)
3589CALL_PAL_Wrunique:
3590        nop
3591        mfpr	r12, pt_pcbb		// get pcb pointer
3592        stq_p	r16, osfpcb_q_unique(r12) // store new value
3593        nop				// Pad palshadow write
3594        hw_rei				// back
3595
3596// CALL_PAL OPCDECs
3597
3598        CALL_PAL_UNPRIV(0x00A0)
3599CallPal_OpcDecA0:
3600        br	r31, osfpal_calpal_opcdec
3601
3602        CALL_PAL_UNPRIV(0x00A1)
3603CallPal_OpcDecA1:
3604        br	r31, osfpal_calpal_opcdec
3605
3606        CALL_PAL_UNPRIV(0x00A2)
3607CallPal_OpcDecA2:
3608        br	r31, osfpal_calpal_opcdec
3609
3610        CALL_PAL_UNPRIV(0x00A3)
3611CallPal_OpcDecA3:
3612        br	r31, osfpal_calpal_opcdec
3613
3614        CALL_PAL_UNPRIV(0x00A4)
3615CallPal_OpcDecA4:
3616        br	r31, osfpal_calpal_opcdec
3617
3618        CALL_PAL_UNPRIV(0x00A5)
3619CallPal_OpcDecA5:
3620        br	r31, osfpal_calpal_opcdec
3621
3622        CALL_PAL_UNPRIV(0x00A6)
3623CallPal_OpcDecA6:
3624        br	r31, osfpal_calpal_opcdec
3625
3626        CALL_PAL_UNPRIV(0x00A7)
3627CallPal_OpcDecA7:
3628        br	r31, osfpal_calpal_opcdec
3629
3630        CALL_PAL_UNPRIV(0x00A8)
3631CallPal_OpcDecA8:
3632        br	r31, osfpal_calpal_opcdec
3633
3634        CALL_PAL_UNPRIV(0x00A9)
3635CallPal_OpcDecA9:
3636        br	r31, osfpal_calpal_opcdec
3637
3638
3639//
3640// gentrap - PALcode for gentrap instruction
3641//
3642// CALL_PAL_gentrap:
3643// Entry:
3644//	Vectored into via hardware PALcode instruction dispatch.
3645//
3646// Function:
3647//	Build stack frame
3648//	a0 <- code
3649//	a1 <- unpred
3650//	a2 <- unpred
3651//	vector via entIF
3652//
3653//
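// Hedged C-level sketch of the frame build this entry starts (the frame is
// completed in bpt_bchk_common; the struct, the helpers, and saved_ps are
// illustrative names, while osfsf_*, osf_a0_gentrap and entIF come from the
// included headers):
//
//     void pal_gentrap(unsigned long user_a0)
//     {
//         if (current_mode() == USER) {     /* stack swap only if cm != kern */
//             pt_usp = sp;                  /* save the user stack pointer   */
//             sp     = pt_ksp;              /* and switch to the kernel one  */
//         }
//         sp -= osfsf_c_size;               /* allocate the exception frame  */
//         frame->ps = saved_ps;             /* old PS, PC and a0..a2 go      */
//         frame->pc = exc_addr;             /*   into the frame              */
//         frame->a0 = user_a0;
//         a0 = osf_a0_gentrap;              /* tell entIF why it was entered */
//         /* a1 and a2 are unpredictable on entry to the handler */
//         vector_through(entIF);            /* kernel entry from pt_entIF    */
//     }
//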
3654
3655        CALL_PAL_UNPRIV(0x00AA)
3656// unsupported in Hudson code .. pboyle Nov/95
3657CALL_PAL_gentrap:
3658        sll	r11, 63-osfps_v_mode, r25 // Shift mode up to MS bit
3659        mtpr	r31, ev5__ps		// Set Ibox current mode to kernel
3660
3661        bis	r11, r31, r12			// Save PS for stack write
3662        bge	r25, CALL_PAL_gentrap_10_	// no stack swap needed if cm=kern
3663
3664        mtpr	r31, ev5__dtb_cm	// Set Mbox current mode to kernel -
3665                                        //     no virt ref for next 2 cycles
3666        mtpr	r30, pt_usp		// save user stack
3667
3668        bis	r31, r31, r11		// Set new PS
3669        mfpr	r30, pt_ksp
3670
3671CALL_PAL_gentrap_10_:
3672        lda	sp, 0-osfsf_c_size(sp)// allocate stack space
3673        mfpr	r14, exc_addr		// get pc
3674
3675        stq	r16, osfsf_a0(sp)	// save regs
3676        bis	r31, osf_a0_gentrap, r16// set a0
3677
3678        stq	r17, osfsf_a1(sp)	// a1
3679        br	r31, bpt_bchk_common	// out of call_pal space
3680
3681
3682// CALL_PAL OPCDECs
3683
3684        CALL_PAL_UNPRIV(0x00AB)
3685CallPal_OpcDecAB:
3686        br	r31, osfpal_calpal_opcdec
3687
3688        CALL_PAL_UNPRIV(0x00AC)
3689CallPal_OpcDecAC:
3690        br	r31, osfpal_calpal_opcdec
3691
3692        CALL_PAL_UNPRIV(0x00AD)
3693CallPal_OpcDecAD:
3694        br	r31, osfpal_calpal_opcdec
3695
3696        CALL_PAL_UNPRIV(0x00AE)
3697CallPal_OpcDecAE:
3698        br	r31, osfpal_calpal_opcdec
3699
3700        CALL_PAL_UNPRIV(0x00AF)
3701CallPal_OpcDecAF:
3702        br	r31, osfpal_calpal_opcdec
3703
3704        CALL_PAL_UNPRIV(0x00B0)
3705CallPal_OpcDecB0:
3706        br	r31, osfpal_calpal_opcdec
3707
3708        CALL_PAL_UNPRIV(0x00B1)
3709CallPal_OpcDecB1:
3710        br	r31, osfpal_calpal_opcdec
3711
3712        CALL_PAL_UNPRIV(0x00B2)
3713CallPal_OpcDecB2:
3714        br	r31, osfpal_calpal_opcdec
3715
3716        CALL_PAL_UNPRIV(0x00B3)
3717CallPal_OpcDecB3:
3718        br	r31, osfpal_calpal_opcdec
3719
3720        CALL_PAL_UNPRIV(0x00B4)
3721CallPal_OpcDecB4:
3722        br	r31, osfpal_calpal_opcdec
3723
3724        CALL_PAL_UNPRIV(0x00B5)
3725CallPal_OpcDecB5:
3726        br	r31, osfpal_calpal_opcdec
3727
3728        CALL_PAL_UNPRIV(0x00B6)
3729CallPal_OpcDecB6:
3730        br	r31, osfpal_calpal_opcdec
3731
3732        CALL_PAL_UNPRIV(0x00B7)
3733CallPal_OpcDecB7:
3734        br	r31, osfpal_calpal_opcdec
3735
3736        CALL_PAL_UNPRIV(0x00B8)
3737CallPal_OpcDecB8:
3738        br	r31, osfpal_calpal_opcdec
3739
3740        CALL_PAL_UNPRIV(0x00B9)
3741CallPal_OpcDecB9:
3742        br	r31, osfpal_calpal_opcdec
3743
3744        CALL_PAL_UNPRIV(0x00BA)
3745CallPal_OpcDecBA:
3746        br	r31, osfpal_calpal_opcdec
3747
3748        CALL_PAL_UNPRIV(0x00BB)
3749CallPal_OpcDecBB:
3750        br	r31, osfpal_calpal_opcdec
3751
3752        CALL_PAL_UNPRIV(0x00BC)
3753CallPal_OpcDecBC:
3754        br	r31, osfpal_calpal_opcdec
3755
3756        CALL_PAL_UNPRIV(0x00BD)
3757CallPal_OpcDecBD:
3758        br	r31, osfpal_calpal_opcdec
3759
3760        CALL_PAL_UNPRIV(0x00BE)
3761CallPal_OpcDecBE:
3762        br	r31, osfpal_calpal_opcdec
3763
3764        CALL_PAL_UNPRIV(0x00BF)
3765CallPal_OpcDecBF:
3766        // MODIFIED BY EGH 2/25/04
3767        br	r31, copypal_impl
3768
3769
3770/*======================================================================*/
3771/*                   OSF/1 CALL_PAL CONTINUATION AREA                   */
3772/*======================================================================*/
3773
3774        .text	2
3775
3776        . = 0x4000
3777
3778
3779// Continuation of MTPR_PERFMON
3780        ALIGN_BLOCK
3781          // "real" performance monitoring code
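// Most of the subfunctions below follow the same read-modify-write pattern on
// a field of PMCTR (or of a Cbox register plus its shadow copy in the impure
// area).  Hedged C sketch of that pattern, with made-up helper names:
//
//     unsigned long rmw_field(unsigned long reg,      /* value from mfpr/ldq_p  */
//                             unsigned long mask,     /* mask of the field      */
//                             unsigned long newbits)  /* caller's a1 (r17)      */
//     {
//         reg &= ~mask;                   /* clear the old field contents       */
//         reg |= (newbits & mask);        /* insert the caller's bits           */
//         return reg;                     /* written back with mtpr or stq_p,   */
//     }                                   /* and mirrored via SAVE_SHADOW       */
//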
3782// mux ctl
3783perfmon_muxctl:
3784        lda     r8, 1(r31) 			// get a 1
3785        sll     r8, pmctr_v_sel0, r8		// move to sel0 position
3786        or      r8, ((0xf<<pmctr_v_sel1) | (0xf<<pmctr_v_sel2)), r8	// build mux select mask
3787        and	r17, r8, r25			// isolate pmctr mux select bits
3788        mfpr	r0, ev5__pmctr
3789        bic	r0, r8, r0			// clear old mux select bits
3790        or	r0,r25, r25			// or in new mux select bits
3791        mtpr	r25, ev5__pmctr
3792
3793        // ok, now tackle cbox mux selects
3794        ldah    r14, 0xfff0(r31)
3795        zap     r14, 0xE0, r14                 // Get Cbox IPR base
3796//orig	get_bc_ctl_shadow	r16		// bc_ctl returned in lower longword
3797// adapted from ev5_pal_macros.mar
3798        mfpr	r16, pt_impure
3799        lda	r16, CNS_Q_IPR(r16)
3800        RESTORE_SHADOW(r16,CNS_Q_BC_CTL,r16);
3801
3802        lda	r8, 0x3F(r31)			// build mux select mask
3803        sll	r8, bc_ctl_v_pm_mux_sel, r8
3804
3805        and	r17, r8, r25			// isolate bc_ctl mux select bits
3806        bic	r16, r8, r16			// isolate old mux select bits
3807        or	r16, r25, r25			// create new bc_ctl
3808        mb					// clear out cbox for future ipr write
3809        stq_p	r25, ev5__bc_ctl(r14)		// store to cbox ipr
3810        mb					// clear out cbox for future ipr write
3811
3812//orig	update_bc_ctl_shadow	r25, r16	// r25=value, r16-overwritten with adjusted impure ptr
3813// adapted from ev5_pal_macros.mar
3814        mfpr	r16, pt_impure
3815        lda	r16, CNS_Q_IPR(r16)
3816        SAVE_SHADOW(r25,CNS_Q_BC_CTL,r16);
3817
3818        br 	r31, perfmon_success
3819
3820
3821// requested to disable perf monitoring
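// Hedged sketch of the disable path below: bit i of a1 (r17) selects counter i,
// and disabling a counter means clearing its two CTL bits in PMCTR (the field
// positions come from pmctr_v_ctl0/1/2; the loop itself is illustrative):
//
//     unsigned long pmctr = read_pmctr();
//     int ctl_shift[3] = { pmctr_v_ctl0, pmctr_v_ctl1, pmctr_v_ctl2 };
//     for (int i = 0; i < 3; i++)
//         if (a1 & (1UL << i))
//             pmctr &= ~(3UL << ctl_shift[i]);    /* CTL = 0: counter stopped */
//     write_pmctr(pmctr);                         /* then update the shadow   */
//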
3822perfmon_dis:
3823        mfpr	r14, ev5__pmctr		// read ibox pmctr ipr
3824perfmon_dis_ctr0:			// and begin with ctr0
3825        blbc	r17, perfmon_dis_ctr1	// do not disable ctr0
3826        lda 	r8, 3(r31)
3827        sll	r8, pmctr_v_ctl0, r8
3828        bic	r14, r8, r14		// disable ctr0
3829perfmon_dis_ctr1:
3830        srl	r17, 1, r17
3831        blbc	r17, perfmon_dis_ctr2	// do not disable ctr1
3832        lda 	r8, 3(r31)
3833        sll	r8, pmctr_v_ctl1, r8
3834        bic	r14, r8, r14		// disable ctr1
3835perfmon_dis_ctr2:
3836        srl	r17, 1, r17
3837        blbc	r17, perfmon_dis_update	// do not disable ctr2
3838        lda 	r8, 3(r31)
3839        sll	r8, pmctr_v_ctl2, r8
3840        bic	r14, r8, r14		// disable ctr2
3841perfmon_dis_update:
3842        mtpr	r14, ev5__pmctr		// update pmctr ipr
3843// the following code is not needed for ev5 pass2 and later, but doesn't hurt anything to leave in
3844// adapted from ev5_pal_macros.mar
3845//orig	get_pmctr_ctl	r8, r25		// pmctr_ctl bit in r8.  adjusted impure pointer in r25
3846        mfpr	r25, pt_impure
3847        lda	r25, CNS_Q_IPR(r25)
3848        RESTORE_SHADOW(r8,CNS_Q_PM_CTL,r25);
3849
3850        lda	r17, 0x3F(r31)		// build mask
3851        sll	r17, pmctr_v_ctl2, r17 // shift mask to correct position
3852        and 	r14, r17, r14		// isolate ctl bits
3853        bic	r8, r17, r8		// clear out old ctl bits
3854        or	r14, r8, r14		// create shadow ctl bits
3855//orig	store_reg1 pmctr_ctl, r14, r25, ipr=1	// update pmctr_ctl register
3856//adjusted impure pointer still in r25
3857        SAVE_SHADOW(r14,CNS_Q_PM_CTL,r25);
3858
3859        br 	r31, perfmon_success
3860
3861
3862// requested to enable perf monitoring
3863// the following code can be greatly simplified for pass2, but should work fine as is.
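// Hedged sketch of the enable path (ctr_mask, ctl_mask and freq_bits are
// stand-ins; the PME bit, the ICSR<PMP> update and the per-counter selects
// follow the code below):
//
//     unsigned long pme = (pcb->fen_pme_dat >> osfpcb_v_pme) & 1;  /* per-process enable */
//     icsr = (icsr & ~(1UL << icsr_v_pmp)) | (pme << icsr_v_pmp);  /* pass2: PMP in ICSR */
//     unsigned long pmctr = read_pmctr();
//     for (int i = 0; i < 3; i++) {
//         if (!(a1 & (1UL << i))) continue;       /* counter i not selected        */
//         if (enclr) pmctr &= ~ctr_mask[i];       /* optionally zero the count     */
//         pmctr = (pmctr & ~ctl_mask[i])          /* write the frequency select    */
//               | (freq_bits & ctl_mask[i]);      /*   bits into CTLi              */
//     }
//     write_pmctr(pmctr);                         /* and update the pm_ctl shadow  */
//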
3864
3865
3866perfmon_enclr:
3867        lda	r9, 1(r31)		// set enclr flag
3868        br perfmon_en_cont
3869
3870perfmon_en:
3871        bis	r31, r31, r9		// clear enclr flag
3872
3873perfmon_en_cont:
3874        mfpr	r8, pt_pcbb		// get PCB base
3875//orig	get_pmctr_ctl r25, r25
3876        mfpr	r25, pt_impure
3877        lda	r25, CNS_Q_IPR(r25)
3878        RESTORE_SHADOW(r25,CNS_Q_PM_CTL,r25);
3879
3880        ldq_p	r16, osfpcb_q_fen(r8)	// read DAT/PME/FEN quadword
3881        mfpr	r14, ev5__pmctr		// read ibox pmctr ipr
3882        srl 	r16, osfpcb_v_pme, r16	// get pme bit
3883        mfpr	r13, icsr
3884        and	r16,  1, r16		// isolate pme bit
3885
3886        // this code only needed in pass2 and later
3887        lda	r12, 1<<icsr_v_pmp(r31)		// pb
3888        bic	r13, r12, r13		// clear pmp bit
3889        sll	r16, icsr_v_pmp, r12	// move pme bit to icsr<pmp> position
3890        or	r12, r13, r13		// new icsr with icsr<pmp> bit set/clear
3891        mtpr	r13, icsr		// update icsr
3892
3893        bis	r31, 1, r16		// set r16<0> on pass2 to update pmctr always (icsr provides real enable)
3894
3895        sll	r25, 6, r25		// shift frequency bits into pmctr_v_ctl positions
3896        bis	r14, r31, r13		// copy pmctr
3897
3898perfmon_en_ctr0:			// and begin with ctr0
3899        blbc	r17, perfmon_en_ctr1	// do not enable ctr0
3900
3901        blbc	r9, perfmon_en_noclr0	// if enclr flag not set, skip clearing ctr0 field
3902        lda	r8, 0xffff(r31)
3903        zapnot  r8, 3, r8		// ctr0<15:0> mask
3904        sll	r8, pmctr_v_ctr0, r8
3905        bic	r14, r8, r14		// clear ctr bits
3906        bic	r13, r8, r13		// clear ctr bits
3907
3908perfmon_en_noclr0:
3909//orig	get_addr r8, 3<<pmctr_v_ctl0, r31
3910        LDLI(r8, (3<<pmctr_v_ctl0))
3911        and 	r25, r8, r12		//isolate frequency select bits for ctr0
3912        bic	r14, r8, r14		// clear ctl0 bits in preparation for enabling
3913        or	r14,r12,r14		// or in new ctl0 bits
3914
3915perfmon_en_ctr1:			// enable ctr1
3916        srl	r17, 1, r17		// get ctr1 enable
3917        blbc	r17, perfmon_en_ctr2	// do not enable ctr1
3918
3919        blbc	r9, perfmon_en_noclr1	// if enclr flag not set, skip clearing ctr1 field
3920        lda	r8, 0xffff(r31)
3921        zapnot  r8, 3, r8		// ctr1<15:0> mask
3922        sll	r8, pmctr_v_ctr1, r8
3923        bic	r14, r8, r14		// clear ctr bits
3924        bic	r13, r8, r13		// clear ctr bits
3925
3926perfmon_en_noclr1:
3927//orig	get_addr r8, 3<<pmctr_v_ctl1, r31
3928        LDLI(r8, (3<<pmctr_v_ctl1))
3929        and 	r25, r8, r12		//isolate frequency select bits for ctr1
3930        bic	r14, r8, r14		// clear ctl1 bits in preparation for enabling
3931        or	r14,r12,r14		// or in new ctl1 bits
3932
3933perfmon_en_ctr2:			// enable ctr2
3934        srl	r17, 1, r17		// get ctr2 enable
3935        blbc	r17, perfmon_en_return	// do not enable ctr2 - return
3936
3937        blbc	r9, perfmon_en_noclr2	// if enclr flag not set, skip clearing ctr2 field
3938        lda	r8, 0x3FFF(r31)		// ctr2<13:0> mask
3939        sll	r8, pmctr_v_ctr2, r8
3940        bic	r14, r8, r14		// clear ctr bits
3941        bic	r13, r8, r13		// clear ctr bits
3942
3943perfmon_en_noclr2:
3944//orig	get_addr r8, 3<<pmctr_v_ctl2, r31
3945        LDLI(r8, (3<<pmctr_v_ctl2))
3946        and 	r25, r8, r12		//isolate frequency select bits for ctr2
3947        bic	r14, r8, r14		// clear ctl2 bits in preparation for enabling
3948        or	r14,r12,r14		// or in new ctl2 bits
3949
3950perfmon_en_return:
3951        cmovlbs	r16, r14, r13		// if pme enabled, move enables into pmctr
3952                                        // else only do the counter clears
3953        mtpr	r13, ev5__pmctr		// update pmctr ipr
3954
3955// this code is not needed for pass2 and later, but does not hurt to leave it in
3956        lda	r8, 0x3F(r31)
3957//orig	get_pmctr_ctl r25, r12         	// read pmctr ctl; r12=adjusted impure pointer
3958        mfpr	r12, pt_impure
3959        lda	r12, CNS_Q_IPR(r12)
3960        RESTORE_SHADOW(r25,CNS_Q_PM_CTL,r12);
3961
3962        sll	r8, pmctr_v_ctl2, r8	// build ctl mask
3963        and	r8, r14, r14		// isolate new ctl bits
3964        bic	r25, r8, r25		// clear out old ctl value
3965        or	r25, r14, r14		// create new pmctr_ctl
3966//orig	store_reg1 pmctr_ctl, r14, r12, ipr=1
3967        SAVE_SHADOW(r14,CNS_Q_PM_CTL,r12); // r12 still has the adjusted impure ptr
3968
3969        br 	r31, perfmon_success
3970
3971
3972// options...
3973perfmon_ctl:
3974
3975// set mode
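// Hedged sketch of the mode option: a1 (r17) carries the kill-mode bits for
// PMCTR and, in its low bit, the all-vs-selected-processes choice reflected
// into ICSR<PMA> (helper names are illustrative):
//
//     unsigned long kill_mask = (1UL << pmctr_v_killu)
//                             | (1UL << pmctr_v_killp)
//                             | (1UL << pmctr_v_killk);
//     pmctr = (pmctr & ~kill_mask) | (a1 & kill_mask);
//     if (a1 & 1)
//         icsr &= ~(1UL << icsr_v_pma);   /* monitor selected processes only */
//     else
//         icsr |=  (1UL << icsr_v_pma);   /* monitor all processes           */
//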
3976//orig	get_pmctr_ctl r14, r12         	// read shadow pmctr ctl; r12=adjusted impure pointer
3977        mfpr	r12, pt_impure
3978        lda	r12, CNS_Q_IPR(r12)
3979        RESTORE_SHADOW(r14,CNS_Q_PM_CTL,r12);
3980
3981        // build mode mask for pmctr register
3982        LDLI(r8, ((1<<pmctr_v_killu) | (1<<pmctr_v_killp) | (1<<pmctr_v_killk)))
3983        mfpr	r0, ev5__pmctr
3984        and	r17, r8, r25			// isolate pmctr mode bits
3985        bic	r0, r8, r0			// clear old mode bits
3986        or	r0, r25, r25			// or in new mode bits
3987        mtpr	r25, ev5__pmctr
3988
3989        // the following code will only be used in pass2, but should
3990        // not hurt anything if run in pass1.
3991        mfpr	r8, icsr
3992        lda	r25, 1<<icsr_v_pma(r31)		// set icsr<pma> if r17<0>=0
3993        bic 	r8, r25, r8			// clear old pma bit
3994        cmovlbs r17, r31, r25			// and clear icsr<pma> if r17<0>=1
3995        or	r8, r25, r8
3996        mtpr	r8, icsr		// 4 bubbles to hw_rei
3997        mfpr	r31, pt0			// pad icsr write
3998        mfpr	r31, pt0			// pad icsr write
3999
4000        // the following code not needed for pass2 and later, but
4001        // should work anyway.
4002        bis     r14, 1, r14       		// set for select processes
4003        blbs	r17, perfmon_sp			// branch if select processes
4004        bic	r14, 1, r14			// all processes
4005perfmon_sp:
4006//orig	store_reg1 pmctr_ctl, r14, r12, ipr=1   // update pmctr_ctl register
4007        SAVE_SHADOW(r14,CNS_Q_PM_CTL,r12); // r12 still has the adjusted impure ptr
4008        br 	r31, perfmon_success
4009
4010// counter frequency select
4011perfmon_freq:
4012//orig	get_pmctr_ctl r14, r12         	// read shadow pmctr ctl; r12=adjusted impure pointer
4013        mfpr	r12, pt_impure
4014        lda	r12, CNS_Q_IPR(r12)
4015        RESTORE_SHADOW(r14,CNS_Q_PM_CTL,r12);
4016
4017        lda	r8, 0x3F(r31)
4018//orig	sll	r8, pmctr_ctl_v_frq2, r8		// build mask for frequency select field
4019// pmctr_ctl_v_frq2 is not defined by the included headers; based on the control register structure above it appears to be a 4-bit shift, so define it here
4020#define	pmctr_ctl_v_frq2_SHIFT 4
4021        sll	r8, pmctr_ctl_v_frq2_SHIFT, r8		// build mask for frequency select field
4022
4023        and 	r8, r17, r17
4024        bic 	r14, r8, r14				// clear out old frequency select bits
4025
4026        or 	r17, r14, r14				// or in new frequency select info
4027//orig	store_reg1 pmctr_ctl, r14, r12, ipr=1   // update pmctr_ctl register
4028        SAVE_SHADOW(r14,CNS_Q_PM_CTL,r12); // r12 still has the adjusted impure ptr
4029
4030        br 	r31, perfmon_success
4031
4032// read counters
4033perfmon_rd:
4034        mfpr	r0, ev5__pmctr
4035        or	r0, 1, r0	// or in return status
4036        hw_rei			// back to user
4037
4038// write counters
4039perfmon_wr:
4040        mfpr	r14, ev5__pmctr
4041        lda	r8, 0x3FFF(r31)		// ctr2<13:0> mask
4042        sll	r8, pmctr_v_ctr2, r8
4043
4044        LDLI(r9, (0xFFFFFFFF))		// ctr0<15:0>,ctr1<15:0> mask
4045        sll	r9, pmctr_v_ctr1, r9
4046        or	r8, r9, r8		// or ctr2, ctr1, ctr0 mask
4047        bic	r14, r8, r14		// clear ctr fields
4048        and	r17, r8, r25		// keep only the ctr fields
4049        or	r25, r14, r14		// write ctr fields
4050        mtpr	r14, ev5__pmctr		// update pmctr ipr
4051
4052        mfpr	r31, pt0		// pad pmctr write (needed only to keep PVC happy)
4053
4054perfmon_success:
4055        or      r31, 1, r0                     // set success
4056        hw_rei					// back to user
4057
4058perfmon_unknown:
4059        or	r31, r31, r0		// set fail
4060        hw_rei				// back to user
4061
4062
4063//////////////////////////////////////////////////////////
4064// Copy code
4065//////////////////////////////////////////////////////////
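// copypal_impl is essentially a memcpy in PALcode: dst in a0 (r16), src in
// a1 (r17), length in a2 (r18), and the original dst pointer is returned in
// v0.  It copies a quadword at a time, using ldq_u/extql/extqh to merge
// shifted source quadwords when src and dst disagree modulo 8, and
// mskql/mskqh so the partial first and last quadwords leave neighbouring dst
// bytes alone.  A hedged, simplified C reduction of what it computes (not of
// how it computes it):
//
//     #include <stddef.h>
//
//     void *copypal_sketch(void *dst, const void *src, size_t len)
//     {
//         unsigned char *d = dst;
//         const unsigned char *s = src;
//         while (len-- > 0)          /* the real code moves 8 bytes per store */
//             *d++ = *s++;
//         return dst;                /* like the mov r16, r0 below            */
//     }
//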
4066
4067copypal_impl:
4068        mov r16, r0		// return value is the original dst pointer
4069#ifdef CACHE_COPY
4070#ifndef CACHE_COPY_UNALIGNED
4071        and r16, 63, r8
4072        and r17, 63, r9
4073        bis r8, r9, r8
4074        bne r8, cache_copy_done
4075#endif
4076        bic r18, 63, r8
4077        and r18, 63, r18
4078        beq r8, cache_copy_done
4079cache_loop:
4080        ldf f17, 0(r16)
4081        stf f17, 0(r16)
4082        addq r17, 64, r17
4083        addq r16, 64, r16
4084        subq r8, 64, r8
4085        bne r8, cache_loop
4086cache_copy_done:
4087#endif
4088        ble r18, finished	// if len <= 0 we are finished
4089        ldq_u r8, 0(r17)	// fetch first source quadword
4090        xor r17, r16, r9	// do src and dst agree modulo 8?
4091        and r9, 7, r9
4092        and r16, 7, r10		// r10 = dst offset within its quadword
4093        bne r9, unaligned	// different offsets: take the shifting path
4094        beq r10, aligned	// both already quadword aligned
4095        ldq_u r9, 0(r16)	// co-aligned but offset: build first quadword
4096        addq r18, r10, r18	// count from the quadword boundary
4097        mskqh r8, r17, r8	// keep src bytes at/above the offset
4098        mskql r9, r17, r9	// keep dst bytes below the offset
4099        bis r8, r9, r8		// merged first quadword to store
4100aligned:
4101        subq r18, 1, r10
4102        bic r10, 7, r10		// r10 = bytes to copy in the main loop
4103        and r18, 7, r18		// r18 = bytes in a partial final quadword (0 if none)
4104        beq r10, aligned_done
4105loop:
4106        stq_u r8, 0(r16)	// store the current quadword
4107        ldq_u r8, 8(r17)	// and fetch the next one
4108        subq r10, 8, r10
4109        lda r16,8(r16)
4110        lda r17,8(r17)
4111        bne r10, loop
4112aligned_done:
4113        bne r18, few_left	// partial final quadword?
4114        stq_u r8, 0(r16)	// no - store the last full quadword
4115        br r31, finished
4116few_left:
4117        mskql r8, r18, r10	// keep the low r18 bytes of source data
4118        ldq_u r9, 0(r16)
4119        mskqh r9, r18, r9	// keep the dst bytes beyond the copy
4120        bis r10, r9, r10
4121        stq_u r10, 0(r16)	// merged final quadword
4122        br r31, finished
4123unaligned:
4124        addq r17, r18, r25	// r25 = one past the last source byte
4125        cmpule r18, 8, r9
4126        bne r9, unaligned_few_left // 8 bytes or fewer: short-copy path
4127        beq r10, unaligned_dest_aligned // dst already quadword aligned
4128        and r16, 7, r10		// first bring the destination to a boundary:
4129        subq r31, r10, r10
4130        addq r10, 8, r10	// r10 = bytes up to the next dst quadword
4131        ldq_u r9, 7(r17)	// quadword holding src byte 7
4132        extql r8, r17, r8	// build 8 source bytes starting at r17
4133        extqh r9, r17, r9
4134        bis r8, r9, r12
4135        insql r12, r16, r12	// position them at the dst offset
4136        ldq_u r13, 0(r16)
4137        mskql r13, r16, r13	// keep dst bytes below the offset
4138        bis r12, r13, r12
4139        stq_u r12, 0(r16)	// fill dst up to the quadword boundary
4140        addq r16, r10, r16	// step both pointers past those bytes
4141        addq r17, r10, r17
4142        subq r18, r10, r18
4143        ldq_u r8, 0(r17)	// refetch the current source quadword
4144unaligned_dest_aligned:
4145        subq r18, 1, r10
4146        bic r10, 7, r10		// r10 = bytes to copy in the unrolled loop
4147        and r18, 7, r18		// r18 = leftover bytes for the tail
4148        beq r10, unaligned_partial_left
4149unaligned_loop:			// merge source quadword pairs so each store to dst is a full quadword
4150        ldq_u r9, 7(r17)
4151        lda r17, 8(r17)
4152        extql r8, r17, r12
4153        extqh r9, r17, r13
4154        subq r10, 8, r10
4155        bis r12, r13, r13
4156        stq r13, 0(r16)
4157        lda r16, 8(r16)
4158        beq r10, unaligned_second_partial_left
4159        ldq_u r8, 7(r17)	// second copy of the loop body, r8/r9 roles swapped
4160        lda r17, 8(r17)
4161        extql r9, r17, r12
4162        extqh r8, r17, r13
4163        bis r12, r13, r13
4164        subq r10, 8, r10
4165        stq r13, 0(r16)
4166        lda r16, 8(r16)
4167        bne r10, unaligned_loop
4168unaligned_partial_left:
4169        mov r8, r9		// make both loop exits look alike
4170unaligned_second_partial_left:
4171        ldq_u r8, -1(r25)	// last source quadword
4172        extql r9, r17, r9	// build the final 8 source bytes
4173        extqh r8, r17, r8
4174        bis r8, r9, r8
4175        bne r18, few_left	// partial last quadword: reuse the masked store above
4176        stq_u r8, 0(r16)	// else store it whole
4177        br r31, finished
4178unaligned_few_left:		// 1 to 8 bytes, src and dst arbitrarily aligned
4179        ldq_u r9, -1(r25)	// last source quadword
4180        extql r8, r17, r8	// r8 = the source bytes, right justified
4181        extqh r9, r17, r9
4182        bis r8, r9, r8
4183        insqh r8, r16, r9	// split them across the two dst quadwords
4184        insql r8, r16, r8
4185        lda r12, -1(r31)	// all-ones quadword
4186        mskql r12, r18, r13	// r13 = byte mask for the copy length
4187        cmovne r13, r13, r12	// r12 = final byte mask (stays all-ones if len is 8)
4188        insqh r12, r16, r13	// split the byte mask the same way
4189        insql r12, r16, r12
4190        addq r16, r18, r10	// r10 = one past the last dst byte
4191        ldq_u r14, 0(r16)	// read both dst quadwords
4192        ldq_u r25, -1(r10)
4193        bic r14, r12, r14	// clear the dst bytes being replaced
4194        bic r25, r13, r25
4195        and r8, r12, r8		// keep only source bytes inside the copy
4196        and r9, r13, r9
4197        bis r8, r14, r8		// merge and write both quadwords back
4198        bis r9, r25, r9
4199        stq_u r9, -1(r10)
4200        stq_u r8, 0(r16)
4201finished:
4202        hw_rei
4203