osfpal.S revision 8007:013cbe16f1d6
1// modified to use the Hudson style "impure.h" instead of ev5_impure.sdl
2// since we don't have a mechanism to expand the data structures.... pb Nov/95
3
4// build_fixed_image: not sure what this means
5// real_mm to be replaced during rewrite
6// remove_save_state / remove_restore_state can be removed to save space ??
7
8
9#include "ev5_defs.h"
10#include "ev5_impure.h"
11#include "ev5_alpha_defs.h"
12#include "ev5_paldef.h"
13#include "ev5_osfalpha_defs.h"
14#include "fromHudsonMacros.h"
15#include "fromHudsonOsf.h"
16#include "dc21164FromGasSources.h"
17
18#ifdef SIMOS
19#define DEBUGSTORE(c) nop
20#else
21#define DEBUGSTORE(c) \
22        lda	r13, c(zero) ; \
23        bsr	r25, debugstore
24#endif
25
26#define DEBUG_EXC_ADDR()\
27        bsr	r25, put_exc_addr; \
28        DEBUGSTORE(13)		; \
29        DEBUGSTORE(10)
30
31#define egore 0
32#define acore 0
33#define beh_model 0
34#define ev5_p2 1
35#define ev5_p1 0
36#define ldvpte_bug_fix 1
37#define osf_chm_fix  0
38
39// Do we want to do this?? pb
40#define spe_fix 0
41// Do we want to do this?? pb
42#define build_fixed_image 0
43
44#define ev5_pass2
45#define enable_p4_fixups 0
46#define osf_svmin 1
47#define enable_physical_console 0
48#define fill_err_hack 0
49#define icflush_on_tbix 0
50#define max_cpuid 1
51#define perfmon_debug 0
52#define rawhide_system 0
53#define rax_mode 0
54
55
56// This is the fix for the user-mode super page references causing the machine to crash.
57#if (spe_fix == 1) && (build_fixed_image==1)
58#define hw_rei_spe	br	r31, hw_rei_update_spe
59#else
60#define hw_rei_spe	hw_rei
61#endif
62
63
64// redefine a few of the distribution-code names to match the Hudson gas names.
65// opcodes
66#define ldqp ldq_p
67#define stqp stq_p
68#define ldlp ldl_p
69#define stlp stl_p
70
71#define r0 $0
72#define r1 $1
73#define r2 $2
74#define r3 $3
75#define r4 $4
76#define r5 $5
77#define r6 $6
78#define r7 $7
79#define r8 $8
80#define r9 $9
81#define r10 $10
82#define r11 $11
83#define r12 $12
84#define r13 $13
85#define r14 $14
86#define r15 $15
87#define r16 $16
88#define r17 $17
89#define r18 $18
90#define r19 $19
91#define r20 $20
92#define r21 $21
93#define r22 $22
94#define r23 $23
95#define r24 $24
96#define r25 $25
97#define r26 $26
98#define r27 $27
99#define r28 $28
100#define r29 $29
101#define r30 $30
102#define r31 $31
103
104// 	.title	"EV5 OSF PAL"
105// 	.ident	"V1.18"
106//
107//****************************************************************************
108//*									    *
109//*  Copyright (c) 1992, 1993, 1994, 1995                      		    *
110//*  by DIGITAL Equipment Corporation, Maynard, Mass.			    *
111//* 									    *
112//*  This software is furnished under a license and may be used and  copied  *
113//*  only  in  accordance  with  the  terms  of  such  license and with the  *
114//*  inclusion of the above copyright notice.  This software or  any  other  *
115//*  copies  thereof may not be provided or otherwise made available to any  *
116//*  other person.  No title to and ownership of  the  software  is  hereby  *
117//*  transferred.							    *
118//* 									    *
119//*  The information in this software is subject to change  without  notice  *
120//*  and  should  not  be  construed  as  a commitment by DIGITAL Equipment  *
121//*  Corporation.							    *
122//* 									    *
123//*  DIGITAL assumes no responsibility for the use or  reliability  of  its  *
124//*  software on equipment which is not supplied by DIGITAL.		    *
125//*									    *
126//****************************************************************************
127
128// .sbttl	"Edit History"
129//+
130// Who		Rev	When		What
131// ------------	---	-----------	--------------------------------
132// DB		0.0	03-Nov-1992	Start
133// DB		0.1	28-Dec-1992	add swpctx
134// DB		0.2	05-Jan-1993	Bug: PVC found mtpr dtb_CM -> virt ref bug
135// DB		0.3	11-Jan-1993	rearrange trap entry points
136// DB		0.4	01-Feb-1993	add tbi
137// DB		0.5	04-Feb-1993	real MM, kludge reset flow, kludge swppal
138// DB		0.6	09-Feb-1993	Bug: several stack pushers used r16 for pc (should be r14)
139// DB		0.7	10-Feb-1993	Bug: pushed wrong PC (+8) on CALL_PAL OPCDEC
140//					Bug: typo on register number for store in wrunique
141//					Bug: rti to kern uses r16 as scratch
142//					Bug: callsys saving wrong value in pt_usp
143// DB		0.8	16-Feb-1993	PVC: fix possible pt write->read bug in wrkgp, wrusp
144// DB		0.9	18-Feb-1993	Bug: invalid_dpte_handler shifted pte twice
145//					Bug: rti stl_c could corrupt the stack
146//					Bug: unaligned returning wrong value in r17 (or should be and)
147// DB		0.10	19-Feb-1993	Add draina, rd/wrmces, cflush, cserve, interrupt
148// DB		0.11	23-Feb-1993	Turn caches on in reset flow
149// DB		0.12	10-Mar-1993	Bug: wrong value for icsr for FEN in kern mode flow
150// DB		0.13	15-Mar-1993	Bug: wrong value pushed for PC in invalid_dpte_handler if stack push tbmisses
151// DB		0.14	23-Mar-1993	Add impure pointer paltemp, reshuffle some other paltemps to match VMS
152// DB		0.15	15-Apr-1993	Combine paltemps for WHAMI and MCES
153// DB		0.16    12-May-1993	Update reset
154//                                       New restriction: no mfpr exc_addr in cycle 1 of call_pal flows
155//					Bug: in wrmces, not clearing DPC, DSC
156//					Update swppal
157//					Add pal bugchecks, pal_save_state, pal_restore_state
158// DB		0.17    24-May-1993	Add dfault_in_pal flow; fixup stack builder to have common state for pc/ps.
159//                                       New restriction: No hw_rei_stall in 0,1,2 after mtpr itb_asn
160// DB		0.18    26-May-1993	PVC fixes
161// JM  		0.19	01-jul-1993	Bug: OSFPAL_CALPAL_OPCDEC, TRAP_OPCDEC -- move mt exc_addr after stores
162// JM  		0.20	07-jul-1993	Update cns_ and mchk_ names for impure.mar conversion to .sdl
163//					Bug:  exc_addr was being loaded before stores that could dtb_miss in the following
164//						routines: TRAP_FEN,FEN_TO_OPCDEC,CALL_PAL_CALLSYS,RTI_TO_KERN
165// JM 		0.21	26-jul-1993	Bug: move exc_addr load after ALL stores in the following routines:
166//						TRAP_IACCVIO::,TRAP_OPCDEC::,TRAP_ARITH::,TRAP_FEN::
167//						dfault_trap_cont:,fen_to_opcdec:,invalid_dpte_handler:
168//						osfpal_calpal_opcdec:,CALL_PAL_callsys::,TRAP_UNALIGN::
169//					Bugs from PVC: trap_unalign - mt pt0 ->mf pt0 within 2 cycles
170// JM 		0.22	28-jul-1993	Add WRIPIR instruction
171// JM 		0.23	05-aug-1993	Bump version number for release
172// JM 		0.24	11-aug-1993	Bug: call_pal_swpipl - palshadow write -> hw_rei violation
173// JM		0.25	09-sep-1993	Disable certain "hidden" pvc checks in call_pals;
174//					New restriction: No hw_rei_stall in 0,1,2,3,4 after mtpr itb_asn - affects HALT(raxmode),
175//						and SWPCTX
176// JM		0.26	07-oct-1993	Re-implement pal_version
177// JM		0.27	12-oct-1993	One more time:  change pal_version format to conform to SRM
178// JM		0.28	14-oct-1993	Change ic_flush routine to pal_ic_flush
179// JM		0.29	19-oct-1993	BUG(?): dfault_in_pal: use exc_addr to check for dtbmiss,itbmiss check instead
180//						of mm_stat<opcode>.  mm_stat contains original opcode, not hw_ld.
181// JM		0.30	28-oct-1993	BUG: PVC violation - mf exc_addr in first cycles of call_pal in rti,retsys
182// JM		0.31	15-nov-1993	BUG: WRFEN trashing r0
183// JM            0.32	21-nov-1993	BUG: dtb_ldq,itb_ldq (used in dfault_in_pal) not defined when real_mm=0
184// JM		0.33	24-nov-1993	save/restore_state -
185//						BUG: use ivptbr to restore mvptbr
186// 						BUG: adjust hw_ld/st base/offsets to accomodate 10-bit offset limit
187//					     	CHANGE: Load 2 pages into dtb to accomodate compressed logout area/multiprocessors
188// JM		0.34	20-dec-1993	BUG: set r11<mode> to kernel for ksnv halt case
189//					BUG: generate ksnv halt when tb miss on kernel stack accesses
190//					     save exc_addr in r14 for invalid_dpte stack builder
191// JM		0.35	30-dec-1993	BUG: PVC violation in trap_arith - mt exc_sum in shadow of store with mf exc_mask in
192//					     the same shadow
193// JM		0.36	 6-jan-1994	BUG: fen_to_opcdec - savePC should be PC+4, need to save old PS, update new PS
194//					      New palcode restiction: mt icsr<fpe,hwe> --> 3 bubbles to hw_rei --affects wrfen
195// JM		0.37	25-jan-1994	BUG: PVC violations in restore_state - mt dc_mode/maf_mode ->mbox instructions
196//					Hide impure area manipulations in macros
197//					BUG: PVC violation in save and restore state-- move mt icsr out of shadow of ld/st
198//					Add some pvc_violate statements
199// JM		0.38	 1-feb-1994	Changes to save_state:  save pt1; don't save r31,f31; update comments to reflect reality;
200//					Changes to restore_state: restore pt1, icsr; don't restore r31,f31; update comments
201//						Add code to ensure fen bit set in icsr before ldt
202//					conditionally compile rax_more_reset out.
203//					move ldqp,stqp macro definitions to ev5_pal_macros.mar and add .mcall's for them here
204//					move rax reset stuff to ev5_osf_system_pal.m64
205// JM		0.39	 7-feb-1994	Move impure pointer to pal scratch space.  Use former pt_impure for bc_ctl shadow
206//						and performance monitoring bits
207//					Change to save_state routine to save more iprs.
208// JM		0.40	19-feb-1994	Change algorithm in save/restore_state routines; add f31,r31 back in
209// JM		0.41	21-feb-1994     Add flags to compile out save/restore state (not needed in some systems)
210//						remove_save_state,remove_restore_state;fix new pvc violation in save_state
211// JM		0.42	22-feb-1994     BUG: save_state overwriting r3
212// JM		0.43	24-feb-1994	BUG: save_state saving wrong icsr
213// JM		0.44	28-feb-1994	Remove ic_flush from wr_tbix instructions
214// JM		0.45	15-mar-1994	BUG: call_pal_tbi trashes a0 prior to range check (instruction order problem)
215//					New pal restriction in pal_restore_state: icsr<fpe>->floating instr = 3 bubbles
216//					Add exc_sum and exc_mask to pal_save_state (not restore)
217// JM		0.46	22-apr-1994	Move impure pointer back into paltemp;  Move bc_ctl shadow and pmctr_ctl into impure
218//						area.
219//					Add performance counter support to swpctx and wrperfmon
220// JM            0.47    9-may-1994	Bump version # (for ev5_osf_system_pal.m64 sys_perfmon fix)
221// JM		0.48	13-jun-1994	BUG: trap_interrupt --> put new ev5 ipl at 30 for all osfipl6 interrupts
222// JM		0.49	8-jul-1994	BUG: In the unlikely (impossible?) event that the branch to pal_pal_bug_check is
223//						taken in the interrupt flow, stack is pushed twice.
224//					SWPPAL - update to support ECO 59 to allow 0 as a valid address
225//					Add itb flush to save/restore state routines
226//					Change hw_rei to hw_rei_stall in ic_flush routine.  Shouldn't be necessary, but
227//						conforms to itbia restriction.
228//					Added enable_physical_console flag (for enter/exit console routines only)
229// JM		0.50	29-jul-1994	Add code to dfault & invalid_dpte_handler to ignore exceptions on a
230//						load to r31/f31.  changed dfault_fetch_err to dfault_fetch_ldr31_err and
231//						nmiss_fetch_err to nmiss_fetch_ldr31_err.
232// JM		1.00	 1-aug-1994	Add pass2 support (swpctx)
233// JM		1.01	 2-aug-1994	swppal now passes bc_ctl/bc_config in r1/r2
234// JM		1.02	15-sep-1994	BUG: swpctx missing shift of pme bit to correct position in icsr (pass2)
235//					Moved perfmon code here from system file.
236//					BUG: pal_perfmon - enable function not saving correct enables when pme not set (pass1)
237// JM		1.03	3-oct-1994	Added (pass2 only) code to wrperfmon enable function to look at pme bit.
238// JM		1.04	14-oct-1994	BUG: trap_interrupt - ISR read (and saved) before INTID -- INTID can change
239//						after ISR read, but we won't catch the ISR update.  reverse order
240// JM		1.05	17-nov-1994	Add code to dismiss UNALIGN trap if LD r31/F31
241// JM		1.06	28-nov-1994	BUG: missing mm_stat shift for store case in trap_unalign (new bug due to "dismiss" code)
242// JM		1.07	 1-dec-1994	EV5 PASS1,2,3 BUG WORKAROUND:  Add flag LDVPTE_BUG_FIX.  In DTBMISS_DOUBLE, branch to
243//					DTBMISS_SINGLE if not in palmode.
244// JM            1.08     9-jan-1995     Bump version number for change to EV5_OSF_SYSTEM_PAL.M64 - ei_stat fix in mchk logout frame
245// JM 		1.09	 2-feb-1995	Add flag "spe_fix" and accompanying code to workaround pre-pass4 bug:  Disable Ibox
246//					superpage mode in User mode and re-enable in kernel mode.
247//					EV5_OSF_SYSTEM_PAL.M64 and EV5_PALDEF.MAR (added pt_misc_v_cm) also changed to support this.
248// JM		1.10    24-feb-1995	Set ldvpte_bug_fix regardless of ev5 pass.   set default to ev5_p2
249// ES		1.11	10-mar-1995	Add flag "osf_chm_fix" to enable dcache in user mode only to avoid
250//					cpu bug.
251// JM		1.12	17-mar-1995	BUG FIX: Fix F0 corruption problem in pal_restore_state
252// ES		1.13	17-mar-1995	Refine osf_chm_fix
253// ES		1.14	20-mar-1995	Don't need as many stalls before hw_rei_stall in chm_fix
254// ES		1.15	21-mar-1995	Add a stall to avoid a pvc violation in pal_restore_state
255//					Force pvc checking of exit_console
// ES		1.16	26-apr-1995	In the wrperfmon disable function, correct meaning of R17<2:0> to ctl2,ctl1,ctl0
257// ES		1.17	01-may-1995	In hw_rei_update_spe code, in the osf_chm fix, use bic and bis (self-correcting)
258//					instead of xor to maintain previous mode in pt_misc
259// ES		1.18	14-jul-1995	In wrperfmon enable on pass2, update pmctr even if current process does
260//					not have pme set. The bits in icsr maintain the master enable state.
261//					In sys_reset, add icsr<17>=1 for ev56 byte/word eco enable
262//
263#define vmaj 1
264#define vmin 18
265#define vms_pal 1
266#define osf_pal 2
267#define pal_type osf_pal
268#define osfpal_version_l ((pal_type<<16) | (vmaj<<8) | (vmin<<0))
269//-
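// With the values above (pal_type=2, vmaj=1, vmin=18=0x12), a quick sanity
// check of the packing is, in C:
//
//     uint32_t osfpal_version_l = (2 << 16) | (1 << 8) | 18;   // 0x00020112
//
// i.e. the PAL type sits in the bits above <16>, the major version in <15:8>,
// and the minor version in <7:0>.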
270
271// .sbttl	"PALtemp register usage"
272
273//+
274//  The EV5 Ibox holds 24 PALtemp registers.  This maps the OSF PAL usage
275//  for these PALtemps:
276//
277//	pt0   local scratch
278//	pt1   local scratch
279//	pt2   entUna					pt_entUna
280//	pt3   CPU specific impure area pointer		pt_impure
281//	pt4   memory management temp
282//	pt5   memory management temp
283//	pt6   memory management temp
284//	pt7   entIF					pt_entIF
285//	pt8   intmask					pt_intmask
286//	pt9   entSys					pt_entSys
287//	pt10
288//	pt11  entInt					pt_entInt
289//	pt12  entArith					pt_entArith
290//	pt13  reserved for system specific PAL
291//	pt14  reserved for system specific PAL
292//	pt15  reserved for system specific PAL
293//	pt16  MISC: scratch ! WHAMI<7:0> ! 0 0 0 MCES<4:0> pt_misc, pt_whami, pt_mces
294//	pt17  sysval					pt_sysval
295//	pt18  usp					pt_usp
296//	pt19  ksp					pt_ksp
297//	pt20  PTBR					pt_ptbr
298//	pt21  entMM					pt_entMM
299//	pt22  kgp					pt_kgp
300//	pt23  PCBB					pt_pcbb
301//
302//-
303
304// .sbttl	"PALshadow register usage"
305//
306//+
307//
308// EV5 shadows R8-R14 and R25 when in PALmode and ICSR<shadow_enable> = 1.
309// This maps the OSF PAL usage of R8 - R14 and R25:
310//
311// 	r8    ITBmiss/DTBmiss scratch
312// 	r9    ITBmiss/DTBmiss scratch
313// 	r10   ITBmiss/DTBmiss scratch
314//	r11   PS
315//	r12   local scratch
316//	r13   local scratch
317//	r14   local scratch
318//	r25   local scratch
319//
320//
321//-
322
323// .sbttl	"ALPHA symbol definitions"
324// 	_OSF_PSDEF	GLOBAL
325// 	_OSF_PTEDEF	GLOBAL
326// 	_OSF_VADEF	GLOBAL
327// 	_OSF_PCBDEF	GLOBAL
328//         _OSF_SFDEF      GLOBAL
329//         _OSF_MMCSR_DEF  GLOBAL
330// 	_SCBDEF		GLOBAL
331// 	_FRMDEF		GLOBAL
332// 	_EXSDEF		GLOBAL
333// 	_OSF_A0_DEF	GLOBAL
334// 	_MCESDEF	GLOBAL
335
336// .sbttl	"EV5 symbol definitions"
337
338// 	_EV5DEF
339// 	_PALTEMP
340// 	_MM_STAT_DEF
341//         _EV5_MM
342//         _EV5_IPLDEF
343
344//         _HALT_CODES     GLOBAL
345//         _MCHK_CODES     GLOBAL
346
347//         _PAL_IMPURE
348//         _PAL_LOGOUT
349
350
351
352
353// .sbttl	"PALcode configuration options"
354
355// There are a number of options that may be assembled into this version of
356// PALcode. They should be adjusted in a prefix assembly file (i.e. do not edit
357// the following). The options that can be adjusted cause the resultant PALcode
358// to reflect the desired target system.
359
360
361#define osfpal 1				// This is the PALcode for OSF.
362
363#ifndef rawhide_system
364
365#define rawhide_system 0
366#endif
367
368
369#ifndef real_mm
370// Page table translation vs 1-1 mapping
371#define real_mm 1
372#endif
373
374
375#ifndef rax_mode
376
377#define rax_mode 0
378#endif
379
380#ifndef egore
381// End of reset flow starts a program at 200000(hex).
382#define egore 1
383#endif
384
385#ifndef acore
386// End of reset flow starts a program at 40000(hex).
387#define acore 0
388#endif
389
390
391// 	assume acore+egore+rax_mode lt 2	// Assertion checker
392
393#ifndef beh_model
394// EV5 behavioral model specific code
395#define beh_model 1
396#endif
397
398#ifndef init_cbox
399// Reset flow init of Bcache and Scache
400#define init_cbox 1
401#endif
402
403#ifndef disable_crd
404// Decides whether the reset flow will disable
405// correctable read interrupts via ICSR
406#define disable_crd 0
407#endif
408
409#ifndef perfmon_debug
410#define perfmon_debug 0
411#endif
412
413#ifndef icflush_on_tbix
414#define icflush_on_tbix 0
415#endif
416
417#ifndef remove_restore_state
418#define remove_restore_state 0
419#endif
420
421#ifndef remove_save_state
422#define remove_save_state 0
423#endif
424
425#ifndef enable_physical_console
426#define enable_physical_console 0
427#endif
428
429#ifndef ev5_p1
430#define ev5_p1 0
431#endif
432
433#ifndef ev5_p2
434#define ev5_p2 1
435#endif
436
437// 	assume ev5_p1+ev5_p2 eq 1
438
439#ifndef ldvpte_bug_fix
440#define ldvpte_bug_fix 1			// If set, fix ldvpte bug in dtbmiss_double flow.
441#endif
442
443#ifndef spe_fix
444// If set, disable super-page mode in user mode and re-enable
445// in kernel.  Workaround for cpu bug.
446#define spe_fix 0
447#endif
448#ifndef build_fixed_image
449#define build_fixed_image 0
450#endif
451
452
453#ifndef fill_err_hack
454// If set, disable fill_error mode in user mode and re-enable
455// in kernel.  Workaround for cpu bug.
456#define fill_err_hack 0
457#endif
458
459
460//	.macro hw_rei_spe
461//	.iif eq spe_fix, hw_rei
462//#if spe_fix != 0
463//
464//
465//#define hw_rei_chm_count hw_rei_chm_count + 1
466//	p4_fixup_label	\hw_rei_chm_count
467//	.iif eq	build_fixed_image,	br	r31, hw_rei_update_spe
468//	.iif ne build_fixed_image,	hw_rei
469//#endif
470//
471//	.endm
472
473// Add flag "osf_chm_fix" to enable dcache in user mode only
474// to avoid cpu bug.
475
476#ifndef osf_chm_fix
477// If set, enable D-Cache in user mode only.
478#define osf_chm_fix 0
479#endif
480
481#if osf_chm_fix != 0
482#define hw_rei_chm_count 0
483#endif
484
485
486#if osf_chm_fix != 0
487
488#define hw_rei_stall_chm_count 0
489#endif
490
491#ifndef enable_p4_fixups
492// If set, do EV5 Pass 4 fixups
493#define enable_p4_fixups 0
494#endif
495
496
497// Only allow fixups if fix enabled
498#if spe_fix == 0
499#define osf_chm_fix 0
500#endif
501
502#if spe_fix == 0
503#define enable_p4_fixups 0
504#endif
505
506
507
508
509        //Turn off fill_errors and MEM_NEM in user mode
510//	.macro fill_error_hack ?L10_, ?L20_, ?L30_, ?L40_
511//	//save r22,r23,r24
512//	stqp r22, 0x150(r31)	//add
513//	stqp r23, 0x158(r31)	//contents
514//	stqp r24, 0x160(r31)	//bit mask
515//
516//        lda     r22, 0x82(r31)
517//        ldah    r22, 0x8740(r22)
518//        sll     r22, 8, r22
519//        ldlp    r23, 0x80(r22)          // r23 <- contents of CIA_MASK
520//        bis     r23,r31,r23
521//
522//	lda	r24, 0x8(r31)		// r24 <- MEM_NEM bit
523//	beq	r10, L10_		// IF user mode (r10<0> == 0) pal mode
524//	bic	r23, r24, r23		// set fillerr_en bit
525//	br	r31, L20_		// ELSE
526//L10_:	bis	r23, r24, r23		// clear fillerr_en bit
527//L20_:					// ENDIF
528//
529//	stlp	r23, 0x80(r22)		// write back the CIA_MASK register
530//	mb
531//	ldlp    r23, 0x80(r22)
532//	bis     r23,r31,r23
533//	mb
534//
535//	lda	r22, 1(r31)		// r22 <- 87.4000.0100 ptr to CIA_CTRL
536//	ldah	r22, 0x8740(r22)
537//	sll	r22, 8, r22
538//	ldlp	r23, 0(r22)		// r23 <- contents of CIA_CTRL
539//	bis     r23,r31,r23
540//
541//
542//	lda	r24, 0x400(r31)		// r9 <- fillerr_en bit
543//	beq	r10, L30_		// IF user mode (r10<0> == 0) pal mode
544//	bic	r23, r24, r23		// set fillerr_en bit
545//	br	r31, L40_		// ELSE
546//L30_:	bis	r23, r24, r23		// clear fillerr_en bit
547//L40_:					// ENDIF
548//
549//	stlp	r23, 0(r22)		// write back the CIA_CTRL register
550//	mb
551//	ldlp    r23, 0(r22)
552//	bis     r23,r31,r23
553//	mb
554//
555//	//restore r22,r23,r24
556//	ldqp r22, 0x150(r31)
557//	ldqp r23, 0x158(r31)
558//	ldqp r24, 0x160(r31)
559//
560//	.endm
561
562// multiprocessor support can be enabled for a max of n processors by
563// setting the following to the number of processors on the system.
564// Note that this is really the max cpuid.
565
566#ifndef max_cpuid
567#define max_cpuid 8
568#endif
569
570#ifndef osf_svmin			// platform specific palcode version number
571#define osf_svmin 0
572#endif
573
574
575#define osfpal_version_h ((max_cpuid<<16) | (osf_svmin<<0))
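// With the defaults defined near the top of this file (max_cpuid=1,
// osf_svmin=1), the packing above works out to, in C:
//
//     uint32_t osfpal_version_h = (1 << 16) | (1 << 0);   // 0x00010001
//
// The max cpuid lands in the bits above <16> and the platform-specific
// minor version in the low bits.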
576
577// .mcall ldqp		// override macro64 definition with macro from library
578// .mcall stqp		// override macro64 definition with macro from library
579
580
581// 	.psect	_pal,mix
582// huh pb pal_base:
583// huh pb #define current_block_base . - pal_base
584
585// .sbttl	"RESET	-  Reset Trap Entry Point"
586//+
587// RESET - offset 0000
588// Entry:
589//	Vectored into via hardware trap on reset, or branched to
590//	on swppal.
591//
592//	r0 = whami
593//	r1 = pal_base
594//	r2 = base of scratch area
595//	r3 = halt code
596//
597//
598// Function:
599//
600//-
601
602        .text	0
603        . = 0x0000
604        .globl Pal_Base
605Pal_Base:
606        HDW_VECTOR(PAL_RESET_ENTRY)
607Trap_Reset:
608        nop
609#ifdef SIMOS
610        /*
611         * store into r1
612         */
613        br r1,sys_reset
614#else
615        /* following is a srcmax change */
616
617        DEBUGSTORE(0x41)
618        /* The original code jumped using r1 as a linkage register to pass the base
619           of PALcode to the platform specific code.  We use r1 to pass a parameter
620           from the SROM, so we hardcode the address of Pal_Base in platform.s
621         */
622        br	r31, sys_reset
623#endif
624
625        // Specify PAL version info as a constant
626        // at a known location (reset + 8).
627
628        .long osfpal_version_l		// <pal_type@16> ! <vmaj@8> ! <vmin@0>
629        .long osfpal_version_h		// <max_cpuid@16> ! <osf_svmin@0>
630        .long 0
631        .long 0
632pal_impure_start:
633        .quad 0
634pal_debug_ptr:
635        .quad 0				// reserved for debug pointer ; 20
636#if beh_model == 0
637
638
639#if enable_p4_fixups != 0
640
641
642        .quad 0
643        .long p4_fixup_hw_rei_fixup_table
644#endif
645
646#else
647
648        .quad 0				//
649        .quad 0	//0x0030
650        .quad 0
651        .quad 0 //0x0040
652        .quad 0
653        .quad 0 //0x0050
654        .quad 0
655        .quad 0 //0x0060
656        .quad 0
657pal_enter_cns_address:
658        .quad 0				//0x0070 -- address to jump to from enter_console
659        .long <<sys_exit_console-pal_base>+1>      //0x0078 -- offset to sys_exit_console (set palmode bit)
660#endif
661
662
663
664
665// .sbttl	"IACCVIO- Istream Access Violation Trap Entry Point"
666
667//+
668// IACCVIO - offset 0080
669// Entry:
670//	Vectored into via hardware trap on Istream access violation or sign check error on PC.
671//
672// Function:
673//	Build stack frame
674//	a0 <- Faulting VA
675//	a1 <- MMCSR  (1 for ACV)
676//	a2 <- -1 (for ifetch fault)
677//	vector via entMM
678//-
679
680        HDW_VECTOR(PAL_IACCVIO_ENTRY)
681Trap_Iaccvio:
682        DEBUGSTORE(0x42)
683        sll	r11, 63-osfps_v_mode, r25 // Shift mode up to MS bit
684        mtpr	r31, ev5__ps		// Set Ibox current mode to kernel
685
686        bis	r11, r31, r12		// Save PS
687        bge	r25, TRAP_IACCVIO_10_		// no stack swap needed if cm=kern
688
689
690        mtpr	r31, ev5__dtb_cm	// Set Mbox current mode to kernel -
691                                        //     no virt ref for next 2 cycles
692        mtpr	r30, pt_usp		// save user stack
693
694        bis	r31, r31, r12		// Set new PS
695        mfpr	r30, pt_ksp
696
697TRAP_IACCVIO_10_:
698        lda	sp, 0-osfsf_c_size(sp)// allocate stack space
699        mfpr	r14, exc_addr		// get pc
700
701        stq	r16, osfsf_a0(sp)	// save regs
702        bic	r14, 3, r16		// pass pc/va as a0
703
704        stq	r17, osfsf_a1(sp)	// a1
705        or	r31, mmcsr_c_acv, r17	// pass mm_csr as a1
706
707        stq	r18, osfsf_a2(sp) 	// a2
708        mfpr	r13, pt_entmm		// get entry point
709
710        stq	r11, osfsf_ps(sp)	// save old ps
711        bis	r12, r31, r11		// update ps
712
713        stq	r16, osfsf_pc(sp)	// save pc
714        stq	r29, osfsf_gp(sp) 	// save gp
715
716        mtpr	r13, exc_addr		// load exc_addr with entMM
717                                        // 1 cycle to hw_rei
718        mfpr	r29, pt_kgp		// get the kgp
719
720        subq	r31, 1, r18		// pass flag of istream, as a2
721        hw_rei_spe
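// For reference, the frame pushed above (and by the other trap entries that
// follow) is six quadwords laid out by the osfsf_* offsets from the include
// files.  A rough C sketch -- the field order shown is only an assumption;
// the osfsf_* symbols are authoritative:
//
//     struct osf_stack_frame {
//         uint64_t ps;    // osfsf_ps - saved processor status
//         uint64_t pc;    // osfsf_pc - saved program counter
//         uint64_t gp;    // osfsf_gp - saved global pointer
//         uint64_t a0;    // osfsf_a0 - here: faulting VA
//         uint64_t a1;    // osfsf_a1 - here: MMCSR (ACV)
//         uint64_t a2;    // osfsf_a2 - here: -1 (ifetch fault)
//     };                  // osfsf_c_size bytes total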
722
723
724// .sbttl	"INTERRUPT- Interrupt Trap Entry Point"
725
726//+
727// INTERRUPT - offset 0100
728// Entry:
729//	Vectored into via trap on hardware interrupt
730//
731// Function:
732//	check for halt interrupt
733//	check for passive release (current ipl geq requestor)
734//	if necessary, switch to kernel mode
735//	push stack frame, update ps (including current mode and ipl copies), sp, and gp
736//	pass the interrupt info to the system module
737//
738//-
739
740
741        HDW_VECTOR(PAL_INTERRUPT_ENTRY)
742Trap_Interrupt:
743        mfpr    r13, ev5__intid         // Fetch level of interruptor
744        mfpr    r25, ev5__isr           // Fetch interrupt summary register
745
746        srl     r25, isr_v_hlt, r9     // Get HLT bit
747        mfpr	r14, ev5__ipl
748
749        mtpr	r31, ev5__dtb_cm	// Set Mbox current mode to kern
750        blbs    r9, sys_halt_interrupt	// halt_interrupt if HLT bit set
751
752        cmple   r13, r14, r8            // R8 = 1 if intid is less than or equal to ipl
753        bne     r8, sys_passive_release // Passive release if current interrupt is lt or eq ipl
754
755        and	r11, osfps_m_mode, r10 // get mode bit
756        beq	r10, TRAP_INTERRUPT_10_		// Skip stack swap in kernel
757
758        mtpr	r30, pt_usp		// save user stack
759        mfpr	r30, pt_ksp		// get kern stack
760
761TRAP_INTERRUPT_10_:
762        lda	sp, (0-osfsf_c_size)(sp)// allocate stack space
763        mfpr	r14, exc_addr		// get pc
764
765        stq	r11, osfsf_ps(sp) 	// save ps
766        stq	r14, osfsf_pc(sp) 	// save pc
767
768        stq     r29, osfsf_gp(sp)       // push gp
769        stq	r16, osfsf_a0(sp)	// a0
770
771//	pvc_violate 354			// ps is cleared anyway,  if store to stack faults.
772        mtpr    r31, ev5__ps            // Set Ibox current mode to kernel
773        stq	r17, osfsf_a1(sp)	// a1
774
775        stq	r18, osfsf_a2(sp) 	// a2
776        subq	r13, 0x11, r12		// Start to translate from EV5IPL->OSFIPL
777
778        srl	r12, 1, r8		// 1d, 1e: ipl 6.  1f: ipl 7.
779        subq	r13, 0x1d, r9		// Check for 1d, 1e, 1f
780
781        cmovge	r9, r8, r12		// if .ge. 1d, then take shifted value
782        bis	r12, r31, r11		// set new ps
783
784        mfpr	r12, pt_intmask
785        and	r11, osfps_m_ipl, r14	// Isolate just new ipl (not really needed, since all non-ipl bits zeroed already)
786
787#ifdef SIMOS
788        /*
789         * Lance had space problems. We don't.
790         */
791        extbl	r12, r14, r14		// Translate new OSFIPL->EV5IPL
792        mfpr	r29, pt_kgp		// update gp
793        mtpr	r14, ev5__ipl		// load the new IPL into Ibox
794#else
795// Moved the following three lines to sys_interrupt to make room for debug
796//	extbl	r12, r14, r14		// Translate new OSFIPL->EV5IPL
797//	mfpr	r29, pt_kgp		// update gp
798
799//	mtpr	r14, ev5__ipl		// load the new IPL into Ibox
800#endif
801        br	r31, sys_interrupt	// Go handle interrupt
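// The EV5IPL->OSFIPL translation above amounts to the following, sketched in
// C with "intid" standing for the value read from ev5__intid:
//
//     int osfipl = intid - 0x11;                 // base translation
//     if (intid >= 0x1d)                         // 0x1d,0x1e -> 6; 0x1f -> 7
//         osfipl = (intid - 0x11) >> 1;
//     new_ps = osfipl;                           // becomes PS<ipl>
//     // pt_intmask is then used (extbl) as a byte table indexed by the new
//     // OSF ipl to get the EV5 ipl that is loaded into the Ibox.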
802
803
804
805// .sbttl	"ITBMISS- Istream TBmiss Trap Entry Point"
806
807//+
808// ITBMISS - offset 0180
809// Entry:
810//	Vectored into via hardware trap on Istream translation buffer miss.
811//
812// Function:
813//       Do a virtual fetch of the PTE, and fill the ITB if the PTE is valid.
814//       Can trap into DTBMISS_DOUBLE.
815//       This routine can use the PALshadow registers r8, r9, and r10
816//
817//-
818
819        HDW_VECTOR(PAL_ITB_MISS_ENTRY)
820Trap_Itbmiss:
821#if real_mm == 0
822
823
824                                        // Simple 1-1 va->pa mapping
825
826        nop				// Pad to align to E1
827        mfpr 	r8, exc_addr
828
829        srl	r8, page_offset_size_bits, r9
830        sll	r9, 32, r9
831
832        lda	r9, 0x3301(r9)		// Make PTE, V set, all KRE, URE, KWE, UWE
833        mtpr	r9, itb_pte		// E1
834
835        hw_rei_stall			// Nital says I don't have to obey shadow wait rule here.
836#else
837
838                                        // Real MM mapping
839        nop
840        mfpr	r8, ev5__ifault_va_form // Get virtual address of PTE.
841
842        nop
843        mfpr    r10, exc_addr           // Get PC of faulting instruction in case of DTBmiss.
844
845pal_itb_ldq:
846        ld_vpte r8, 0(r8)             	// Get PTE, traps to DTBMISS_DOUBLE in case of TBmiss
847        mtpr	r10, exc_addr		// Restore exc_address if there was a trap.
848
849        mfpr	r31, ev5__va		// Unlock VA in case there was a double miss
850        nop
851
852        and	r8, osfpte_m_foe, r25 	// Look for FOE set.
853        blbc	r8, invalid_ipte_handler // PTE not valid.
854
855        nop
856        bne	r25, foe_ipte_handler	// FOE is set
857
858        nop
859        mtpr	r8, ev5__itb_pte	// Ibox remembers the VA, load the PTE into the ITB.
860
861        hw_rei_stall			//
862
863#endif
864
865
866
867
868// .sbttl	"DTBMISS_SINGLE	- Dstream Single TBmiss Trap Entry Point"
869
870//+
871// DTBMISS_SINGLE - offset 0200
872// Entry:
873//	Vectored into via hardware trap on Dstream single translation buffer miss.
874//
875// Function:
876//	Do a virtual fetch of the PTE, and fill the DTB if the PTE is valid.
877//	Can trap into DTBMISS_DOUBLE.
878//	This routine can use the PALshadow registers r8, r9, and r10
879//-
880
881        HDW_VECTOR(PAL_DTB_MISS_ENTRY)
882Trap_Dtbmiss_Single:
883#if real_mm == 0
884                                        // Simple 1-1 va->pa mapping
885        mfpr 	r8, va			// E0
886        srl	r8, page_offset_size_bits, r9
887
888        sll	r9, 32, r9
889        lda	r9, 0x3301(r9)		// Make PTE, V set, all KRE, URE, KWE, UWE
890
891        mtpr	r9, dtb_pte		// E0
892        nop				// Pad to align to E0
893
894
895
896        mtpr	r8, dtb_tag		// E0
897        nop
898
899        nop				// Pad tag write
900        nop
901
902        nop				// Pad tag write
903        nop
904
905        hw_rei
906#else
907        mfpr	r8, ev5__va_form      	// Get virtual address of PTE - 1 cycle delay.  E0.
908        mfpr    r10, exc_addr           // Get PC of faulting instruction in case of error.  E1.
909
910//	DEBUGSTORE(0x45)
911//	DEBUG_EXC_ADDR()
912                                        // Real MM mapping
913        mfpr    r9, ev5__mm_stat	// Get read/write bit.  E0.
914        mtpr	r10, pt6		// Stash exc_addr away
915
916pal_dtb_ldq:
917        ld_vpte r8, 0(r8)             	// Get PTE, traps to DTBMISS_DOUBLE in case of TBmiss
918        nop				// Pad MF VA
919
920        mfpr	r10, ev5__va            // Get original faulting VA for TB load.  E0.
921        nop
922
923        mtpr    r8, ev5__dtb_pte       	// Write DTB PTE part.   E0.
924        blbc    r8, invalid_dpte_handler    // Handle invalid PTE
925
926        mtpr    r10, ev5__dtb_tag      	// Write DTB TAG part, completes DTB load.  No virt ref for 3 cycles.
927        mfpr	r10, pt6
928
929                                        // Following 2 instructions take 2 cycles
930        mtpr    r10, exc_addr           // Return linkage in case we trapped.  E1.
931        mfpr	r31,  pt0		// Pad the write to dtb_tag
932
933        hw_rei                          // Done, return
934#endif
935
936
937
938
939// .sbttl	"DTBMISS_DOUBLE	- Dstream Double TBmiss Trap Entry Point"
940
941//+
942// DTBMISS_DOUBLE - offset 0280
943// Entry:
944//	Vectored into via hardware trap on Double TBmiss from single miss flows.
945//
946//	r8   - faulting VA
947//	r9   - original MMstat
948//	r10 - original exc_addr (both itb,dtb miss)
949//	pt6 - original exc_addr (dtb miss flow only)
950//	VA IPR - locked with original faulting VA
951//
952// Function:
953// 	Get PTE, if valid load TB and return.
954//	If not valid then take TNV/ACV exception.
955//
956//	pt4 and pt5 are reserved for this flow.
957//
958//
959//-
960
961        HDW_VECTOR(PAL_DOUBLE_MISS_ENTRY)
962Trap_Dtbmiss_double:
963#if ldvpte_bug_fix != 0
964        mtpr 	r8, pt4			// save r8 to do exc_addr check
965        mfpr	r8, exc_addr
966        blbc	r8, Trap_Dtbmiss_Single	//if not in palmode, should be in the single routine, dummy!
967        mfpr	r8, pt4			// restore r8
968#endif
969        nop
970        mtpr	r22, pt5		// Get some scratch space. E1.
971                                        // Due to virtual scheme, we can skip the first lookup and go
972                                        // right to fetch of level 2 PTE
973        sll     r8, (64-((2*page_seg_size_bits)+page_offset_size_bits)), r22  // Clean off upper bits of VA
974        mtpr	r21, pt4		// Get some scratch space. E1.
975
976        srl    	r22, 61-page_seg_size_bits, r22 // Get Va<seg1>*8
977        mfpr	r21, pt_ptbr		// Get physical address of the page table.
978
979        nop
980        addq    r21, r22, r21           // Index into page table for level 2 PTE.
981
982        sll    	r8, (64-((1*page_seg_size_bits)+page_offset_size_bits)), r22  // Clean off upper bits of VA
983        ldqp   	r21, 0(r21)            	// Get level 2 PTE (addr<2:0> ignored)
984
985        srl    	r22, 61-page_seg_size_bits, r22	// Get Va<seg1>*8
986        blbc 	r21, double_pte_inv		// Check for Invalid PTE.
987
988        srl    	r21, 32, r21			// extract PFN from PTE
989        sll     r21, page_offset_size_bits, r21	// get PFN * 2^13 for add to <seg3>*8
990
991        addq    r21, r22, r21           // Index into page table for level 3 PTE.
992        nop
993
994        ldqp   	r21, 0(r21)            	// Get level 3 PTE (addr<2:0> ignored)
995        blbc	r21, double_pte_inv	// Check for invalid PTE.
996
997        mtpr	r21, ev5__dtb_pte	// Write the PTE.  E0.
998        mfpr	r22, pt5		// Restore scratch register
999
1000        mtpr	r8, ev5__dtb_tag	// Write the TAG. E0.  No virtual references in subsequent 3 cycles.
1001        mfpr	r21, pt4		// Restore scratch register
1002
1003        nop				// Pad write to tag.
1004        nop
1005
1006        nop				// Pad write to tag.
1007        nop
1008
1009        hw_rei
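// A rough C sketch of the two dependent PTE fetches above, assuming the PTE
// keeps its valid bit in <0> and its PFN in <63:32> (SEG and OFF stand for
// page_seg_size_bits and page_offset_size_bits):
//
//     uint64_t idx_hi = (va << (64 - (2*SEG + OFF))) >> (61 - SEG);  // index*8
//     uint64_t idx_lo = (va << (64 - (1*SEG + OFF))) >> (61 - SEG);  // index*8
//     uint64_t pte    = ldq_p(pt_ptbr + idx_hi);           // level 2 PTE
//     if (!(pte & 1)) goto double_pte_inv;
//     pte = ldq_p(((pte >> 32) << OFF) + idx_lo);          // level 3 PTE
//     if (!(pte & 1)) goto double_pte_inv;
//     // then load pte/va into the DTB and hw_rei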
1010
1011
1012
1013// .sbttl	"UNALIGN -- Dstream unalign trap"
1014//+
1015// UNALIGN - offset 0300
1016// Entry:
1017//	Vectored into via hardware trap on unaligned Dstream reference.
1018//
1019// Function:
1020//	Build stack frame
1021//	a0 <- Faulting VA
1022//	a1 <- Opcode
1023//	a2 <- src/dst register number
1024//	vector via entUna
1025//-
1026
1027        HDW_VECTOR(PAL_UNALIGN_ENTRY)
1028Trap_Unalign:
1029/*	DEBUGSTORE(0x47)*/
1030        sll	r11, 63-osfps_v_mode, r25 // Shift mode up to MS bit
1031        mtpr	r31, ev5__ps		// Set Ibox current mode to kernel
1032
1033        mfpr	r8, ev5__mm_stat	// Get mmstat --ok to use r8, no tbmiss
1034        mfpr	r14, exc_addr		// get pc
1035
1036        srl	r8, mm_stat_v_ra, r13	// Shift Ra field to ls bits
1037        blbs	r14, pal_pal_bug_check  // Bugcheck if unaligned in PAL
1038
1039        blbs	r8, UNALIGN_NO_DISMISS // lsb only set on store or fetch_m
1040                                        // not set, must be a load
1041        and	r13, 0x1F, r8		// isolate ra
1042
1043        cmpeq   r8, 0x1F, r8		// check for r31/F31
1044        bne     r8, dfault_fetch_ldr31_err // if it's a load to r31 or f31 -- dismiss the fault
1045
1046UNALIGN_NO_DISMISS:
1047        bis	r11, r31, r12		// Save PS
1048        bge	r25, UNALIGN_NO_DISMISS_10_		// no stack swap needed if cm=kern
1049
1050
1051        mtpr	r31, ev5__dtb_cm	// Set Mbox current mode to kernel -
1052                                        //     no virt ref for next 2 cycles
1053        mtpr	r30, pt_usp		// save user stack
1054
1055        bis	r31, r31, r12		// Set new PS
1056        mfpr	r30, pt_ksp
1057
1058UNALIGN_NO_DISMISS_10_:
1059        mfpr	r25, ev5__va		// Unlock VA
1060        lda	sp, 0-osfsf_c_size(sp)// allocate stack space
1061
1062        mtpr	r25, pt0		// Stash VA
1063        stq	r18, osfsf_a2(sp) 	// a2
1064
1065        stq	r11, osfsf_ps(sp)	// save old ps
1066        srl	r13, mm_stat_v_opcode-mm_stat_v_ra, r25// Isolate opcode
1067
1068        stq	r29, osfsf_gp(sp) 	// save gp
1069        addq	r14, 4, r14		// inc PC past the ld/st
1070
1071        stq	r17, osfsf_a1(sp)	// a1
1072        and	r25, mm_stat_m_opcode, r17// Clean opcode for a1
1073
1074        stq	r16, osfsf_a0(sp)	// save regs
1075        mfpr	r16, pt0		// a0 <- va/unlock
1076
1077        stq	r14, osfsf_pc(sp)	// save pc
1078        mfpr	r25, pt_entuna		// get entry point
1079
1080
1081        bis	r12, r31, r11		// update ps
1082        br 	r31, unalign_trap_cont
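// The arguments handed to entUna are extracted from MM_STAT and the locked
// VA above; roughly, in C (bit positions come from the mm_stat_* symbols):
//
//     a0 = locked_va;                                            // faulting VA
//     a1 = (mm_stat >> mm_stat_v_opcode) & mm_stat_m_opcode;     // opcode
//     a2 = (mm_stat >> mm_stat_v_ra) & mm_stat_m_ra;             // Ra number
//
// (a2 is finished off in unalign_trap_cont below.)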
1083
1084
1085
1086
1087// .sbttl	"DFAULT	- Dstream Fault Trap Entry Point"
1088
1089//+
1090// DFAULT - offset 0380
1091// Entry:
1092//	Vectored into via hardware trap on dstream fault or sign check error on DVA.
1093//
1094// Function:
1095//	Ignore faults on FETCH/FETCH_M
1096//	Check for DFAULT in PAL
1097//	Build stack frame
1098//	a0 <- Faulting VA
1099//	a1 <- MMCSR (1 for ACV, 2 for FOR, 4 for FOW)
1100//	a2 <- R/W
1101//	vector via entMM
1102//
1103//-
1104        HDW_VECTOR(PAL_D_FAULT_ENTRY)
1105Trap_Dfault:
1106//	DEBUGSTORE(0x48)
1107        sll	r11, 63-osfps_v_mode, r25 // Shift mode up to MS bit
1108        mtpr	r31, ev5__ps		// Set Ibox current mode to kernel
1109
1110        mfpr	r13, ev5__mm_stat	// Get mmstat
1111        mfpr	r8, exc_addr		// get pc, preserve r14
1112
1113        srl	r13, mm_stat_v_opcode, r9 // Shift opcode field to ls bits
1114        blbs	r8, dfault_in_pal
1115
1116        bis	r8, r31, r14		// move exc_addr to correct place
1117        bis	r11, r31, r12		// Save PS
1118
1119        mtpr	r31, ev5__dtb_cm	// Set Mbox current mode to kernel -
1120                                        //     no virt ref for next 2 cycles
1121        and	r9, mm_stat_m_opcode, r9 // Clean all but opcode
1122
1123        cmpeq   r9, evx_opc_sync, r9 	// Is the opcode fetch/fetchm?
1124        bne     r9, dfault_fetch_ldr31_err   // Yes, dismiss the fault
1125
1126        //dismiss exception if load to r31/f31
1127        blbs	r13, dfault_no_dismiss	// mm_stat<0> set on store or fetchm
1128
1129                                        // not a store or fetch, must be a load
1130        srl	r13, mm_stat_v_ra, r9	// Shift rnum to low bits
1131
1132        and	r9, 0x1F, r9		// isolate rnum
1133        nop
1134
1135        cmpeq   r9, 0x1F, r9   	// Is the rnum r31 or f31?
1136        bne     r9, dfault_fetch_ldr31_err    // Yes, dismiss the fault
1137
1138dfault_no_dismiss:
1139        and	r13, 0xf, r13	// Clean extra bits in mm_stat
1140        bge	r25, dfault_trap_cont	// no stack swap needed if cm=kern
1141
1142
1143        mtpr	r30, pt_usp		// save user stack
1144        bis	r31, r31, r12		// Set new PS
1145
1146        mfpr	r30, pt_ksp
1147        br	r31, dfault_trap_cont
1148
1149
1150
1151
1152
1153// .sbttl	"MCHK	-  Machine Check Trap Entry Point"
1154
1155//+
1156// MCHK - offset 0400
1157// Entry:
1158//	Vectored into via hardware trap on machine check.
1159//
1160// Function:
1161//
1162//-
1163
1164        HDW_VECTOR(PAL_MCHK_ENTRY)
1165Trap_Mchk:
1166        DEBUGSTORE(0x49)
1167        mtpr    r31, ic_flush_ctl       // Flush the Icache
1168        br      r31, sys_machine_check
1169
1170
1171
1172
1173// .sbttl	"OPCDEC	-  Illegal Opcode Trap Entry Point"
1174
1175//+
1176// OPCDEC - offset 0480
1177// Entry:
1178//	Vectored into via hardware trap on illegal opcode.
1179//
1180//	Build stack frame
1181//	a0 <- code
1182//	a1 <- unpred
1183//	a2 <- unpred
1184//	vector via entIF
1185//
1186//-
1187
1188        HDW_VECTOR(PAL_OPCDEC_ENTRY)
1189Trap_Opcdec:
1190        DEBUGSTORE(0x4a)
1191//simos	DEBUG_EXC_ADDR()
1192        sll	r11, 63-osfps_v_mode, r25 // Shift mode up to MS bit
1193        mtpr	r31, ev5__ps		// Set Ibox current mode to kernel
1194
1195        mfpr	r14, exc_addr		// get pc
1196        blbs	r14, pal_pal_bug_check	// check opcdec in palmode
1197
1198        bis	r11, r31, r12		// Save PS
1199        bge	r25, TRAP_OPCDEC_10_		// no stack swap needed if cm=kern
1200
1201
1202        mtpr	r31, ev5__dtb_cm	// Set Mbox current mode to kernel -
1203                                        //     no virt ref for next 2 cycles
1204        mtpr	r30, pt_usp		// save user stack
1205
1206        bis	r31, r31, r12		// Set new PS
1207        mfpr	r30, pt_ksp
1208
1209TRAP_OPCDEC_10_:
1210        lda	sp, 0-osfsf_c_size(sp)// allocate stack space
1211        addq	r14, 4, r14		// inc pc
1212
1213        stq	r16, osfsf_a0(sp)	// save regs
1214        bis	r31, osf_a0_opdec, r16	// set a0
1215
1216        stq	r11, osfsf_ps(sp)	// save old ps
1217        mfpr	r13, pt_entif		// get entry point
1218
1219        stq	r18, osfsf_a2(sp) 	// a2
1220        stq	r17, osfsf_a1(sp)	// a1
1221
1222        stq	r29, osfsf_gp(sp) 	// save gp
1223        stq	r14, osfsf_pc(sp)	// save pc
1224
1225        bis	r12, r31, r11		// update ps
1226        mtpr	r13, exc_addr		// load exc_addr with entIF
1227                                        // 1 cycle to hw_rei, E1
1228
1229        mfpr	r29, pt_kgp		// get the kgp, E1
1230
1231        hw_rei_spe			// done, E1
1232
1233
1234
1235
1236
1237
1238// .sbttl	"ARITH	-  Arithmetic Exception Trap Entry Point"
1239
1240//+
1241// ARITH - offset 0500
1242// Entry:
1243//	Vectored into via hardware trap on arithmetic exception.
1244//
1245// Function:
1246//	Build stack frame
1247//	a0 <- exc_sum
1248//	a1 <- exc_mask
1249//	a2 <- unpred
1250//	vector via entArith
1251//
1252//-
1253        HDW_VECTOR(PAL_ARITH_ENTRY)
1254Trap_Arith:
1255        DEBUGSTORE(0x4b)
1256        and	r11, osfps_m_mode, r12 // get mode bit
1257        mfpr	r31, ev5__va		// unlock mbox
1258
1259        bis	r11, r31, r25		// save ps
1260        mfpr	r14, exc_addr		// get pc
1261
1262        nop
1263        blbs	r14, pal_pal_bug_check	// arith trap from PAL
1264
1265        mtpr    r31, ev5__dtb_cm        // Set Mbox current mode to kernel -
1266                                        //     no virt ref for next 2 cycles
1267        beq	r12, TRAP_ARITH_10_		// if zero we are in kern now
1268
1269        bis	r31, r31, r25		// set the new ps
1270        mtpr	r30, pt_usp		// save user stack
1271
1272        nop
1273        mfpr	r30, pt_ksp		// get kern stack
1274
1275TRAP_ARITH_10_: 	lda	sp, 0-osfsf_c_size(sp)	// allocate stack space
1276        mtpr	r31, ev5__ps		// Set Ibox current mode to kernel
1277
1278        nop				// Pad current mode write and stq
1279        mfpr	r13, ev5__exc_sum	// get the exc_sum
1280
1281        mfpr	r12, pt_entarith
1282        stq	r14, osfsf_pc(sp)	// save pc
1283
1284        stq	r17, osfsf_a1(sp)
1285        mfpr    r17, ev5__exc_mask      // Get exception register mask IPR - no mtpr exc_sum in next cycle
1286
1287        stq	r11, osfsf_ps(sp)	// save ps
1288        bis	r25, r31, r11		// set new ps
1289
1290        stq	r16, osfsf_a0(sp)	// save regs
1291        srl	r13, exc_sum_v_swc, r16// shift data to correct position
1292
1293        stq	r18, osfsf_a2(sp)
1294//	pvc_violate 354			// ok, but make sure reads of exc_mask/sum are not in same trap shadow
1295        mtpr	r31, ev5__exc_sum	// Unlock exc_sum and exc_mask
1296
1297        stq	r29, osfsf_gp(sp)
1298        mtpr	r12, exc_addr		// Set new PC - 1 bubble to hw_rei - E1
1299
1300        mfpr	r29, pt_kgp		// get the kern gp - E1
1301        hw_rei_spe			// done - E1
1302
1303
1304
1305
1306
1307
1308// .sbttl	"FEN	-  Illegal Floating Point Operation Trap Entry Point"
1309
1310//+
1311// FEN - offset 0580
1312// Entry:
1313//	Vectored into via hardware trap on illegal FP op.
1314//
1315// Function:
1316//	Build stack frame
1317//	a0 <- code
1318//	a1 <- unpred
1319//	a2 <- unpred
1320//	vector via entIF
1321//
1322//-
1323
1324        HDW_VECTOR(PAL_FEN_ENTRY)
1325Trap_Fen:
1326        sll	r11, 63-osfps_v_mode, r25 // Shift mode up to MS bit
1327        mtpr	r31, ev5__ps		// Set Ibox current mode to kernel
1328
1329        mfpr	r14, exc_addr		// get pc
1330        blbs	r14, pal_pal_bug_check	// check opcdec in palmode
1331
1332        mfpr	r13, ev5__icsr
1333        nop
1334
1335        bis	r11, r31, r12		// Save PS
1336        bge	r25, TRAP_FEN_10_		// no stack swap needed if cm=kern
1337
1338        mtpr	r31, ev5__dtb_cm	// Set Mbox current mode to kernel -
1339                                        //     no virt ref for next 2 cycles
1340        mtpr	r30, pt_usp		// save user stack
1341
1342        bis	r31, r31, r12		// Set new PS
1343        mfpr	r30, pt_ksp
1344
1345TRAP_FEN_10_:
1346        lda	sp, 0-osfsf_c_size(sp)// allocate stack space
1347        srl     r13, icsr_v_fpe, r25   // Shift FP enable to bit 0
1348
1349
1350        stq	r16, osfsf_a0(sp)	// save regs
1351        mfpr	r13, pt_entif		// get entry point
1352
1353        stq	r18, osfsf_a2(sp) 	// a2
1354        stq	r11, osfsf_ps(sp)	// save old ps
1355
1356        stq	r29, osfsf_gp(sp) 	// save gp
1357        bis	r12, r31, r11		// set new ps
1358
1359        stq	r17, osfsf_a1(sp)	// a1
1360        blbs	r25,fen_to_opcdec	// If FP is enabled, this is really OPCDEC.
1361
1362        bis	r31, osf_a0_fen, r16	// set a0
1363        stq	r14, osfsf_pc(sp)	// save pc
1364
1365        mtpr	r13, exc_addr		// load exc_addr with entIF
1366                                        // 1 cycle to hw_rei -E1
1367
1368        mfpr	r29, pt_kgp		// get the kgp -E1
1369
1370        hw_rei_spe			// done -E1
1371
1372//	FEN trap was taken, but the fault is really opcdec.
1373        ALIGN_BRANCH
1374fen_to_opcdec:
1375        addq	r14, 4, r14		// save PC+4
1376        bis	r31, osf_a0_opdec, r16	// set a0
1377
1378        stq	r14, osfsf_pc(sp)	// save pc
1379        mtpr	r13, exc_addr		// load exc_addr with entIF
1380                                        // 1 cycle to hw_rei
1381
1382        mfpr	r29, pt_kgp		// get the kgp
1383        hw_rei_spe			// done
1384
1385
1386
1387// .sbttl	"Misc handlers"
1388                                                // Start area for misc code.
1389//+
1390//dfault_trap_cont
1391//	A dfault trap has been taken.  The sp has been updated if necessary.
1392//	Push a stack frame and vector via entMM.
1393//
1394//	Current state:
1395//		r12 - new PS
1396//		r13 - MMstat
1397//		VA - locked
1398//
1399//-
1400        ALIGN_BLOCK
1401dfault_trap_cont:
1402        lda	sp, 0-osfsf_c_size(sp)// allocate stack space
1403        mfpr	r25, ev5__va		// Fetch VA/unlock
1404
1405        stq	r18, osfsf_a2(sp) 	// a2
1406        and	r13, 1, r18		// Clean r/w bit for a2
1407
1408        stq	r16, osfsf_a0(sp)	// save regs
1409        bis	r25, r31, r16		// a0 <- va
1410
1411        stq	r17, osfsf_a1(sp)	// a1
1412        srl	r13, 1, r17		// shift fault bits to right position
1413
1414        stq	r11, osfsf_ps(sp)	// save old ps
1415        bis	r12, r31, r11		// update ps
1416
1417        stq	r14, osfsf_pc(sp)	// save pc
1418        mfpr	r25, pt_entmm		// get entry point
1419
1420        stq	r29, osfsf_gp(sp) 	// save gp
1421        cmovlbs	r17, 1, r17		// a2. acv overrides fox.
1422
1423        mtpr	r25, exc_addr		// load exc_addr with entMM
1424                                        // 1 cycle to hw_rei
1425        mfpr	r29, pt_kgp		// get the kgp
1426
1427        hw_rei_spe			// done
1428
1429//+
1430//unalign_trap_cont
1431//	An unalign trap has been taken.  Just need to finish up a few things.
1432//
1433//	Current state:
1434//		r25 - entUna
1435//		r13 - shifted MMstat
1436//
1437//-
1438        ALIGN_BLOCK
1439unalign_trap_cont:
1440        mtpr	r25, exc_addr		// load exc_addr with entUna
1441                                        // 1 cycle to hw_rei
1442
1443
1444        mfpr	r29, pt_kgp		// get the kgp
1445        and	r13, mm_stat_m_ra, r18	// Clean Ra for a2
1446
1447        hw_rei_spe			// done
1448
1449
1450
1451//+
1452// dfault_in_pal
1453//	Dfault trap was taken, exc_addr points to a PAL PC.
1454//	r9 - mmstat<opcode> right justified
1455//	r8 - exception address
1456//
1457//	These are the cases:
1458//		opcode was STQ -- from a stack builder, KSP not valid halt
1459//			r14 - original exc_addr
1460//			r11 - original PS
1461//		opcode was STL_C  -- rti or retsys clear lock_flag by stack write,
1462//					KSP not valid halt
1463//			r11 - original PS
1464//			r14 - original exc_addr
1465//		opcode was LDQ -- retsys or rti stack read, KSP not valid halt
1466//			r11 - original PS
1467//			r14 - original exc_addr
1468//		opcode was HW_LD -- itbmiss or dtbmiss, bugcheck due to fault on page tables
1469//			r10 - original exc_addr
1470//			r11 - original PS
1471//
1472//
1473//-
1474        ALIGN_BLOCK
1475dfault_in_pal:
1476        DEBUGSTORE(0x50)
1477        bic     r8, 3, r8            // Clean PC
1478        mfpr	r9, pal_base
1479
1480        mfpr	r31, va			// unlock VA
1481#if real_mm != 0
1482                        // if not real_mm, should never get here from miss flows
1483
1484        subq    r9, r8, r8            // pal_base - offset
1485
1486        lda     r9, pal_itb_ldq-pal_base(r8)
1487        nop
1488
1489        beq 	r9, dfault_do_bugcheck
1490        lda     r9, pal_dtb_ldq-pal_base(r8)
1491
1492        beq 	r9, dfault_do_bugcheck
1493#endif
1494
1495//
1496// KSP invalid halt case --
1497ksp_inval_halt:
1498        DEBUGSTORE(76)
1499        bic	r11, osfps_m_mode, r11	// set ps to kernel mode
1500        mtpr    r0, pt0
1501
1502        mtpr	r31, dtb_cm		// Make sure that the CM IPRs are all kernel mode
1503        mtpr	r31, ips
1504
1505        mtpr	r14, exc_addr		// Set PC to instruction that caused trouble
1506//orig	pvc_jsr updpcb, bsr=1
1507        bsr     r0, pal_update_pcb      // update the pcb
1508
1509        lda     r0, hlt_c_ksp_inval(r31)  // set halt code to ksp invalid halt
1510        br      r31, sys_enter_console  // enter the console
1511
1512        ALIGN_BRANCH
1513dfault_do_bugcheck:
1514        bis	r10, r31, r14		// bugcheck expects exc_addr in r14
1515        br	r31, pal_pal_bug_check
1516
1517
1518        ALIGN_BLOCK
1519//+
1520// dfault_fetch_ldr31_err - ignore faults on fetch(m) and loads to r31/f31
1521//	On entry -
1522//		r14 - exc_addr
1523//		VA is locked
1524//
1525//-
1526dfault_fetch_ldr31_err:
1527        mtpr	r11, ev5__dtb_cm
1528        mtpr	r11, ev5__ps		// Make sure ps hasn't changed
1529
1530        mfpr	r31, va			// unlock the mbox
1531        addq	r14, 4, r14		// inc the pc to skip the fetch
1532
1533        mtpr	r14, exc_addr		// give ibox new PC
1534        mfpr	r31, pt0		// pad exc_addr write
1535
1536        hw_rei
1537
1538
1539
1540        ALIGN_BLOCK
1541//+
1542// sys_from_kern
1543//	callsys from kernel mode - OS bugcheck machine check
1544//
1545//-
1546sys_from_kern:
1547        mfpr	r14, exc_addr			// PC points to call_pal
1548        subq	r14, 4, r14
1549
1550        lda	r25, mchk_c_os_bugcheck(r31)    // fetch mchk code
1551        br      r31, pal_pal_mchk
1552
1553
1554// .sbttl	"Continuation of long call_pal flows"
1555        ALIGN_BLOCK
1556//+
1557// wrent_tbl
1558//	Table to write *int in paltemps.
1559//	4 instructions/entry
1560//	r16 has new value
1561//
1562//-
1563wrent_tbl:
1564//orig	pvc_jsr	wrent, dest=1
1565        nop
1566        mtpr	r16, pt_entint
1567
1568        mfpr	r31, pt0		// Pad for mt->mf paltemp rule
1569        hw_rei
1570
1571
1572//orig	pvc_jsr	wrent, dest=1
1573        nop
1574        mtpr	r16, pt_entarith
1575
1576        mfpr    r31, pt0                // Pad for mt->mf paltemp rule
1577        hw_rei
1578
1579
1580//orig	pvc_jsr	wrent, dest=1
1581        nop
1582        mtpr	r16, pt_entmm
1583
1584        mfpr    r31, pt0                // Pad for mt->mf paltemp rule
1585        hw_rei
1586
1587
1588//orig	pvc_jsr	wrent, dest=1
1589        nop
1590        mtpr	r16, pt_entif
1591
1592        mfpr    r31, pt0                // Pad for mt->mf paltemp rule
1593        hw_rei
1594
1595
1596//orig	pvc_jsr	wrent, dest=1
1597        nop
1598        mtpr	r16, pt_entuna
1599
1600        mfpr    r31, pt0                // Pad for mt->mf paltemp rule
1601        hw_rei
1602
1603
1604//orig	pvc_jsr	wrent, dest=1
1605        nop
1606        mtpr	r16, pt_entsys
1607
1608        mfpr    r31, pt0                // Pad for mt->mf paltemp rule
1609        hw_rei
1610
1611        ALIGN_BLOCK
1612//+
1613// tbi_tbl
1614//	Table to do tbi instructions
1615//	4 instructions per entry
1616//-
1617tbi_tbl:
1618        // -2 tbia
1619//orig	pvc_jsr tbi, dest=1
1620        mtpr	r31, ev5__dtb_ia	// Flush DTB
1621        mtpr	r31, ev5__itb_ia	// Flush ITB
1622
1623#if icflush_on_tbix != 0
1624
1625
1626        br	r31, pal_ic_flush		// Flush Icache
1627#else
1628
1629        hw_rei_stall
1630#endif
1631
1632        nop				// Pad table
1633
1634        // -1 tbiap
1635//orig	pvc_jsr tbi, dest=1
1636        mtpr	r31, ev5__dtb_iap	// Flush DTB
1637        mtpr	r31, ev5__itb_iap	// Flush ITB
1638
1639#if icflush_on_tbix != 0
1640
1641
1642        br	r31, pal_ic_flush		// Flush Icache
1643#else
1644
1645        hw_rei_stall
1646#endif
1647
1648        nop				// Pad table
1649
1650
1651        // 0 unused
1652//orig	pvc_jsr tbi, dest=1
1653        hw_rei				// Pad table
1654        nop
1655        nop
1656        nop
1657
1658
1659        // 1 tbisi
1660//orig	pvc_jsr tbi, dest=1
1661#if icflush_on_tbix != 0
1662
1663
1664
1665        nop
1666        br	r31, pal_ic_flush_and_tbisi		// Flush Icache
1667        nop
1668        nop				// Pad table
1669#else
1670
1671        nop
1672        nop
1673        mtpr	r17, ev5__itb_is	// Flush ITB
1674        hw_rei_stall
1675#endif
1676
1677
1678
1679        // 2 tbisd
1680//orig	pvc_jsr tbi, dest=1
1681        mtpr	r17, ev5__dtb_is	// Flush DTB.
1682        nop
1683
1684        nop
1685        hw_rei_stall
1686
1687
1688        // 3 tbis
1689//orig	pvc_jsr tbi, dest=1
1690        mtpr	r17, ev5__dtb_is	// Flush DTB
1691#if icflush_on_tbix != 0
1692
1693
1694        br	r31, pal_ic_flush_and_tbisi	// Flush Icache and ITB
1695#else
1696        br	r31, tbi_finish
1697        ALIGN_BRANCH
1698tbi_finish:
1699        mtpr	r17, ev5__itb_is	// Flush ITB
1700        hw_rei_stall
1701#endif
1702
1703
1704
1705        ALIGN_BLOCK
1706//+
1707// bpt_bchk_common:
1708//	Finish up the bpt/bchk instructions
1709//-
1710bpt_bchk_common:
1711        stq	r18, osfsf_a2(sp) 	// a2
1712        mfpr	r13, pt_entif		// get entry point
1713
1714        stq	r12, osfsf_ps(sp)	// save old ps
1715        stq	r14, osfsf_pc(sp)	// save pc
1716
1717        stq	r29, osfsf_gp(sp) 	// save gp
1718        mtpr	r13, exc_addr		// load exc_addr with entIF
1719                                        // 1 cycle to hw_rei
1720
1721        mfpr	r29, pt_kgp		// get the kgp
1722
1723
1724        hw_rei_spe			// done
1725
1726
1727        ALIGN_BLOCK
1728//+
1729// rti_to_user
1730//	Finish up the rti instruction
1731//-
1732rti_to_user:
1733        mtpr	r11, ev5__dtb_cm	// set Mbox current mode - no virt ref for 2 cycles
1734        mtpr	r11, ev5__ps		// set Ibox current mode - 2 bubble to hw_rei
1735
1736        mtpr	r31, ev5__ipl		// set the ipl. No hw_rei for 2 cycles
1737        mtpr	r25, pt_ksp		// save off in case RTI to user
1738
1739        mfpr	r30, pt_usp
1740        hw_rei_spe			// and back
1741
1742
1743        ALIGN_BLOCK
1744//+
1745// rti_to_kern
1746//	Finish up the rti instruction
1747//-
1748rti_to_kern:
1749        and	r12, osfps_m_ipl, r11	// clean ps
1750        mfpr	r12, pt_intmask		// get int mask
1751
1752        extbl	r12, r11, r12		// get mask for this ipl
1753        mtpr	r25, pt_ksp		// save off in case RTI to user
1754
1755        mtpr	r12, ev5__ipl		// set the new ipl.
1756        or	r25, r31, sp		// sp
1757
1758//	pvc_violate 217			// possible hidden mt->mf ipl not a problem in callpals
1759        hw_rei
1760
1761        ALIGN_BLOCK
1762//+
1763// swpctx_cont
1764//	Finish up the swpctx instruction
1765//-
1766
1767swpctx_cont:
1768#if ev5_p1 != 0
1769
1770
1771        bic	r25, r24, r25		// clean icsr<FPE>
1772        get_impure r8			// get impure pointer
1773
1774        sll	r12, icsr_v_fpe, r12	// shift new fen to pos
1775        fix_impure_ipr r8		// adjust impure pointer
1776
1777        restore_reg1 pmctr_ctl, r8, r8, ipr=1	// "ldqp" - get pmctr_ctl bits
1778        srl	r23, 32, r24		// move asn to low asn pos
1779
1780        ldqp	r14, osfpcb_q_mmptr(r16)// get new mmptr
1781        srl	r22, osfpcb_v_pme, r22		// get pme down to bit 0
1782
1783        or	r25, r12, r25		// icsr with new fen
1784        sll	r24, itb_asn_v_asn, r12
1785
1786#else
1787
1788        bic	r25, r24, r25		// clean icsr<FPE,PMP>
1789        sll	r12, icsr_v_fpe, r12	// shift new fen to pos
1790
1791        ldqp	r14, osfpcb_q_mmptr(r16)// get new mmptr
1792        srl	r22, osfpcb_v_pme, r22	// get pme down to bit 0
1793
1794        or	r25, r12, r25		// icsr with new fen
1795        srl	r23, 32, r24		// move asn to low asn pos
1796
1797        and	r22, 1, r22
1798        sll	r24, itb_asn_v_asn, r12
1799
1800        sll	r22, icsr_v_pmp, r22
1801        nop
1802
1803        or	r25, r22, r25		// icsr with new pme
1804#endif
1805
1806        sll	r24, dtb_asn_v_asn, r24
1807
1808        subl	r23, r13, r13		// gen new cc offset
1809        mtpr	r12, itb_asn		// no hw_rei_stall in 0,1,2,3,4
1810
1811        mtpr	r24, dtb_asn		// Load up new ASN
1812        mtpr	r25, icsr		// write the icsr
1813
1814        sll	r14, page_offset_size_bits, r14 // Move PTBR into internal position.
1815        ldqp	r25, osfpcb_q_usp(r16)	// get new usp
1816
        insll	r13, 4, r13		// position new offset in CC<63:32>
1818//	pvc_violate 379			// ldqp can't trap except replay.  only problem if mf same ipr in same shadow
1819        mtpr	r14, pt_ptbr		// load the new ptbr
1820
1821        mtpr	r13, cc			// set new offset
1822        ldqp	r30, osfpcb_q_ksp(r16)	// get new ksp
1823
1824//	pvc_violate 379			// ldqp can't trap except replay.  only problem if mf same ipr in same shadow
1825        mtpr	r25, pt_usp		// save usp
1826
1827#if ev5_p1 != 0
1828
1829
1830        blbc	r8, no_pm_change		// if monitoring all processes -- no need to change pm
1831
1832        // otherwise, monitoring select processes - update pm
1833        lda	r25, 0x3F(r31)
1834        cmovlbc	r22, r31, r8			// if pme set, disable counters, otherwise use saved encodings
1835
1836        sll	r25, pmctr_v_ctl2, r25		// create ctl field bit mask
1837        mfpr	r22, ev5__pmctr
1838
1839        and	r8, r25, r8			// mask new ctl value
1840        bic	r22, r25, r22			// clear ctl field in pmctr
1841
1842        or	r8, r22, r8
1843        mtpr	r8, ev5__pmctr
1844
1845no_pm_change:
1846#endif
1847
1848
1849#if osf_chm_fix != 0
1850
1851
1852        p4_fixup_hw_rei_stall		// removes this section for Pass 4 by placing a hw_rei_stall here
1853
1854#if build_fixed_image != 0
1855
1856
1857        hw_rei_stall
1858#else
1859
1860        mfpr	r9, pt_pcbb		// get FEN
1861#endif
1862
1863        ldqp	r9, osfpcb_q_fen(r9)
1864        blbc	r9, no_pm_change_10_			// skip if FEN disabled
1865
1866        mb				// ensure no outstanding fills
1867        lda r12, 1<<dc_mode_v_dc_ena(r31)
1868        mtpr	r12, dc_mode		// turn dcache on so we can flush it
1869        nop				// force correct slotting
1870        mfpr	r31, pt0		// no mbox instructions in 1,2,3,4
1871        mfpr	r31, pt0		// no mbox instructions in 1,2,3,4
1872        mfpr	r31, pt0		// no mbox instructions in 1,2,3,4
1873        mfpr	r31, pt0		// no mbox instructions in 1,2,3,4
1874
1875        lda	r8, 0(r31)		// flood the dcache with junk data
1876no_pm_change_5_:	ldqp	r31, 0(r8)
1877        lda	r8, 0x20(r8)		// touch each cache block
1878        srl	r8, 13, r9
1879        blbc	r9, no_pm_change_5_
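        // Each pass above touches one 0x20-byte block; the loop exits once address bit 13
        // sets, i.e. after 0x2000/0x20 = 256 loads (8KB -- presumably the whole dcache).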
1880
1881        mb				// ensure no outstanding fills
1882        mtpr	r31, dc_mode		// turn the dcache back off
1883        nop				// force correct slotting
1884        mfpr	r31, pt0		// no hw_rei_stall in 0,1
1885#endif
1886
1887
1888no_pm_change_10_:	hw_rei_stall			// back we go
1889
1890        ALIGN_BLOCK
1891//+
1892// swppal_cont - finish up the swppal call_pal
1893//-
1894
1895swppal_cont:
1896        mfpr	r2, pt_misc		// get misc bits
1897        sll	r0, pt_misc_v_switch, r0 // get the "I've switched" bit
1898        or	r2, r0, r2		// set the bit
1899        mtpr	r31, ev5__alt_mode	// ensure alt_mode set to 0 (kernel)
1900        mtpr	r2, pt_misc		// update the chip
1901
1902        or	r3, r31, r4
1903        mfpr	r3, pt_impure		// pass pointer to the impure area in r3
1904//orig	fix_impure_ipr	r3		// adjust impure pointer for ipr read
1905//orig	restore_reg1	bc_ctl, r1, r3, ipr=1		// pass cns_bc_ctl in r1
1906//orig	restore_reg1	bc_config, r2, r3, ipr=1	// pass cns_bc_config in r2
1907//orig	unfix_impure_ipr r3		// restore impure pointer
1908        lda	r3, CNS_Q_IPR(r3)
        RESTORE_SHADOW(r1,CNS_Q_BC_CTL,r3);	// pass cns_bc_ctl in r1
        RESTORE_SHADOW(r2,CNS_Q_BC_CFG,r3);	// pass cns_bc_config in r2
1911        lda	r3, -CNS_Q_IPR(r3)
1912
1913        or	r31, r31, r0		// set status to success
1914//	pvc_violate	1007
1915        jmp	r31, (r4)		// and call our friend, it's her problem now
1916
1917
1918swppal_fail:
1919        addq	r0, 1, r0		// set unknown pal or not loaded
1920        hw_rei				// and return
1921
1922
1923// .sbttl	"Memory management"
1924
1925        ALIGN_BLOCK
1926//+
1927//foe_ipte_handler
1928// IFOE detected on level 3 pte, sort out FOE vs ACV
1929//
1930// on entry:
1931//	with
1932//	R8	 = pte
1933//	R10	 = pc
1934//
1935// Function
1936//	Determine TNV vs ACV vs FOE. Build stack and dispatch
1937//	Will not be here if TNV.
1938//-
1939
1940foe_ipte_handler:
1941        sll	r11, 63-osfps_v_mode, r25 // Shift mode up to MS bit
1942        mtpr	r31, ev5__ps		// Set Ibox current mode to kernel
1943
1944        bis	r11, r31, r12		// Save PS for stack write
1945        bge	r25, foe_ipte_handler_10_		// no stack swap needed if cm=kern
1946
1947
1948        mtpr	r31, ev5__dtb_cm	// Set Mbox current mode to kernel -
1949                                        //     no virt ref for next 2 cycles
1950        mtpr	r30, pt_usp		// save user stack
1951
1952        bis	r31, r31, r11		// Set new PS
1953        mfpr	r30, pt_ksp
1954
1955        srl	r8, osfpte_v_ure-osfpte_v_kre, r8 // move pte user bits to kern
1956        nop
1957
1958foe_ipte_handler_10_:	srl	r8, osfpte_v_kre, r25	// get kre to <0>
1959        lda	sp, 0-osfsf_c_size(sp)// allocate stack space
1960
1961        or	r10, r31, r14		// Save pc/va in case TBmiss or fault on stack
1962        mfpr	r13, pt_entmm		// get entry point
1963
1964        stq	r16, osfsf_a0(sp)	// a0
1965        or	r14, r31, r16		// pass pc/va as a0
1966
1967        stq	r17, osfsf_a1(sp)	// a1
1968        nop
1969
1970        stq	r18, osfsf_a2(sp) 	// a2
1971        lda	r17, mmcsr_c_acv(r31)	// assume ACV
1972
1973        stq	r16, osfsf_pc(sp)	// save pc
1974        cmovlbs r25, mmcsr_c_foe, r17	// otherwise FOE
1975
1976        stq	r12, osfsf_ps(sp)	// save ps
1977        subq	r31, 1, r18		// pass flag of istream as a2
1978
1979        stq	r29, osfsf_gp(sp)
1980        mtpr	r13, exc_addr		// set vector address
1981
1982        mfpr	r29, pt_kgp		// load kgp
1983        hw_rei_spe			// out to exec
1984
1985        ALIGN_BLOCK
1986//+
1987//invalid_ipte_handler
1988// TNV detected on level 3 pte, sort out TNV vs ACV
1989//
1990// on entry:
1991//	with
1992//	R8	 = pte
1993//	R10	 = pc
1994//
1995// Function
1996//	Determine TNV vs ACV. Build stack and dispatch.
1997//-
1998
1999invalid_ipte_handler:
2000        sll	r11, 63-osfps_v_mode, r25 // Shift mode up to MS bit
2001        mtpr	r31, ev5__ps		// Set Ibox current mode to kernel
2002
2003        bis	r11, r31, r12		// Save PS for stack write
2004        bge	r25, invalid_ipte_handler_10_		// no stack swap needed if cm=kern
2005
2006
2007        mtpr	r31, ev5__dtb_cm	// Set Mbox current mode to kernel -
2008                                        //     no virt ref for next 2 cycles
2009        mtpr	r30, pt_usp		// save user stack
2010
2011        bis	r31, r31, r11		// Set new PS
2012        mfpr	r30, pt_ksp
2013
2014        srl	r8, osfpte_v_ure-osfpte_v_kre, r8 // move pte user bits to kern
2015        nop
2016
2017invalid_ipte_handler_10_:	srl	r8, osfpte_v_kre, r25	// get kre to <0>
2018        lda	sp, 0-osfsf_c_size(sp)// allocate stack space
2019
2020        or	r10, r31, r14		// Save pc/va in case TBmiss on stack
2021        mfpr	r13, pt_entmm		// get entry point
2022
2023        stq	r16, osfsf_a0(sp)	// a0
2024        or	r14, r31, r16		// pass pc/va as a0
2025
2026        stq	r17, osfsf_a1(sp)	// a1
2027        nop
2028
2029        stq	r18, osfsf_a2(sp) 	// a2
2030        and	r25, 1, r17		// Isolate kre
2031
2032        stq	r16, osfsf_pc(sp)	// save pc
2033        xor	r17, 1, r17		// map to acv/tnv as a1
2034
2035        stq	r12, osfsf_ps(sp)	// save ps
2036        subq	r31, 1, r18		// pass flag of istream as a2
2037
2038        stq	r29, osfsf_gp(sp)
2039        mtpr	r13, exc_addr		// set vector address
2040
2041        mfpr	r29, pt_kgp		// load kgp
2042        hw_rei_spe			// out to exec
2043
2044
2045
2046
2047        ALIGN_BLOCK
2048//+
2049//invalid_dpte_handler
2050// INVALID detected on level 3 pte, sort out TNV vs ACV
2051//
2052// on entry:
2053//	with
2054//	R10	 = va
2055//	R8	 = pte
2056//	R9	 = mm_stat
2057//	PT6	 = pc
2058//
2059// Function
2060//	Determine TNV vs ACV. Build stack and dispatch
2061//-
2062
2063
2064invalid_dpte_handler:
2065        mfpr	r12, pt6
2066        blbs	r12, tnv_in_pal		// Special handler if original faulting reference was in PALmode
2067
2068        bis	r12, r31, r14		// save PC in case of tbmiss or fault
2069        srl	r9, mm_stat_v_opcode, r25	// shift opc to <0>
2070
2071        mtpr	r11, pt0		// Save PS for stack write
2072        and 	r25, mm_stat_m_opcode, r25	// isolate opcode
2073
2074        cmpeq	r25, evx_opc_sync, r25	// is it FETCH/FETCH_M?
2075        blbs	r25, nmiss_fetch_ldr31_err	// yes
2076
2077        //dismiss exception if load to r31/f31
2078        blbs	r9, invalid_dpte_no_dismiss	// mm_stat<0> set on store or fetchm
2079
2080                                        // not a store or fetch, must be a load
2081        srl	r9, mm_stat_v_ra, r25	// Shift rnum to low bits
2082
2083        and	r25, 0x1F, r25		// isolate rnum
2084        nop
2085
2086        cmpeq   r25, 0x1F, r25  	// Is the rnum r31 or f31?
2087        bne     r25, nmiss_fetch_ldr31_err    // Yes, dismiss the fault
2088
2089invalid_dpte_no_dismiss:
2090        sll	r11, 63-osfps_v_mode, r25 // Shift mode up to MS bit
2091        mtpr	r31, ev5__ps		// Set Ibox current mode to kernel
2092
2093        mtpr	r31, ev5__dtb_cm	// Set Mbox current mode to kernel -
2094                                        //     no virt ref for next 2 cycles
2095        bge	r25, invalid_dpte_no_dismiss_10_		// no stack swap needed if cm=kern
2096
2097        srl	r8, osfpte_v_ure-osfpte_v_kre, r8 // move pte user bits to kern
2098        mtpr	r30, pt_usp		// save user stack
2099
2100        bis	r31, r31, r11		// Set new PS
2101        mfpr	r30, pt_ksp
2102
2103invalid_dpte_no_dismiss_10_:	srl	r8, osfpte_v_kre, r12	// get kre to <0>
2104        lda	sp, 0-osfsf_c_size(sp)// allocate stack space
2105
2106        or	r10, r31, r25		// Save va in case TBmiss on stack
2107        and	r9, 1, r13		// save r/w flag
2108
2109        stq	r16, osfsf_a0(sp)	// a0
2110        or	r25, r31, r16		// pass va as a0
2111
2112        stq	r17, osfsf_a1(sp)	// a1
2113        or	r31, mmcsr_c_acv, r17 	// assume acv
2114
2115        srl	r12, osfpte_v_kwe-osfpte_v_kre, r25 // get write enable to <0>
2116        stq	r29, osfsf_gp(sp)
2117
2118        stq	r18, osfsf_a2(sp) 	// a2
2119        cmovlbs r13, r25, r12		// if write access move acv based on write enable
2120
2121        or	r13, r31, r18		// pass flag of dstream access and read vs write
2122        mfpr	r25, pt0		// get ps
2123
2124        stq	r14, osfsf_pc(sp)	// save pc
2125        mfpr	r13, pt_entmm		// get entry point
2126
2127        stq	r25, osfsf_ps(sp)	// save ps
2128        mtpr	r13, exc_addr		// set vector address
2129
2130        mfpr	r29, pt_kgp		// load kgp
2131        cmovlbs	r12, mmcsr_c_tnv, r17 	// make p2 be tnv if access ok else acv
2132
2133        hw_rei_spe			// out to exec
2134
2135//+
2136//
2137// We come here if we are erring on a dtb_miss, and the instr is a
// fetch, fetch_m, or load to r31/f31.
// The PC is incremented, and we return to the program,
// essentially ignoring the instruction and error.
2141//
2142//-
2143        ALIGN_BLOCK
2144nmiss_fetch_ldr31_err:
2145        mfpr	r12, pt6
2146        addq	r12, 4, r12		// bump pc to pc+4
2147
2148        mtpr	r12, exc_addr		// and set entry point
2149        mfpr	r31, pt0		// pad exc_addr write
2150
2151        hw_rei				//
2152
2153        ALIGN_BLOCK
2154//+
2155// double_pte_inv
2156//	We had a single tbmiss which turned into a double tbmiss which found
2157//	an invalid PTE.  Return to single miss with a fake pte, and the invalid
2158//	single miss flow will report the error.
2159//
2160// on entry:
2161//	r21  	PTE
2162//	r22	available
2163//	VA IPR	locked with original fault VA
2164//       pt4  	saved r21
2165//	pt5  	saved r22
2166//	pt6	original exc_addr
2167//
2168// on return to tbmiss flow:
2169//	r8	fake PTE
2170//
2171//
2172//-
2173double_pte_inv:
2174        srl	r21, osfpte_v_kre, r21	// get the kre bit to <0>
2175        mfpr	r22, exc_addr		// get the pc
2176
2177        lda	r22, 4(r22)		// inc the pc
2178        lda	r8, osfpte_m_prot(r31)	 // make a fake pte with xre and xwe set
2179
2180        cmovlbc r21, r31, r8		// set to all 0 for acv if pte<kre> is 0
2181        mtpr	r22, exc_addr		// set for rei
2182
2183        mfpr	r21, pt4		// restore regs
2184        mfpr	r22, pt5		// restore regs
2185
2186        hw_rei				// back to tb miss
2187
2188        ALIGN_BLOCK
2189//+
2190//tnv_in_pal
//	The only places in PAL that load or store are the
// 	stack builders, rti, and retsys.  Any of these means we
//	need to take a ksp-not-valid halt.
2194//
2195//-
2196tnv_in_pal:
2197
2198
2199        br	r31, ksp_inval_halt
2200
2201
2202// .sbttl	"Icache flush routines"
2203
2204        ALIGN_BLOCK
2205//+
2206// Common Icache flush routine.
2207//
2208//
2209//-
2210pal_ic_flush:
2211        nop
2212        mtpr	r31, ev5__ic_flush_ctl		// Icache flush - E1
2213        nop
2214        nop
2215
// Now, do 44 NOPs.  3 RFB prefetches (24) + IC buffer, IB, slot, issue (20)
2217        nop
2218        nop
2219        nop
2220        nop
2221
2222        nop
2223        nop
2224        nop
2225        nop
2226
2227        nop
2228        nop		// 10
2229
2230        nop
2231        nop
2232        nop
2233        nop
2234
2235        nop
2236        nop
2237        nop
2238        nop
2239
2240        nop
2241        nop		// 20
2242
2243        nop
2244        nop
2245        nop
2246        nop
2247
2248        nop
2249        nop
2250        nop
2251        nop
2252
2253        nop
2254        nop		// 30
2255        nop
2256        nop
2257        nop
2258        nop
2259
2260        nop
2261        nop
2262        nop
2263        nop
2264
2265        nop
2266        nop		// 40
2267
2268        nop
2269        nop
2270
2271one_cycle_and_hw_rei:
2272        nop
2273        nop
2274
2275        hw_rei_stall
2276
2277#if icflush_on_tbix != 0
2278
2279
2280        ALIGN_BLOCK
2281
2282//+
2283// Common Icache flush and ITB invalidate single routine.
2284// ITBIS and hw_rei_stall must be in same octaword.
2285//	r17 - has address to invalidate
2286//
2287//-
pal_ic_flush_and_tbisi:			// label must match the lower-case branch targets in tbi_tbl
2289        nop
2290        mtpr	r31, ev5__ic_flush_ctl		// Icache flush - E1
2291        nop
2292        nop
2293
// Now, do 44 NOPs.  3 RFB prefetches (24) + IC buffer, IB, slot, issue (20)
2295        nop
2296        nop
2297        nop
2298        nop
2299
2300        nop
2301        nop
2302        nop
2303        nop
2304
2305        nop
2306        nop		// 10
2307
2308        nop
2309        nop
2310        nop
2311        nop
2312
2313        nop
2314        nop
2315        nop
2316        nop
2317
2318        nop
2319        nop		// 20
2320
2321        nop
2322        nop
2323        nop
2324        nop
2325
2326        nop
2327        nop
2328        nop
2329        nop
2330
2331        nop
2332        nop		// 30
2333        nop
2334        nop
2335        nop
2336        nop
2337
2338        nop
2339        nop
2340        nop
2341        nop
2342
2343        nop
2344        nop		// 40
2345
2346
2347        nop
2348        nop
2349
2350        nop
2351        nop
2352
        // A quadword is 64 bits, so an octaword is 128 bits -> 16 bytes -> 4 instructions.
        // The 44 NOPs plus the 4 instructions ahead of them make 48 instructions (192 bytes,
        // exactly 12 octawords), and this routine starts on a 32-byte (8 instruction)
        // boundary, so the following 2 instructions fall in the same octaword as required.
2357//	ALIGN_BRANCH
2358        mtpr	r17, ev5__itb_is	// Flush ITB
2359        hw_rei_stall
2360
2361#endif
2362
2363        ALIGN_BLOCK
2364//+
2365//osfpal_calpal_opcdec
2366//  Here for all opcdec CALL_PALs
2367//
2368//	Build stack frame
2369//	a0 <- code
2370//	a1 <- unpred
2371//	a2 <- unpred
2372//	vector via entIF
2373//
2374//-
2375
2376osfpal_calpal_opcdec:
2377        sll	r11, 63-osfps_v_mode, r25 // Shift mode up to MS bit
2378        mtpr	r31, ev5__ps		// Set Ibox current mode to kernel
2379
2380        mfpr	r14, exc_addr		// get pc
2381        nop
2382
2383        bis	r11, r31, r12		// Save PS for stack write
2384        bge	r25, osfpal_calpal_opcdec_10_		// no stack swap needed if cm=kern
2385
2386
2387        mtpr	r31, ev5__dtb_cm	// Set Mbox current mode to kernel -
2388                                        //     no virt ref for next 2 cycles
2389        mtpr	r30, pt_usp		// save user stack
2390
2391        bis	r31, r31, r11		// Set new PS
2392        mfpr	r30, pt_ksp
2393
2394osfpal_calpal_opcdec_10_:
2395        lda	sp, 0-osfsf_c_size(sp)// allocate stack space
2396        nop
2397
2398        stq	r16, osfsf_a0(sp)	// save regs
2399        bis	r31, osf_a0_opdec, r16	// set a0
2400
2401        stq	r18, osfsf_a2(sp) 	// a2
2402        mfpr	r13, pt_entif		// get entry point
2403
2404        stq	r12, osfsf_ps(sp)	// save old ps
2405        stq	r17, osfsf_a1(sp)	// a1
2406
2407        stq	r14, osfsf_pc(sp)	// save pc
2408        nop
2409
2410        stq	r29, osfsf_gp(sp) 	// save gp
2411        mtpr	r13, exc_addr		// load exc_addr with entIF
2412                                        // 1 cycle to hw_rei
2413
2414        mfpr	r29, pt_kgp		// get the kgp
2415
2416
2417        hw_rei_spe			// done
2418
2419
2420
2421
2422
2423//+
2424//pal_update_pcb
2425//	Update the PCB with the current SP, AST, and CC info
2426//
2427//	r0 - return linkage
2428//-
2429        ALIGN_BLOCK
2430
2431pal_update_pcb:
2432        mfpr	r12, pt_pcbb		// get pcbb
2433        and	r11, osfps_m_mode, r25	// get mode
2434        beq	r25, pal_update_pcb_10_		// in kern? no need to update user sp
2435        mtpr	r30, pt_usp		// save user stack
2436        stqp	r30, osfpcb_q_usp(r12)	// store usp
2437        br	r31, pal_update_pcb_20_		// join common
2438pal_update_pcb_10_:	stqp	r30, osfpcb_q_ksp(r12)	// store ksp
2439pal_update_pcb_20_:	rpcc	r13			// get cyccounter
2440        srl	r13, 32, r14		// move offset
2441        addl	r13, r14, r14		// merge for new time
2442        stlp	r14, osfpcb_l_cc(r12)	// save time
2443
2444//orig	pvc_jsr	updpcb, bsr=1, dest=1
2445        ret	r31, (r0)
2446
2447
2448
2449#if remove_save_state == 0
2450
2451// .sbttl  "PAL_SAVE_STATE"
2452//+
2453//
2454// Pal_save_state
2455//
2456//	Function
2457//		All chip state saved, all PT's, SR's FR's, IPR's
2458//
2459//
2460// Regs' on entry...
2461//
2462//	R0 	= halt code
2463//	pt0	= r0
2464//	R1	= pointer to impure
2465//	pt4	= r1
2466//	R3	= return addr
2467//	pt5	= r3
2468//
2469//	register usage:
2470//		r0 = halt_code
2471//		r1 = addr of impure area
2472//		r3 = return_address
2473//		r4 = scratch
2474//
2475//-
2476
2477
2478        ALIGN_BLOCK
2479        .globl pal_save_state
2480pal_save_state:
2481//
2482//
2483// start of implementation independent save routine
2484//
// 		the impure area is larger than the addressability of hw_ld and hw_st
//		therefore, we need to play some games:  The impure area
//		is informally divided into the "machine independent" part and the
//		"machine dependent" part.  The state saved in the
//    		"machine independent" part comprises the gprs, fprs, hlt, flag, and mchkflag (use the (un)fix_impure_gpr macros).
//		All other state goes in the "machine dependent" part (use the (un)fix_impure_ipr macros).
//		The impure pointer must be adjusted by a different offset for each part.  The store/restore_reg
//		macros automagically adjust the offset correctly.
2493//
2494
2495// The distributed code is commented out and followed by corresponding SRC code.
2496// Beware: SAVE_IPR and RESTORE_IPR blow away r0(v0)
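// (The SAVE_/RESTORE_ macros used below are defined elsewhere; each is assumed to expand
//  to a single physical-mode store/load of the named register at the given offset from
//  the base register, with SAVE_IPR/RESTORE_IPR staging the value through r0 -- which is
//  why r0 is clobbered as noted above.)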
2497
2498//orig	fix_impure_gpr	r1		// adjust impure area pointer for stores to "gpr" part of impure area
2499        lda	r1, 0x200(r1)		// Point to center of CPU segment
2500//orig	store_reg1 flag, r31, r1, ipr=1	// clear dump area flag
2501        SAVE_GPR(r31,CNS_Q_FLAG,r1)	// Clear the valid flag
2502//orig	store_reg1 hlt, r0, r1, ipr=1
2503        SAVE_GPR(r0,CNS_Q_HALT,r1)	// Save the halt code
2504
2505        mfpr	r0, pt0			// get r0 back			//orig
2506//orig	store_reg1 0, r0, r1		// save r0
2507        SAVE_GPR(r0,CNS_Q_GPR+0x00,r1)	// Save r0
2508
2509        mfpr	r0, pt4			// get r1 back			//orig
2510//orig	store_reg1 1, r0, r1		// save r1
2511        SAVE_GPR(r0,CNS_Q_GPR+0x08,r1)	// Save r1
2512
2513//orig	store_reg 2			// save r2
2514        SAVE_GPR(r2,CNS_Q_GPR+0x10,r1)	// Save r2
2515
2516        mfpr	r0, pt5			// get r3 back			//orig
2517//orig	store_reg1 3, r0, r1		// save r3
2518        SAVE_GPR(r0,CNS_Q_GPR+0x18,r1)	// Save r3
2519
2520        // reason code has been saved
2521        // r0 has been saved
2522        // r1 has been saved
2523        // r2 has been saved
2524        // r3 has been saved
2525        // pt0, pt4, pt5 have been lost
2526
2527        //
2528        // Get out of shadow mode
2529        //
2530
2531        mfpr	r2, icsr		// Get icsr			//orig
2532//orig	ldah	r0, <1@<icsr_v_sde-16>>(r31)	// Get a one in SHADOW_ENABLE bit location
2533        ldah	r0, (1<<(icsr_v_sde-16))(r31)
2534        bic	r2, r0, r0		// ICSR with SDE clear		//orig
2535        mtpr	r0, icsr		// Turn off SDE			//orig
2536
2537        mfpr	r31, pt0		// SDE bubble cycle 1		//orig
2538        mfpr	r31, pt0		// SDE bubble cycle 2		//orig
2539        mfpr	r31, pt0		// SDE bubble cycle 3		//orig
2540        nop								//orig
2541
2542
2543        // save integer regs R4-r31
2544//orig  #define t 4
2545//orig	.repeat 28
2546//orig	  store_reg \t
2547//orig #define t t + 1
2548//orig	.endr
2549        SAVE_GPR(r4,CNS_Q_GPR+0x20,r1)
2550        SAVE_GPR(r5,CNS_Q_GPR+0x28,r1)
2551        SAVE_GPR(r6,CNS_Q_GPR+0x30,r1)
2552        SAVE_GPR(r7,CNS_Q_GPR+0x38,r1)
2553        SAVE_GPR(r8,CNS_Q_GPR+0x40,r1)
2554        SAVE_GPR(r9,CNS_Q_GPR+0x48,r1)
2555        SAVE_GPR(r10,CNS_Q_GPR+0x50,r1)
2556        SAVE_GPR(r11,CNS_Q_GPR+0x58,r1)
2557        SAVE_GPR(r12,CNS_Q_GPR+0x60,r1)
2558        SAVE_GPR(r13,CNS_Q_GPR+0x68,r1)
2559        SAVE_GPR(r14,CNS_Q_GPR+0x70,r1)
2560        SAVE_GPR(r15,CNS_Q_GPR+0x78,r1)
2561        SAVE_GPR(r16,CNS_Q_GPR+0x80,r1)
2562        SAVE_GPR(r17,CNS_Q_GPR+0x88,r1)
2563        SAVE_GPR(r18,CNS_Q_GPR+0x90,r1)
2564        SAVE_GPR(r19,CNS_Q_GPR+0x98,r1)
2565        SAVE_GPR(r20,CNS_Q_GPR+0xA0,r1)
2566        SAVE_GPR(r21,CNS_Q_GPR+0xA8,r1)
2567        SAVE_GPR(r22,CNS_Q_GPR+0xB0,r1)
2568        SAVE_GPR(r23,CNS_Q_GPR+0xB8,r1)
2569        SAVE_GPR(r24,CNS_Q_GPR+0xC0,r1)
2570        SAVE_GPR(r25,CNS_Q_GPR+0xC8,r1)
2571        SAVE_GPR(r26,CNS_Q_GPR+0xD0,r1)
2572        SAVE_GPR(r27,CNS_Q_GPR+0xD8,r1)
2573        SAVE_GPR(r28,CNS_Q_GPR+0xE0,r1)
2574        SAVE_GPR(r29,CNS_Q_GPR+0xE8,r1)
2575        SAVE_GPR(r30,CNS_Q_GPR+0xF0,r1)
2576        SAVE_GPR(r31,CNS_Q_GPR+0xF8,r1)
2577
2578        // save all paltemp regs except pt0
2579
2580//orig	unfix_impure_gpr	r1		// adjust impure area pointer for gpr stores
2581//orig	fix_impure_ipr	r1			// adjust impure area pointer for pt stores
2582//orig #define t 1
2583//orig	.repeat 23
2584//orig	  store_reg \t	, pal=1
2585//orig #define t t + 1
2586//orig	.endr
2587
2588        lda	r1, -0x200(r1)		// Restore the impure base address.
2589        lda	r1, CNS_Q_IPR(r1)	// Point to the base of IPR area.
2590        SAVE_IPR(pt0,CNS_Q_PT+0x00,r1)		// the osf code didn't save/restore palTemp 0 ?? pboyle
2591        SAVE_IPR(pt1,CNS_Q_PT+0x08,r1)
2592        SAVE_IPR(pt2,CNS_Q_PT+0x10,r1)
2593        SAVE_IPR(pt3,CNS_Q_PT+0x18,r1)
2594        SAVE_IPR(pt4,CNS_Q_PT+0x20,r1)
2595        SAVE_IPR(pt5,CNS_Q_PT+0x28,r1)
2596        SAVE_IPR(pt6,CNS_Q_PT+0x30,r1)
2597        SAVE_IPR(pt7,CNS_Q_PT+0x38,r1)
2598        SAVE_IPR(pt8,CNS_Q_PT+0x40,r1)
2599        SAVE_IPR(pt9,CNS_Q_PT+0x48,r1)
2600        SAVE_IPR(pt10,CNS_Q_PT+0x50,r1)
2601        SAVE_IPR(pt11,CNS_Q_PT+0x58,r1)
2602        SAVE_IPR(pt12,CNS_Q_PT+0x60,r1)
2603        SAVE_IPR(pt13,CNS_Q_PT+0x68,r1)
2604        SAVE_IPR(pt14,CNS_Q_PT+0x70,r1)
2605        SAVE_IPR(pt15,CNS_Q_PT+0x78,r1)
2606        SAVE_IPR(pt16,CNS_Q_PT+0x80,r1)
2607        SAVE_IPR(pt17,CNS_Q_PT+0x88,r1)
2608        SAVE_IPR(pt18,CNS_Q_PT+0x90,r1)
2609        SAVE_IPR(pt19,CNS_Q_PT+0x98,r1)
2610        SAVE_IPR(pt20,CNS_Q_PT+0xA0,r1)
2611        SAVE_IPR(pt21,CNS_Q_PT+0xA8,r1)
2612        SAVE_IPR(pt22,CNS_Q_PT+0xB0,r1)
2613        SAVE_IPR(pt23,CNS_Q_PT+0xB8,r1)
2614
2615        // Restore shadow mode
2616        mfpr	r31, pt0		// pad write to icsr out of shadow of store (trap does not abort write)	//orig
2617        mfpr	r31, pt0											//orig
2618        mtpr	r2, icsr		// Restore original ICSR						//orig
2619
2620        mfpr	r31, pt0		// SDE bubble cycle 1							//orig
2621        mfpr	r31, pt0		// SDE bubble cycle 2							//orig
2622        mfpr	r31, pt0		// SDE bubble cycle 3							//orig
2623        nop													//orig
2624
2625        // save all integer shadow regs
2626
2627//orig #define t 8
2628//orig	.repeat 7
2629//orig	  store_reg \t,  shadow=1
2630//orig #define t t + 1
2631//orig	.endr
2632//orig	store_reg 25,  shadow=1
2633
2634        SAVE_SHADOW( r8,CNS_Q_SHADOW+0x00,r1)	// also called p0...p7 in the Hudson code
2635        SAVE_SHADOW( r9,CNS_Q_SHADOW+0x08,r1)
2636        SAVE_SHADOW(r10,CNS_Q_SHADOW+0x10,r1)
2637        SAVE_SHADOW(r11,CNS_Q_SHADOW+0x18,r1)
2638        SAVE_SHADOW(r12,CNS_Q_SHADOW+0x20,r1)
2639        SAVE_SHADOW(r13,CNS_Q_SHADOW+0x28,r1)
2640        SAVE_SHADOW(r14,CNS_Q_SHADOW+0x30,r1)
2641        SAVE_SHADOW(r25,CNS_Q_SHADOW+0x38,r1)
2642
2643//orig	store_reg exc_addr,	ipr=1	// save ipr
2644//orig	store_reg pal_base,	ipr=1	// save ipr
2645//orig	store_reg mm_stat,	ipr=1	// save ipr
2646//orig	store_reg va,		ipr=1	// save ipr
2647//orig	store_reg icsr,		ipr=1   // save ipr
2648//orig	store_reg ipl,		ipr=1	// save ipr
2649//orig	store_reg ps,		ipr=1	// save ipr
2650//orig	store_reg itb_asn,	ipr=1   // save ipr
2651//orig	store_reg aster,	ipr=1	// save ipr
2652//orig	store_reg astrr,	ipr=1	// save ipr
2653//orig	store_reg sirr,		ipr=1	// save ipr
2654//orig	store_reg isr,		ipr=1	// save ipr
2655//orig	store_reg ivptbr,	ipr=1	// save ipr
2656//orig	store_reg mcsr,		ipr=1	// save ipr
2657//orig	store_reg dc_mode,	ipr=1	// save ipr
2658
2659        SAVE_IPR(excAddr,CNS_Q_EXC_ADDR,r1)
2660        SAVE_IPR(palBase,CNS_Q_PAL_BASE,r1)
2661        SAVE_IPR(mmStat,CNS_Q_MM_STAT,r1)
2662        SAVE_IPR(va,CNS_Q_VA,r1)
2663        SAVE_IPR(icsr,CNS_Q_ICSR,r1)
2664        SAVE_IPR(ipl,CNS_Q_IPL,r1)
2665        SAVE_IPR(ips,CNS_Q_IPS,r1)
2666        SAVE_IPR(itbAsn,CNS_Q_ITB_ASN,r1)
2667        SAVE_IPR(aster,CNS_Q_ASTER,r1)
2668        SAVE_IPR(astrr,CNS_Q_ASTRR,r1)
2669        SAVE_IPR(sirr,CNS_Q_SIRR,r1)
2670        SAVE_IPR(isr,CNS_Q_ISR,r1)
2671        SAVE_IPR(iVptBr,CNS_Q_IVPTBR,r1)
2672        SAVE_IPR(mcsr,CNS_Q_MCSR,r1)
2673        SAVE_IPR(dcMode,CNS_Q_DC_MODE,r1)
2674
2675//orig	pvc_violate 379			// mf maf_mode after a store ok (pvc doesn't distinguish ld from st)
2676//orig	store_reg maf_mode,	ipr=1	// save ipr -- no mbox instructions for
2677//orig                                  // PVC violation applies only to
2678pvc$osf35$379:				    // loads. HW_ST ok here, so ignore
2679        SAVE_IPR(mafMode,CNS_Q_MAF_MODE,r1) // MBOX INST->MF MAF_MODE IN 0,1,2
2680
2681
2682        //the following iprs are informational only -- will not be restored
2683
2684//orig	store_reg icperr_stat,	ipr=1
2685//orig	store_reg pmctr,	ipr=1
2686//orig	store_reg intid,	ipr=1
2687//orig	store_reg exc_sum,	ipr=1
2688//orig	store_reg exc_mask,	ipr=1
2689//orig	ldah	r14, 0xfff0(r31)
2690//orig	zap	r14, 0xE0, r14			// Get Cbox IPR base
2691//orig	nop					// pad mf dcperr_stat out of shadow of last store
2692//orig	nop
2693//orig	nop
2694//orig	store_reg dcperr_stat,	ipr=1
2695
2696        SAVE_IPR(icPerr,CNS_Q_ICPERR_STAT,r1)
2697        SAVE_IPR(PmCtr,CNS_Q_PM_CTR,r1)
2698        SAVE_IPR(intId,CNS_Q_INT_ID,r1)
2699        SAVE_IPR(excSum,CNS_Q_EXC_SUM,r1)
2700        SAVE_IPR(excMask,CNS_Q_EXC_MASK,r1)
2701        ldah	r14, 0xFFF0(zero)
2702        zap	r14, 0xE0, r14		// Get base address of CBOX IPRs
2703        NOP				// Pad mfpr dcPerr out of shadow of
2704        NOP				// last store
2705        NOP
2706        SAVE_IPR(dcPerr,CNS_Q_DCPERR_STAT,r1)
2707
2708        // read cbox ipr state
2709
2710//orig	mb
2711//orig	ldqp	r2, ev5__sc_ctl(r14)
2712//orig	ldqp	r13, ld_lock(r14)
2713//orig	ldqp	r4, ev5__sc_addr(r14)
2714//orig	ldqp	r5, ev5__ei_addr(r14)
2715//orig	ldqp	r6, ev5__bc_tag_addr(r14)
2716//orig	ldqp	r7, ev5__fill_syn(r14)
2717//orig	bis	r5, r4, r31
2718//orig	bis	r7, r6, r31		// make sure previous loads finish before reading stat registers which unlock them
2719//orig	ldqp	r8, ev5__sc_stat(r14)	// unlocks sc_stat,sc_addr
2720//orig	ldqp	r9, ev5__ei_stat(r14)	// may unlock ei_*, bc_tag_addr, fill_syn
2721//orig	ldqp	r31, ev5__ei_stat(r14)	// ensures it is really unlocked
2722//orig	mb
2723
2724#ifndef SIMOS
2725        mb
2726        ldq_p	r2, scCtl(r14)
2727        ldq_p	r13, ldLock(r14)
2728        ldq_p	r4, scAddr(r14)
2729        ldq_p	r5, eiAddr(r14)
2730        ldq_p	r6, bcTagAddr(r14)
2731        ldq_p	r7, fillSyn(r14)
2732        bis	r5, r4, zero		// Make sure all loads complete before
2733        bis	r7, r6, zero		// reading registers that unlock them.
2734        ldq_p	r8, scStat(r14)		// Unlocks scAddr.
2735        ldq_p	r9, eiStat(r14)		// Unlocks eiAddr, bcTagAddr, fillSyn.
2736        ldq_p	zero, eiStat(r14)	// Make sure it is really unlocked.
2737        mb
2738#endif
2739//orig	// save cbox ipr state
2740//orig	store_reg1 sc_ctl, r2, r1, ipr=1
2741//orig	store_reg1 ld_lock, r13, r1, ipr=1
2742//orig	store_reg1 sc_addr, r4, r1, ipr=1
2743//orig	store_reg1 ei_addr, r5, r1, ipr=1
2744//orig	store_reg1 bc_tag_addr, r6, r1, ipr=1
2745//orig	store_reg1 fill_syn, r7, r1, ipr=1
2746//orig	store_reg1 sc_stat, r8, r1, ipr=1
2747//orig	store_reg1 ei_stat, r9, r1, ipr=1
2748//orig //bc_config? sl_rcv?
2749
2750        SAVE_SHADOW(r2,CNS_Q_SC_CTL,r1);
2751        SAVE_SHADOW(r13,CNS_Q_LD_LOCK,r1);
2752        SAVE_SHADOW(r4,CNS_Q_SC_ADDR,r1);
2753        SAVE_SHADOW(r5,CNS_Q_EI_ADDR,r1);
2754        SAVE_SHADOW(r6,CNS_Q_BC_TAG_ADDR,r1);
2755        SAVE_SHADOW(r7,CNS_Q_FILL_SYN,r1);
2756        SAVE_SHADOW(r8,CNS_Q_SC_STAT,r1);
2757        SAVE_SHADOW(r9,CNS_Q_EI_STAT,r1);
2758
2759// restore impure base								//orig
2760//orig	unfix_impure_ipr r1
2761        lda	r1, -CNS_Q_IPR(r1)
2762
2763// save all floating regs							//orig
2764        mfpr	r0, icsr		// get icsr				//orig
2765        or	r31, 1, r2		// get a one				//orig
2766//orig	sll	r2, #icsr_v_fpe, r2	// shift for fpu spot			//orig
2767        sll	r2, icsr_v_fpe, r2	// Shift it into ICSR<FPE> position
2768        or	r2, r0, r0		// set FEN on				//orig
2769        mtpr	r0, icsr		// write to icsr, enabling FEN		//orig
2770
2771// map the save area virtually
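// (The PFN is taken from the virtual address itself -- srl by va_s_off, then sll into the
//  PFN field -- so this builds an identity mapping of the impure area, assuming r1 holds
//  its physical address; pte_m_prot turns on all the read/write enable bits.)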
2772// orig	mtpr	r31, dtb_ia		// clear the dtb
2773// orig	srl	r1, page_offset_size_bits, r0 // Clean off low bits of VA
2774// orig	sll	r0, 32, r0		// shift to PFN field
2775// orig	lda	r2, 0xff(r31)		// all read enable and write enable bits set
2776// orig	sll	r2, 8, r2		// move to PTE location
2777// orig	addq	r0, r2, r0		// combine with PFN
2778// orig	mtpr	r0, dtb_pte		// Load PTE and set TB valid bit
2779// orig	mtpr	r1, dtb_tag		// write TB tag
2780
2781        mtpr	r31, dtbIa		// Clear all DTB entries
2782        srl	r1, va_s_off, r0	// Clean off byte-within-page offset
2783        sll	r0, pte_v_pfn, r0	// Shift to form PFN
2784        lda	r0, pte_m_prot(r0)	// Set all read/write enable bits
2785        mtpr	r0, dtbPte		// Load the PTE and set valid
2786        mtpr	r1, dtbTag		// Write the PTE and tag into the DTB
2787
2788
2789//orig // map the next page too - in case the impure area crosses a page boundary
2790//orig	lda 	r4, 1@page_offset_size_bits(r1)	// generate address for next page
2791//orig	srl	r4, page_offset_size_bits, r0 // Clean off low bits of VA
2792//orig	sll	r0, 32, r0		// shift to PFN field
2793//orig	lda	r2, 0xff(r31)		// all read enable and write enable bits set
2794//orig	sll	r2, 8, r2		// move to PTE location
2795//orig	addq	r0, r2, r0		// combine with PFN
2796//orig	mtpr	r0, dtb_pte		// Load PTE and set TB valid bit
2797//orig	mtpr	r4, dtb_tag		// write TB tag
2798
2799        lda	r4, (1<<va_s_off)(r1)	// Generate address for next page
2800        srl	r4, va_s_off, r0	// Clean off byte-within-page offset
2801        sll	r0, pte_v_pfn, r0	// Shift to form PFN
2802        lda	r0, pte_m_prot(r0)	// Set all read/write enable bits
2803        mtpr	r0, dtbPte		// Load the PTE and set valid
2804        mtpr	r4, dtbTag		// Write the PTE and tag into the DTB
2805
2806        sll	r31, 0, r31		// stall cycle 1				// orig
2807        sll	r31, 0, r31		// stall cycle 2				// orig
2808        sll	r31, 0, r31		// stall cycle 3				// orig
2809        nop										// orig
2810
2811//orig // add offset for saving fpr regs
2812//orig	fix_impure_gpr r1
2813
2814        lda	r1, 0x200(r1)		// Point to center of CPU segment
2815
2816// now save the regs - F0-F31
2817
2818//orig #define t 0
2819//orig	.repeat 32
2820//orig	  store_reg \t , fpu=1
2821//orig #define t t + 1
2822//orig	.endr
2823
2824        mf_fpcr  f0			// original
2825
2826        SAVE_FPR(f0,CNS_Q_FPR+0x00,r1)
2827        SAVE_FPR(f1,CNS_Q_FPR+0x08,r1)
2828        SAVE_FPR(f2,CNS_Q_FPR+0x10,r1)
2829        SAVE_FPR(f3,CNS_Q_FPR+0x18,r1)
2830        SAVE_FPR(f4,CNS_Q_FPR+0x20,r1)
2831        SAVE_FPR(f5,CNS_Q_FPR+0x28,r1)
2832        SAVE_FPR(f6,CNS_Q_FPR+0x30,r1)
2833        SAVE_FPR(f7,CNS_Q_FPR+0x38,r1)
2834        SAVE_FPR(f8,CNS_Q_FPR+0x40,r1)
2835        SAVE_FPR(f9,CNS_Q_FPR+0x48,r1)
2836        SAVE_FPR(f10,CNS_Q_FPR+0x50,r1)
2837        SAVE_FPR(f11,CNS_Q_FPR+0x58,r1)
2838        SAVE_FPR(f12,CNS_Q_FPR+0x60,r1)
2839        SAVE_FPR(f13,CNS_Q_FPR+0x68,r1)
2840        SAVE_FPR(f14,CNS_Q_FPR+0x70,r1)
2841        SAVE_FPR(f15,CNS_Q_FPR+0x78,r1)
2842        SAVE_FPR(f16,CNS_Q_FPR+0x80,r1)
2843        SAVE_FPR(f17,CNS_Q_FPR+0x88,r1)
2844        SAVE_FPR(f18,CNS_Q_FPR+0x90,r1)
2845        SAVE_FPR(f19,CNS_Q_FPR+0x98,r1)
2846        SAVE_FPR(f20,CNS_Q_FPR+0xA0,r1)
2847        SAVE_FPR(f21,CNS_Q_FPR+0xA8,r1)
2848        SAVE_FPR(f22,CNS_Q_FPR+0xB0,r1)
2849        SAVE_FPR(f23,CNS_Q_FPR+0xB8,r1)
2850        SAVE_FPR(f24,CNS_Q_FPR+0xC0,r1)
2851        SAVE_FPR(f25,CNS_Q_FPR+0xC8,r1)
2852        SAVE_FPR(f26,CNS_Q_FPR+0xD0,r1)
2853        SAVE_FPR(f27,CNS_Q_FPR+0xD8,r1)
2854        SAVE_FPR(f28,CNS_Q_FPR+0xE0,r1)
2855        SAVE_FPR(f29,CNS_Q_FPR+0xE8,r1)
2856        SAVE_FPR(f30,CNS_Q_FPR+0xF0,r1)
2857        SAVE_FPR(f31,CNS_Q_FPR+0xF8,r1)
2858
2859//orig	//switch impure offset from gpr to ipr---
2860//orig	unfix_impure_gpr	r1
2861//orig	fix_impure_ipr	r1
2862//orig	store_reg1 fpcsr, f0, r1, fpcsr=1
2863
        SAVE_FPR(f0,CNS_Q_FPCSR,r1)	// fpcsr loaded above into f0 -- can it reach? // pb
2865        lda	r1, -0x200(r1)		// Restore the impure base address
2866
2867//orig	// and back to gpr ---
2868//orig	unfix_impure_ipr	r1
2869//orig	fix_impure_gpr	r1
2870
2871//orig	lda	r0, cns_mchksize(r31)	// get size of mchk area
2872//orig	store_reg1 mchkflag, r0, r1, ipr=1
2873//orig	mb
2874
2875        lda	r1, CNS_Q_IPR(r1)	// Point to base of IPR area again
        // save this using the IPR base (it is closer), not the GPR base as they used... pb
2877        lda	r0, MACHINE_CHECK_SIZE(r31)	// get size of mchk area
2878        SAVE_SHADOW(r0,CNS_Q_MCHK,r1);
2879        mb
2880
2881//orig	or	r31, 1, r0		// get a one
2882//orig	store_reg1 flag, r0, r1, ipr=1	// set dump area flag
2883//orig	mb
2884
2885        lda	r1, -CNS_Q_IPR(r1)	// back to the base
2886        lda	r1, 0x200(r1)		// Point to center of CPU segment
2887        or	r31, 1, r0		// get a one
        SAVE_GPR(r0,CNS_Q_FLAG,r1)	// Set dump area valid flag
2889        mb
2890
2891//orig	// restore impure area base
2892//orig	unfix_impure_gpr r1
2893        lda	r1, -0x200(r1)		// Point to center of CPU segment
2894
2895        mtpr	r31, dtb_ia		// clear the dtb	//orig
2896        mtpr	r31, itb_ia		// clear the itb	//orig
2897
2898//orig	pvc_jsr	savsta, bsr=1, dest=1
2899        ret	r31, (r3)		// and back we go
2900#endif
2901
2902
2903#if remove_restore_state == 0
2904
2905
2906// .sbttl  "PAL_RESTORE_STATE"
2907//+
2908//
2909//	Pal_restore_state
2910//
2911//
2912//	register usage:
2913//		r1 = addr of impure area
2914//		r3 = return_address
2915//		all other regs are scratchable, as they are about to
2916//		be reloaded from ram.
2917//
2918//	Function:
2919//		All chip state restored, all SRs, FRs, PTs, IPRs
2920//					*** except R1, R3, PT0, PT4, PT5 ***
2921//
2922//-
2923        ALIGN_BLOCK
2924pal_restore_state:
2925
2926//need to restore sc_ctl,bc_ctl,bc_config??? if so, need to figure out a safe way to do so.
2927
2928//orig	// map the console io area virtually
2929//orig	mtpr	r31, dtb_ia		// clear the dtb
2930//orig	srl	r1, page_offset_size_bits, r0 // Clean off low bits of VA
2931//orig	sll	r0, 32, r0		// shift to PFN field
2932//orig	lda	r2, 0xff(r31)		// all read enable and write enable bits set
2933//orig	sll	r2, 8, r2		// move to PTE location
2934//orig	addq	r0, r2, r0		// combine with PFN
2935//orig
2936//orig	mtpr	r0, dtb_pte		// Load PTE and set TB valid bit
2937//orig	mtpr	r1, dtb_tag		// write TB tag
2938//orig
2939
2940        mtpr	r31, dtbIa		// Clear all DTB entries
2941        srl	r1, va_s_off, r0	// Clean off byte-within-page offset
2942        sll	r0, pte_v_pfn, r0	// Shift to form PFN
2943        lda	r0, pte_m_prot(r0)	// Set all read/write enable bits
2944        mtpr	r0, dtbPte		// Load the PTE and set valid
2945        mtpr	r1, dtbTag		// Write the PTE and tag into the DTB
2946
2947
2948//orig	// map the next page too, in case impure area crosses page boundary
2949//orig	lda 	r4, 1@page_offset_size_bits(r1)	// generate address for next page
2950//orig	srl	r4, page_offset_size_bits, r0 // Clean off low bits of VA
2951//orig	sll	r0, 32, r0		// shift to PFN field
2952//orig	lda	r2, 0xff(r31)		// all read enable and write enable bits set
2953//orig	sll	r2, 8, r2		// move to PTE location
2954//orig	addq	r0, r2, r0		// combine with PFN
2955//orig
2956//orig	mtpr	r0, dtb_pte		// Load PTE and set TB valid bit
2957//orig	mtpr	r4, dtb_tag		// write TB tag - no virtual mbox instruction for 3 cycles
2958
2959        lda	r4, (1<<VA_S_OFF)(r1)	// Generate address for next page
2960        srl	r4, va_s_off, r0	// Clean off byte-within-page offset
2961        sll	r0, pte_v_pfn, r0	// Shift to form PFN
2962        lda	r0, pte_m_prot(r0)	// Set all read/write enable bits
2963        mtpr	r0, dtbPte		// Load the PTE and set valid
2964        mtpr	r4, dtbTag		// Write the PTE and tag into the DTB
2965
2966//orig	// save all floating regs
2967//orig	mfpr	r0, icsr		// get icsr
2968//orig// 	assume	ICSR_V_SDE gt <ICSR_V_FPE>		// assertion checker
2969//orig	or	r31, <<1@<ICSR_V_SDE-ICSR_V_FPE>> ! 1>, r2	// set SDE and FPE
2970//orig	sll	r2, #icsr_v_fpe, r2	// shift for fpu spot
2971//orig	or	r2, r0, r0		// set FEN on
2972//orig	mtpr	r0, icsr		// write to icsr, enabling FEN and SDE.  3 bubbles to floating instr.
2973
2974        mfpr	r0, icsr		// Get current ICSR
2975        bis	zero, 1, r2		// Get a '1'
2976        or	r2, (1<<(icsr_v_sde-icsr_v_fpe)), r2
2977        sll	r2, icsr_v_fpe, r2	// Shift bits into position
        bis	r2, r0, r0		// Set ICSR<SDE> and ICSR<FPE> in the current ICSR value
2979        mtpr	r0, icsr		// Update the chip
2980
2981        mfpr	r31, pt0		// FPE bubble cycle 1		//orig
2982        mfpr	r31, pt0		// FPE bubble cycle 2		//orig
2983        mfpr	r31, pt0		// FPE bubble cycle 3		//orig
2984
2985//orig	fix_impure_ipr r1
2986//orig	restore_reg1 fpcsr, f0, r1, fpcsr=1
2987//orig	mt_fpcr  f0
2988//orig
2989//orig	unfix_impure_ipr r1
2990//orig	fix_impure_gpr r1		// adjust impure pointer offset for gpr access
2991//orig
2992//orig	// restore all floating regs
2993//orig#define t 0
2994//orig	.repeat 32
2995//orig	  restore_reg \t , fpu=1
2996//orig#define t t + 1
2997//orig	.endr
2998
2999        lda	r1, 200(r1)	// Point to base of IPR area again
3000        RESTORE_FPR(f0,CNS_Q_FPCSR,r1)		// can it reach?? pb
3001        mt_fpcr  f0			// original
3002
3003        lda	r1, 0x200(r1)		// point to center of CPU segment
3004        RESTORE_FPR(f0,CNS_Q_FPR+0x00,r1)
3005        RESTORE_FPR(f1,CNS_Q_FPR+0x08,r1)
3006        RESTORE_FPR(f2,CNS_Q_FPR+0x10,r1)
3007        RESTORE_FPR(f3,CNS_Q_FPR+0x18,r1)
3008        RESTORE_FPR(f4,CNS_Q_FPR+0x20,r1)
3009        RESTORE_FPR(f5,CNS_Q_FPR+0x28,r1)
3010        RESTORE_FPR(f6,CNS_Q_FPR+0x30,r1)
3011        RESTORE_FPR(f7,CNS_Q_FPR+0x38,r1)
3012        RESTORE_FPR(f8,CNS_Q_FPR+0x40,r1)
3013        RESTORE_FPR(f9,CNS_Q_FPR+0x48,r1)
3014        RESTORE_FPR(f10,CNS_Q_FPR+0x50,r1)
3015        RESTORE_FPR(f11,CNS_Q_FPR+0x58,r1)
3016        RESTORE_FPR(f12,CNS_Q_FPR+0x60,r1)
3017        RESTORE_FPR(f13,CNS_Q_FPR+0x68,r1)
3018        RESTORE_FPR(f14,CNS_Q_FPR+0x70,r1)
3019        RESTORE_FPR(f15,CNS_Q_FPR+0x78,r1)
3020        RESTORE_FPR(f16,CNS_Q_FPR+0x80,r1)
3021        RESTORE_FPR(f17,CNS_Q_FPR+0x88,r1)
3022        RESTORE_FPR(f18,CNS_Q_FPR+0x90,r1)
3023        RESTORE_FPR(f19,CNS_Q_FPR+0x98,r1)
3024        RESTORE_FPR(f20,CNS_Q_FPR+0xA0,r1)
3025        RESTORE_FPR(f21,CNS_Q_FPR+0xA8,r1)
3026        RESTORE_FPR(f22,CNS_Q_FPR+0xB0,r1)
3027        RESTORE_FPR(f23,CNS_Q_FPR+0xB8,r1)
3028        RESTORE_FPR(f24,CNS_Q_FPR+0xC0,r1)
3029        RESTORE_FPR(f25,CNS_Q_FPR+0xC8,r1)
3030        RESTORE_FPR(f26,CNS_Q_FPR+0xD0,r1)
3031        RESTORE_FPR(f27,CNS_Q_FPR+0xD8,r1)
3032        RESTORE_FPR(f28,CNS_Q_FPR+0xE0,r1)
3033        RESTORE_FPR(f29,CNS_Q_FPR+0xE8,r1)
3034        RESTORE_FPR(f30,CNS_Q_FPR+0xF0,r1)
3035        RESTORE_FPR(f31,CNS_Q_FPR+0xF8,r1)
3036
3037//orig	// switch impure pointer from gpr to ipr area --
3038//orig	unfix_impure_gpr r1
3039//orig	fix_impure_ipr r1
3040//orig
3041//orig	// restore all pal regs
3042//orig#define t 1
3043//orig	.repeat 23
3044//orig	  restore_reg \t	, pal=1
3045//orig#define t t + 1
3046//orig	.endr
3047
3048        lda	r1, -0x200(r1)		// Restore base address of impure area.
3049        lda	r1, CNS_Q_IPR(r1)	// Point to base of IPR area.
3050        RESTORE_IPR(pt0,CNS_Q_PT+0x00,r1)		// the osf code didn't save/restore palTemp 0 ?? pboyle
3051        RESTORE_IPR(pt1,CNS_Q_PT+0x08,r1)
3052        RESTORE_IPR(pt2,CNS_Q_PT+0x10,r1)
3053        RESTORE_IPR(pt3,CNS_Q_PT+0x18,r1)
3054        RESTORE_IPR(pt4,CNS_Q_PT+0x20,r1)
3055        RESTORE_IPR(pt5,CNS_Q_PT+0x28,r1)
3056        RESTORE_IPR(pt6,CNS_Q_PT+0x30,r1)
3057        RESTORE_IPR(pt7,CNS_Q_PT+0x38,r1)
3058        RESTORE_IPR(pt8,CNS_Q_PT+0x40,r1)
3059        RESTORE_IPR(pt9,CNS_Q_PT+0x48,r1)
3060        RESTORE_IPR(pt10,CNS_Q_PT+0x50,r1)
3061        RESTORE_IPR(pt11,CNS_Q_PT+0x58,r1)
3062        RESTORE_IPR(pt12,CNS_Q_PT+0x60,r1)
3063        RESTORE_IPR(pt13,CNS_Q_PT+0x68,r1)
3064        RESTORE_IPR(pt14,CNS_Q_PT+0x70,r1)
3065        RESTORE_IPR(pt15,CNS_Q_PT+0x78,r1)
3066        RESTORE_IPR(pt16,CNS_Q_PT+0x80,r1)
3067        RESTORE_IPR(pt17,CNS_Q_PT+0x88,r1)
3068        RESTORE_IPR(pt18,CNS_Q_PT+0x90,r1)
3069        RESTORE_IPR(pt19,CNS_Q_PT+0x98,r1)
3070        RESTORE_IPR(pt20,CNS_Q_PT+0xA0,r1)
3071        RESTORE_IPR(pt21,CNS_Q_PT+0xA8,r1)
3072        RESTORE_IPR(pt22,CNS_Q_PT+0xB0,r1)
3073        RESTORE_IPR(pt23,CNS_Q_PT+0xB8,r1)
3074
3075
3076//orig	restore_reg exc_addr,	ipr=1	// restore ipr
3077//orig	restore_reg pal_base,	ipr=1	// restore ipr
3078//orig	restore_reg ipl,	ipr=1	// restore ipr
3079//orig	restore_reg ps,		ipr=1	// restore ipr
3080//orig	mtpr	r0, dtb_cm		// set current mode in mbox too
3081//orig	restore_reg itb_asn,	ipr=1
3082//orig	srl	r0, itb_asn_v_asn, r0
3083//orig	sll	r0, dtb_asn_v_asn, r0
3084//orig	mtpr	r0, dtb_asn		// set ASN in Mbox too
3085//orig	restore_reg ivptbr,	ipr=1
3086//orig	mtpr	r0, mvptbr			// use ivptbr value to restore mvptbr
3087//orig	restore_reg mcsr,	ipr=1
3088//orig	restore_reg aster,	ipr=1
3089//orig	restore_reg astrr,	ipr=1
3090//orig	restore_reg sirr,	ipr=1
3091//orig	restore_reg maf_mode, 	ipr=1		// no mbox instruction for 3 cycles
3092//orig	mfpr	r31, pt0			// (may issue with mt maf_mode)
3093//orig	mfpr	r31, pt0			// bubble cycle 1
3094//orig	mfpr	r31, pt0                        // bubble cycle 2
3095//orig	mfpr	r31, pt0                        // bubble cycle 3
3096//orig	mfpr	r31, pt0			// (may issue with following ld)
3097
        // r0 gets the value read by RESTORE_IPR inside the macro, and this code relies on that side effect (gag)
3099        RESTORE_IPR(excAddr,CNS_Q_EXC_ADDR,r1)
3100        RESTORE_IPR(palBase,CNS_Q_PAL_BASE,r1)
3101        RESTORE_IPR(ipl,CNS_Q_IPL,r1)
3102        RESTORE_IPR(ips,CNS_Q_IPS,r1)
3103        mtpr	r0, dtbCm			// Set Mbox current mode too.
3104        RESTORE_IPR(itbAsn,CNS_Q_ITB_ASN,r1)
        srl	r0, 4, r0			// itb_asn_v_asn: shift ASN out of its ITB_ASN position
        sll	r0, 57, r0			// dtb_asn_v_asn: shift ASN into its DTB_ASN position
3107        mtpr	r0, dtbAsn			// Set Mbox ASN too
3108        RESTORE_IPR(iVptBr,CNS_Q_IVPTBR,r1)
3109        mtpr	r0, mVptBr			// Set Mbox VptBr too
3110        RESTORE_IPR(mcsr,CNS_Q_MCSR,r1)
3111        RESTORE_IPR(aster,CNS_Q_ASTER,r1)
3112        RESTORE_IPR(astrr,CNS_Q_ASTRR,r1)
3113        RESTORE_IPR(sirr,CNS_Q_SIRR,r1)
3114        RESTORE_IPR(mafMode,CNS_Q_MAF_MODE,r1)
3115        STALL
3116        STALL
3117        STALL
3118        STALL
3119        STALL
3120
3121
3122        // restore all integer shadow regs
3123//orig#define t 8
3124//orig	.repeat 7
3125//orig	  restore_reg \t, shadow=1
3126//orig#define t t + 1
3127//orig	.endr
3128//orig	restore_reg 25, shadow=1
3129//orig	restore_reg dc_mode, 	ipr=1		// no mbox instructions for 4 cycles
3130
3131        RESTORE_SHADOW( r8,CNS_Q_SHADOW+0x00,r1)	// also called p0...p7 in the Hudson code
3132        RESTORE_SHADOW( r9,CNS_Q_SHADOW+0x08,r1)
3133        RESTORE_SHADOW(r10,CNS_Q_SHADOW+0x10,r1)
3134        RESTORE_SHADOW(r11,CNS_Q_SHADOW+0x18,r1)
3135        RESTORE_SHADOW(r12,CNS_Q_SHADOW+0x20,r1)
3136        RESTORE_SHADOW(r13,CNS_Q_SHADOW+0x28,r1)
3137        RESTORE_SHADOW(r14,CNS_Q_SHADOW+0x30,r1)
3138        RESTORE_SHADOW(r25,CNS_Q_SHADOW+0x38,r1)
3139        RESTORE_IPR(dcMode,CNS_Q_DC_MODE,r1)
3140
3141        //
3142        // Get out of shadow mode
3143        //
3144
3145        mfpr	r31, pt0		// pad last load to icsr write (in case of replay, icsr will be written anyway)	//orig
3146        mfpr	r31, pt0		// ""										//orig
3147        mfpr	r0, icsr		// Get icsr									//orig
3148//orig	ldah	r2,  <1@<icsr_v_sde-16>>(r31)	// Get a one in SHADOW_ENABLE bit location
3149        ldah	r2,  (1<<(ICSR_V_SDE-16))(r31)	// Get a one in SHADOW_ENABLE bit location				//orig
3150        bic	r0, r2, r2		// ICSR with SDE clear								//orig
3151        mtpr	r2, icsr		// Turn off SDE - no palshadow rd/wr for 3 bubble cycles			//orig
3152
3153        mfpr	r31, pt0		// SDE bubble cycle 1								//orig
3154        mfpr	r31, pt0		// SDE bubble cycle 2								//orig
3155        mfpr	r31, pt0		// SDE bubble cycle 3								//orig
3156        nop														//orig
3157
3158//orig	// switch impure pointer from ipr to gpr area --
3159//orig	unfix_impure_ipr	r1
3160//orig	fix_impure_gpr	r1
3161//orig	// restore all integer regs
3162//orig#define t 4
3163//orig	.repeat 28
3164//orig	  restore_reg \t
3165//orig#define t t + 1
3166//orig	.endr
3167
3168// Restore GPRs (r0, r2 are restored later, r1 and r3 are trashed) ...
3169
3170        lda	r1, -CNS_Q_IPR(r1)	// Restore base address of impure area
3171        lda	r1, 0x200(r1)		// Point to center of CPU segment
3172
3173        RESTORE_GPR(r4,CNS_Q_GPR+0x20,r1)
3174        RESTORE_GPR(r5,CNS_Q_GPR+0x28,r1)
3175        RESTORE_GPR(r6,CNS_Q_GPR+0x30,r1)
3176        RESTORE_GPR(r7,CNS_Q_GPR+0x38,r1)
3177        RESTORE_GPR(r8,CNS_Q_GPR+0x40,r1)
3178        RESTORE_GPR(r9,CNS_Q_GPR+0x48,r1)
3179        RESTORE_GPR(r10,CNS_Q_GPR+0x50,r1)
3180        RESTORE_GPR(r11,CNS_Q_GPR+0x58,r1)
3181        RESTORE_GPR(r12,CNS_Q_GPR+0x60,r1)
3182        RESTORE_GPR(r13,CNS_Q_GPR+0x68,r1)
3183        RESTORE_GPR(r14,CNS_Q_GPR+0x70,r1)
3184        RESTORE_GPR(r15,CNS_Q_GPR+0x78,r1)
3185        RESTORE_GPR(r16,CNS_Q_GPR+0x80,r1)
3186        RESTORE_GPR(r17,CNS_Q_GPR+0x88,r1)
3187        RESTORE_GPR(r18,CNS_Q_GPR+0x90,r1)
3188        RESTORE_GPR(r19,CNS_Q_GPR+0x98,r1)
3189        RESTORE_GPR(r20,CNS_Q_GPR+0xA0,r1)
3190        RESTORE_GPR(r21,CNS_Q_GPR+0xA8,r1)
3191        RESTORE_GPR(r22,CNS_Q_GPR+0xB0,r1)
3192        RESTORE_GPR(r23,CNS_Q_GPR+0xB8,r1)
3193        RESTORE_GPR(r24,CNS_Q_GPR+0xC0,r1)
3194        RESTORE_GPR(r25,CNS_Q_GPR+0xC8,r1)
3195        RESTORE_GPR(r26,CNS_Q_GPR+0xD0,r1)
3196        RESTORE_GPR(r27,CNS_Q_GPR+0xD8,r1)
3197        RESTORE_GPR(r28,CNS_Q_GPR+0xE0,r1)
3198        RESTORE_GPR(r29,CNS_Q_GPR+0xE8,r1)
3199        RESTORE_GPR(r30,CNS_Q_GPR+0xF0,r1)
3200        RESTORE_GPR(r31,CNS_Q_GPR+0xF8,r1)
3201
3202//orig	// switch impure pointer from gpr to ipr area --
3203//orig	unfix_impure_gpr	r1
3204//orig	fix_impure_ipr	r1
3205//orig	restore_reg icsr, ipr=1		// restore original icsr- 4 bubbles to hw_rei
3206
        lda	r1, -0x200(r1)		// Restore base address of impure area.
        lda	r1, CNS_Q_IPR(r1)	// Point to base of IPR area again.
3209        RESTORE_IPR(icsr,CNS_Q_ICSR,r1)
3210
3211//orig	// and back again --
3212//orig	unfix_impure_ipr	r1
3213//orig	fix_impure_gpr	r1
3214//orig	store_reg1 	flag, r31, r1, ipr=1 // clear dump area valid flag
3215//orig	mb
3216
        lda	r1, -CNS_Q_IPR(r1)	// Back to base of impure area again,
        lda	r1, 0x200(r1)		// and back to center of CPU segment
3219        SAVE_GPR(r31,CNS_Q_FLAG,r1)	// Clear the dump area valid flag
3220        mb
3221
3222//orig	// and back we go
3223//orig//	restore_reg 3
3224//orig	restore_reg 2
3225//orig//	restore_reg 1
3226//orig	restore_reg 0
3227//orig	// restore impure area base
3228//orig	unfix_impure_gpr r1
3229
3230        RESTORE_GPR(r2,CNS_Q_GPR+0x10,r1)
3231        RESTORE_GPR(r0,CNS_Q_GPR+0x00,r1)
3232        lda	r1, -0x200(r1)		// Restore impure base address
3233
3234        mfpr	r31, pt0		// stall for ldqp above		//orig
3235
3236        mtpr	r31, dtb_ia		// clear the tb			//orig
3237        mtpr	r31, itb_ia		// clear the itb		//orig
3238
3239//orig	pvc_jsr	rststa, bsr=1, dest=1
3240        ret	r31, (r3)		// back we go			//orig
3241#endif
3242
3243
3244//+
3245// pal_pal_bug_check -- code has found a bugcheck situation.
3246//	Set things up and join common machine check flow.
3247//
3248// Input:
3249//	r14 	- exc_addr
3250//
3251// On exit:
3252//	pt0	- saved r0
3253//	pt1	- saved	r1
3254//	pt4	- saved r4
3255//	pt5	- saved r5
3256//	pt6	- saved r6
3257//	pt10	- saved exc_addr
3258//       pt_misc<47:32> - mchk code
3259//       pt_misc<31:16> - scb vector
3260//	r14	- base of Cbox IPRs in IO space
3261//	MCES<mchk> is set
3262//-
3263
3264                ALIGN_BLOCK
3265        .globl pal_pal_bug_check_from_int
3266pal_pal_bug_check_from_int:
3267        DEBUGSTORE(0x79)
3268//simos	DEBUG_EXC_ADDR()
3269        DEBUGSTORE(0x20)
3270//simos	bsr	r25, put_hex
3271        lda	r25, mchk_c_bugcheck(r31)
3272        addq	r25, 1, r25			// set flag indicating we came from interrupt and stack is already pushed
3273        br	r31, pal_pal_mchk
3274        nop
3275
3276pal_pal_bug_check:
3277        lda     r25, mchk_c_bugcheck(r31)
3278
3279pal_pal_mchk:
3280        sll	r25, 32, r25			// Move mchk code to position
3281
3282        mtpr	r14, pt10			// Stash exc_addr
3283        mtpr	r14, exc_addr
3284
3285        mfpr	r12, pt_misc			// Get MCES and scratch
3286        zap	r12, 0x3c, r12
3287
3288        or	r12, r25, r12			// Combine mchk code
3289        lda	r25, scb_v_procmchk(r31)	// Get SCB vector
3290
3291        sll	r25, 16, r25			// Move SCBv to position
3292        or	r12, r25, r25			// Combine SCBv
3293
3294        mtpr	r0, pt0				// Stash for scratch
3295        bis	r25, mces_m_mchk, r25	// Set MCES<MCHK> bit
3296
3297        mtpr	r25, pt_misc			// Save mchk code!scbv!whami!mces
3298        ldah	r14, 0xfff0(r31)
3299
3300        mtpr	r1, pt1				// Stash for scratch
3301        zap	r14, 0xE0, r14			// Get Cbox IPR base
3302
3303        mtpr	r4, pt4
3304        mtpr	r5, pt5
3305
3306        mtpr	r6, pt6
3307        blbs	r12, sys_double_machine_check   // MCHK halt if double machine check
3308
3309        br	r31, sys_mchk_collect_iprs	// Join common machine check flow
3310
3311//	align_to_call_pal_section	// Align to address of first call_pal entry point - 2000
3312
3313// .sbttl	"HALT	- PALcode for HALT instruction"
3314
3315//+
3316//
3317// Entry:
3318//	Vectored into via hardware PALcode instruction dispatch.
3319//
3320// Function:
3321//	GO to console code
3322//
3323//-
3324
3325        .text	1
3326//	. = 0x2000
3327       CALL_PAL_PRIV(PAL_HALT_ENTRY)
3328call_pal_halt:
3329#if rax_mode == 0
3330        mfpr	r31, pt0		// Pad exc_addr read
3331        mfpr	r31, pt0
3332
3333        mfpr	r12, exc_addr		// get PC
3334        subq	r12, 4, r12		// Point to the HALT
3335
3336        mtpr	r12, exc_addr
3337        mtpr	r0, pt0
3338
3339//orig	pvc_jsr updpcb, bsr=1
3340        bsr    r0, pal_update_pcb      	// update the pcb
3341        lda    r0, hlt_c_sw_halt(r31)  	// set halt code to sw halt
3342        br     r31, sys_enter_console  	// enter the console
3343
3344#else					// RAX mode
3345        mb
3346        mb
3347        mtpr	r9, ev5__dtb_asn	// no Dstream virtual ref for next 3 cycles.
3348        mtpr	r9, ev5__itb_asn	// E1.  Update ITB ASN.  No hw_rei for 5 cycles.
3349        mtpr    r8, exc_addr		// no HW_REI for 1 cycle.
3350        blbc	r9, not_begin_case
3351        mtpr    r31, ev5__dtb_ia        // clear DTB. No Dstream virtual ref for 2 cycles.
3352        mtpr    r31, ev5__itb_ia        // clear ITB.
3353
3354not_begin_case:
3355        nop
3356        nop
3357
3358        nop
3359        nop				// pad mt itb_asn ->hw_rei_stall
3360
3361        hw_rei_stall
3362#endif
3363
3364// .sbttl	"CFLUSH- PALcode for CFLUSH instruction"
3365
3366//+
3367//
3368// Entry:
3369//	Vectored into via hardware PALcode instruction dispatch.
3370//
3371//	R16 - contains the PFN of the page to be flushed
3372//
3373// Function:
3374//	Flush all Dstream caches of 1 entire page
3375//	The CFLUSH routine is in the system specific module.
3376//
3377//-
3378
3379        CALL_PAL_PRIV(PAL_CFLUSH_ENTRY)
3380Call_Pal_Cflush:
3381        br	r31, sys_cflush
3382
3383// .sbttl	"DRAINA	- PALcode for DRAINA instruction"
3384//+
3385//
3386// Entry:
3387//	Vectored into via hardware PALcode instruction dispatch.
3388//	Implicit TRAPB performed by hardware.
3389//
3390// Function:
3391//	Stall instruction issue until all prior instructions are guaranteed to
3392//	complete without incurring aborts.  For the EV5 implementation, this
3393//	means waiting until all pending DREADS are returned.
3394//
3395//-
3396
3397        CALL_PAL_PRIV(PAL_DRAINA_ENTRY)
3398Call_Pal_Draina:
3399        ldah	r14, 0x100(r31)		// Init counter.  Value?
3400        nop
3401
3402DRAINA_LOOP:
3403        subq	r14, 1, r14		// Decrement counter
3404        mfpr	r13, ev5__maf_mode	// Fetch status bit
3405
3406        srl	r13, maf_mode_v_dread_pending, r13
3407        ble	r14, DRAINA_LOOP_TOO_LONG
3408
3409        nop
3410        blbs	r13, DRAINA_LOOP	// Wait until all DREADS clear
3411
3412        hw_rei
3413
3414DRAINA_LOOP_TOO_LONG:
3415        br	r31, call_pal_halt
3416
3417// .sbttl	"CALL_PAL OPCDECs"
3418
3419        CALL_PAL_PRIV(0x0003)
3420CallPal_OpcDec03:
3421        br	r31, osfpal_calpal_opcdec
3422
3423        CALL_PAL_PRIV(0x0004)
3424CallPal_OpcDec04:
3425        br	r31, osfpal_calpal_opcdec
3426
3427        CALL_PAL_PRIV(0x0005)
3428CallPal_OpcDec05:
3429        br	r31, osfpal_calpal_opcdec
3430
3431        CALL_PAL_PRIV(0x0006)
3432CallPal_OpcDec06:
3433        br	r31, osfpal_calpal_opcdec
3434
3435        CALL_PAL_PRIV(0x0007)
3436CallPal_OpcDec07:
3437        br	r31, osfpal_calpal_opcdec
3438
3439        CALL_PAL_PRIV(0x0008)
3440CallPal_OpcDec08:
3441        br	r31, osfpal_calpal_opcdec
3442
3443// .sbttl	"CSERVE- PALcode for CSERVE instruction"
3444//+
3445//
3446// Entry:
3447//	Vectored into via hardware PALcode instruction dispatch.
3448//
3449// Function:
3450//       Various functions for private use of console software
3451//
3452//       option selector in r0
3453//       arguments in r16....
3454//	The CSERVE routine is in the system specific module.
3455//
3456//-
3457
3458        CALL_PAL_PRIV(PAL_CSERVE_ENTRY)
3459Call_Pal_Cserve:
3460        br	r31, sys_cserve
3461
3462// .sbttl	"swppal - PALcode for swppal instruction"
3463
3464//+
3465//
3466// Entry:
3467//	Vectored into via hardware PALcode instruction dispatch.
3468//       Vectored into via hardware PALcode instruction dispatch.
3469//               R16 contains the new PAL identifier
3470//               R17:R21 contain implementation-specific entry parameters
3471//
3472//               R0  receives status:
3473//                0 success (PAL was switched)
3474//                1 unknown PAL variant
3475//                2 known PAL variant, but PAL not loaded
3476//
3477//
3478// Function:
3479//       Swap control to another PAL.
3480//-
3481
3482        CALL_PAL_PRIV(PAL_SWPPAL_ENTRY)
3483Call_Pal_Swppal:
3484        cmpule	r16, 255, r0		// see if a kibble was passed
3485        cmoveq  r16, r16, r0            // if r16=0 then a valid address (ECO 59)
3486
        or	r16, r31, r3		// set r3 in case this is an address
3488        blbc	r0, swppal_cont		// nope, try it as an address
3489
3490        cmpeq	r16, 2, r0		// is it our friend OSF?
3491        blbc	r0, swppal_fail		// nope, don't know this fellow
3492
3493        br	r2, CALL_PAL_SWPPAL_10_			// tis our buddy OSF
3494
3495//	.global	osfpal_hw_entry_reset
3496//	.weak	osfpal_hw_entry_reset
3497//	.long	<osfpal_hw_entry_reset-pal_start>
3498//orig	halt				// don't know how to get the address here - kludge ok, load pal at 0
3499        .long	0			// ?? hack upon hack...pb
3500
3501CALL_PAL_SWPPAL_10_: 	ldlp	r3, 0(r2)		// fetch target addr
3502//	ble	r3, swppal_fail		; if OSF not linked in say not loaded.
3503        mfpr	r2, pal_base		// fetch pal base
3504
3505        addq	r2, r3, r3		// add pal base
3506        lda	r2, 0x3FFF(r31)		// get pal base checker mask
3507
3508        and	r3, r2, r2		// any funky bits set?
3509        cmpeq	r2, 0, r0		//
3510
3511        blbc	r0, swppal_fail		// return unknown if bad bit set.
3512        br	r31, swppal_cont
3513
3514// .sbttl	"CALL_PAL OPCDECs"
3515
3516        CALL_PAL_PRIV(0x000B)
3517CallPal_OpcDec0B:
3518        br	r31, osfpal_calpal_opcdec
3519
3520        CALL_PAL_PRIV(0x000C)
3521CallPal_OpcDec0C:
3522        br	r31, osfpal_calpal_opcdec
3523
3524// .sbttl	"wripir- PALcode for wripir instruction"
3525//+
3526//
3527// Entry:
3528//	Vectored into via hardware PALcode instruction dispatch.
3529//	r16 = processor number to interrupt
3530//
3531// Function:
3532//	IPIR	<- R16
3533//	Handled in system-specific code
3534//
3535// Exit:
3536//	interprocessor interrupt is recorded on the target processor
3537//	and is initiated when the proper enabling conditions are present.
3538//-
3539
3540        CALL_PAL_PRIV(PAL_WRIPIR_ENTRY)
3541Call_Pal_Wrpir:
3542        br	r31, sys_wripir
3543
3544// .sbttl	"CALL_PAL OPCDECs"
3545
3546        CALL_PAL_PRIV(0x000E)
3547CallPal_OpcDec0E:
3548        br	r31, osfpal_calpal_opcdec
3549
3550        CALL_PAL_PRIV(0x000F)
3551CallPal_OpcDec0F:
3552        br	r31, osfpal_calpal_opcdec
3553
3554// .sbttl	"rdmces- PALcode for rdmces instruction"
3555
3556//+
3557//
3558// Entry:
3559//	Vectored into via hardware PALcode instruction dispatch.
3560//
3561// Function:
3562//	R0 <- ZEXT(MCES)
3563//-
3564
3565        CALL_PAL_PRIV(PAL_RDMCES_ENTRY)
3566Call_Pal_Rdmces:
3567        mfpr	r0, pt_mces		// Read from PALtemp
3568        and	r0, mces_m_all, r0	// Clear other bits
3569
3570        hw_rei
3571
3572// .sbttl	"wrmces- PALcode for wrmces instruction"
3573
3574//+
3575//
3576// Entry:
3577//	Vectored into via hardware PALcode instruction dispatch.
3578//
3579// Function:
3580//	If {R16<0> EQ 1} then MCES<0> <- 0 (MCHK)
3581//	If {R16<1> EQ 1} then MCES<1> <- 0 (SCE)
3582//	If {R16<2> EQ 1} then MCES<2> <- 0 (PCE)
3583//	MCES<3> <- R16<3>		   (DPC)
3584//	MCES<4> <- R16<4>		   (DSC)
3585//
3586//-
3587
3588        CALL_PAL_PRIV(PAL_WRMCES_ENTRY)
3589Call_Pal_Wrmces:
3590        and	r16, ((1<<mces_v_mchk) | (1<<mces_v_sce) | (1<<mces_v_pce)), r13	// Isolate MCHK, SCE, PCE
3591        mfpr	r14, pt_mces		// Get current value
3592
3593        ornot	r31, r13, r13		// Flip all the bits
3594        and	r16, ((1<<mces_v_dpc) | (1<<mces_v_dsc)), r17
3595
3596        and	r14, r13, r1		// Update MCHK, SCE, PCE
3597        bic	r1, ((1<<mces_v_dpc) | (1<<mces_v_dsc)), r1	// Clear old DPC, DSC
3598
3599        or	r1, r17, r1		// Update DPC and DSC
3600        mtpr	r1, pt_mces		// Write MCES back
3601
3602#if rawhide_system == 0
3603        nop				// Pad to fix PT write->read restriction
3604#else
3605        blbs	r16, RAWHIDE_clear_mchk_lock	// Clear logout from lock
3606#endif
3607
3608        nop
3609        hw_rei
3610
3611
3612
3613// .sbttl	"CALL_PAL OPCDECs"
3614
3615        CALL_PAL_PRIV(0x0012)
3616CallPal_OpcDec12:
3617        br	r31, osfpal_calpal_opcdec
3618
3619        CALL_PAL_PRIV(0x0013)
3620CallPal_OpcDec13:
3621        br	r31, osfpal_calpal_opcdec
3622
3623        CALL_PAL_PRIV(0x0014)
3624CallPal_OpcDec14:
3625        br	r31, osfpal_calpal_opcdec
3626
3627        CALL_PAL_PRIV(0x0015)
3628CallPal_OpcDec15:
3629        br	r31, osfpal_calpal_opcdec
3630
3631        CALL_PAL_PRIV(0x0016)
3632CallPal_OpcDec16:
3633        br	r31, osfpal_calpal_opcdec
3634
3635        CALL_PAL_PRIV(0x0017)
3636CallPal_OpcDec17:
3637        br	r31, osfpal_calpal_opcdec
3638
3639        CALL_PAL_PRIV(0x0018)
3640CallPal_OpcDec18:
3641        br	r31, osfpal_calpal_opcdec
3642
3643        CALL_PAL_PRIV(0x0019)
3644CallPal_OpcDec19:
3645        br	r31, osfpal_calpal_opcdec
3646
3647        CALL_PAL_PRIV(0x001A)
3648CallPal_OpcDec1A:
3649        br	r31, osfpal_calpal_opcdec
3650
3651        CALL_PAL_PRIV(0x001B)
3652CallPal_OpcDec1B:
3653        br	r31, osfpal_calpal_opcdec
3654
3655        CALL_PAL_PRIV(0x001C)
3656CallPal_OpcDec1C:
3657        br	r31, osfpal_calpal_opcdec
3658
3659        CALL_PAL_PRIV(0x001D)
3660CallPal_OpcDec1D:
3661        br	r31, osfpal_calpal_opcdec
3662
3663        CALL_PAL_PRIV(0x001E)
3664CallPal_OpcDec1E:
3665        br	r31, osfpal_calpal_opcdec
3666
3667        CALL_PAL_PRIV(0x001F)
3668CallPal_OpcDec1F:
3669        br	r31, osfpal_calpal_opcdec
3670
3671        CALL_PAL_PRIV(0x0020)
3672CallPal_OpcDec20:
3673        br	r31, osfpal_calpal_opcdec
3674
3675        CALL_PAL_PRIV(0x0021)
3676CallPal_OpcDec21:
3677        br	r31, osfpal_calpal_opcdec
3678
3679        CALL_PAL_PRIV(0x0022)
3680CallPal_OpcDec22:
3681        br	r31, osfpal_calpal_opcdec
3682
3683        CALL_PAL_PRIV(0x0023)
3684CallPal_OpcDec23:
3685        br	r31, osfpal_calpal_opcdec
3686
3687        CALL_PAL_PRIV(0x0024)
3688CallPal_OpcDec24:
3689        br	r31, osfpal_calpal_opcdec
3690
3691        CALL_PAL_PRIV(0x0025)
3692CallPal_OpcDec25:
3693        br	r31, osfpal_calpal_opcdec
3694
3695        CALL_PAL_PRIV(0x0026)
3696CallPal_OpcDec26:
3697        br	r31, osfpal_calpal_opcdec
3698
3699        CALL_PAL_PRIV(0x0027)
3700CallPal_OpcDec27:
3701        br	r31, osfpal_calpal_opcdec
3702
3703        CALL_PAL_PRIV(0x0028)
3704CallPal_OpcDec28:
3705        br	r31, osfpal_calpal_opcdec
3706
3707        CALL_PAL_PRIV(0x0029)
3708CallPal_OpcDec29:
3709        br	r31, osfpal_calpal_opcdec
3710
3711        CALL_PAL_PRIV(0x002A)
3712CallPal_OpcDec2A:
3713        br	r31, osfpal_calpal_opcdec
3714
3715// .sbttl	"wrfen - PALcode for wrfen instruction"
3716
3717//+
3718//
3719// Entry:
3720//	Vectored into via hardware PALcode instruction dispatch.
3721//
3722// Function:
3723//	a0<0> -> ICSR<FPE>
3724//	Store new FEN in PCB
3725//	Final value of t0 (r1), t8..t10 (r22..r24) and a0 (r16) are UNPREDICTABLE
3726//
3727// Issue: What about pending FP loads when FEN goes from on->off????
3728//-
3729
3730        CALL_PAL_PRIV(PAL_WRFEN_ENTRY)
3731Call_Pal_Wrfen:
3732        or	r31, 1, r13		// Get a one
3733        mfpr	r1, ev5__icsr		// Get current FPE
3734
3735        sll	r13, icsr_v_fpe, r13	// shift 1 to icsr<fpe> spot, e0
3736        and	r16, 1, r16		// clean new fen
3737
3738        sll	r16, icsr_v_fpe, r12	// shift new fen to correct bit position
3739        bic	r1, r13, r1		// zero icsr<fpe>
3740
3741        or	r1, r12, r1		// Or new FEN into ICSR
3742        mfpr	r12, pt_pcbb		// Get PCBB - E1
3743
3744        mtpr	r1, ev5__icsr		// write new ICSR.  3 Bubble cycles to HW_REI
3745        stlp	r16, osfpcb_q_fen(r12)	// Store FEN in PCB.
3746
3747        mfpr	r31, pt0		// Pad ICSR<FPE> write.
3748        mfpr	r31, pt0
3749
3750        mfpr	r31, pt0
3751//	pvc_violate 	225		// cuz PVC can't distinguish which bits changed
3752        hw_rei
3753
3754
3755        CALL_PAL_PRIV(0x002C)
3756CallPal_OpcDec2C:
3757        br	r31, osfpal_calpal_opcdec
3758
// .sbttl	"wrvptptr - PALcode for wrvptptr instruction"
3760//+
3761//
3762// Entry:
3763//	Vectored into via hardware PALcode instruction dispatch.
3764//
3765// Function:
3766//	vptptr <- a0 (r16)
3767//-
3768
3769        CALL_PAL_PRIV(PAL_WRVPTPTR_ENTRY)
3770Call_Pal_Wrvptptr:
3771        mtpr    r16, ev5__mvptbr                // Load Mbox copy
3772        mtpr    r16, ev5__ivptbr                // Load Ibox copy
3773        nop                                     // Pad IPR write
3774        nop
3775        hw_rei
3776
3777        CALL_PAL_PRIV(0x002E)
3778CallPal_OpcDec2E:
3779        br	r31, osfpal_calpal_opcdec
3780
3781        CALL_PAL_PRIV(0x002F)
3782CallPal_OpcDec2F:
3783        br	r31, osfpal_calpal_opcdec
3784
3785// .sbttl	"swpctx- PALcode for swpctx instruction"
3786
3787//+
3788//
3789// Entry:
3790//       hardware dispatch via callPal instruction
3791//       R16 -> new pcb
3792//
3793// Function:
3794//       dynamic state moved to old pcb
3795//       new state loaded from new pcb
3796//       pcbb pointer set
3797//       old pcbb returned in R0
3798//
3799//  Note: need to add perf monitor stuff
3800//-
3801
3802        CALL_PAL_PRIV(PAL_SWPCTX_ENTRY)
3803Call_Pal_Swpctx:
3804        rpcc	r13			// get cyccounter
3805        mfpr	r0, pt_pcbb		// get pcbb
3806
3807        ldqp	r22, osfpcb_q_fen(r16)	// get new fen/pme
3808        ldqp	r23, osfpcb_l_cc(r16)	// get new asn
3809
3810        srl	r13, 32, r25		// move offset
3811        mfpr	r24, pt_usp		// get usp
3812
3813        stqp	r30, osfpcb_q_ksp(r0)	// store old ksp
3814//	pvc_violate 379			// stqp can't trap except replay.  only problem if mf same ipr in same shadow.
3815        mtpr	r16, pt_pcbb		// set new pcbb
3816
3817        stqp	r24, osfpcb_q_usp(r0)	// store usp
3818        addl	r13, r25, r25		// merge for new time
3819
3820        stlp	r25, osfpcb_l_cc(r0)	// save time
3821        ldah	r24, (1<<(icsr_v_fpe-16))(r31)
3822
3823        and	r22, 1, r12		// isolate fen
3824        mfpr	r25, icsr		// get current icsr
3825
3826        ev5_pass2 	lda	r24, (1<<icsr_v_pmp)(r24)
3827        br	r31, swpctx_cont
3828
3829// .sbttl	"wrval - PALcode for wrval instruction"
3830//+
3831//
3832// Entry:
3833//	Vectored into via hardware PALcode instruction dispatch.
3834//
3835// Function:
3836//	sysvalue <- a0 (r16)
3837//-
3838
3839        CALL_PAL_PRIV(PAL_WRVAL_ENTRY)
3840Call_Pal_Wrval:
3841        nop
3842        mtpr	r16, pt_sysval		// Pad paltemp write
3843        nop
3844        nop
3845        hw_rei
3846
3847
3848// .sbttl	"rdval - PALcode for rdval instruction"
3849
3850//+
3851//
3852// Entry:
3853//	Vectored into via hardware PALcode instruction dispatch.
3854//
3855// Function:
3856//	v0 (r0) <- sysvalue
3857//-
3858
3859        CALL_PAL_PRIV(PAL_RDVAL_ENTRY)
3860Call_Pal_Rdval:
3861        nop
3862        mfpr	r0, pt_sysval
3863        nop
3864        hw_rei
3865
3866// .sbttl	"tbi - PALcode for tbi instruction"
3867//+
3868//
3869// Entry:
3870//	Vectored into via hardware PALcode instruction dispatch.
3871//
3872// Function:
3873//	TB invalidate
3874//       r16/a0 = TBI type
3875//       r17/a1 = Va for TBISx instructions
3876//-
3877
3878        CALL_PAL_PRIV(PAL_TBI_ENTRY)
3879Call_Pal_Tbi:
        addq	r16, 2, r16			// change range to 0-5
3881        br	r23, CALL_PAL_tbi_10_		// get our address
3882
3883CALL_PAL_tbi_10_: cmpult	r16, 6, r22		// see if in range
3884        lda	r23, tbi_tbl-CALL_PAL_tbi_10_(r23)	// set base to start of table
3885        sll	r16, 4, r16		// * 16
3886        blbc	r22, CALL_PAL_tbi_30_		// go rei, if not
3887
3888        addq	r23, r16, r23		// addr of our code
3889//orig	pvc_jsr	tbi
3890        jmp	r31, (r23)		// and go do it
3891
3892CALL_PAL_tbi_30_:
3893        hw_rei
3894        nop
3895
3896// .sbttl	"wrent - PALcode for wrent instruction"
3897//+
3898//
3899// Entry:
3900//	Vectored into via hardware PALcode instruction dispatch.
3901//
3902// Function:
3903//	Update ent* in paltemps
3904//       r16/a0 = Address of entry routine
3905//       r17/a1 = Entry Number 0..5
3906//
3907//       r22, r23 trashed
3908//-
3909
3910        CALL_PAL_PRIV(PAL_WRENT_ENTRY)
3911Call_Pal_Wrent:
3912        cmpult	r17, 6, r22			// see if in range
3913        br	r23, CALL_PAL_wrent_10_		// get our address
3914
3915CALL_PAL_wrent_10_:	bic	r16, 3, r16	// clean pc
3916        blbc	r22, CALL_PAL_wrent_30_		// go rei, if not in range
3917
3918        lda	r23, wrent_tbl-CALL_PAL_wrent_10_(r23)	// set base to start of table
3919        sll	r17, 4, r17				// *16
3920
3921        addq  	r17, r23, r23		// Get address in table
3922//orig	pvc_jsr	wrent
3923        jmp	r31, (r23)		// and go do it
3924
3925CALL_PAL_wrent_30_:
3926        hw_rei				// out of range, just return
3927
3928// .sbttl	"swpipl - PALcode for swpipl instruction"
3929//+
3930//
3931// Entry:
3932//	Vectored into via hardware PALcode instruction dispatch.
3933//
3934// Function:
3935//	v0 (r0)  <- PS<IPL>
3936//	PS<IPL>  <- a0<2:0>  (r16)
3937//
3938//	t8 (r22) is scratch
3939//-
3940
3941        CALL_PAL_PRIV(PAL_SWPIPL_ENTRY)
3942Call_Pal_Swpipl:
3943        and	r16, osfps_m_ipl, r16	// clean New ipl
3944        mfpr	r22, pt_intmask		// get int mask
3945
3946        extbl	r22, r16, r22		// get mask for this ipl
3947        bis	r11, r31, r0		// return old ipl
3948
3949        bis	r16, r31, r11		// set new ps
3950        mtpr	r22, ev5__ipl		// set new mask
3951
3952        mfpr	r31, pt0		// pad ipl write
3953        mfpr	r31, pt0		// pad ipl write
3954
3955        hw_rei				// back
3956
3957// .sbttl	"rdps - PALcode for rdps instruction"
3958//+
3959//
3960// Entry:
3961//	Vectored into via hardware PALcode instruction dispatch.
3962//
3963// Function:
3964//	v0 (r0) <- ps
3965//-
3966
3967        CALL_PAL_PRIV(PAL_RDPS_ENTRY)
3968Call_Pal_Rdps:
3969        bis	r11, r31, r0		// Fetch PALshadow PS
3970        nop				// Must be 2 cycles long
3971        hw_rei
3972
3973// .sbttl	"wrkgp - PALcode for wrkgp instruction"
3974//+
3975//
3976// Entry:
3977//	Vectored into via hardware PALcode instruction dispatch.
3978//
3979// Function:
3980//	kgp <- a0 (r16)
3981//-
3982
3983        CALL_PAL_PRIV(PAL_WRKGP_ENTRY)
3984Call_Pal_Wrkgp:
3985        nop
3986        mtpr	r16, pt_kgp
3987        nop				// Pad for pt write->read restriction
3988        nop
3989        hw_rei
3990
3991// .sbttl	"wrusp - PALcode for wrusp instruction"
3992//+
3993//
3994// Entry:
3995//	Vectored into via hardware PALcode instruction dispatch.
3996//
3997// Function:
3998//       usp <- a0 (r16)
3999//-
4000
4001        CALL_PAL_PRIV(PAL_WRUSP_ENTRY)
4002Call_Pal_Wrusp:
4003        nop
4004        mtpr	r16, pt_usp
4005        nop				// Pad possible pt write->read restriction
4006        nop
4007        hw_rei
4008
4009// .sbttl	"wrperfmon - PALcode for wrperfmon instruction"
4010//+
4011//
4012// Entry:
4013//	Vectored into via hardware PALcode instruction dispatch.
4014//
4015//
4016// Function:
4017//	Various control functions for the onchip performance counters
4018//
4019//	option selector in r16
4020//	option argument in r17
4021//	returned status in r0
4022//
4023//
4024//	r16 = 0	Disable performance monitoring for one or more cpu's
4025//	  r17 = 0		disable no counters
4026//	  r17 = bitmask		disable counters specified in bit mask (1=disable)
4027//
4028//	r16 = 1	Enable performance monitoring for one or more cpu's
4029//	  r17 = 0		enable no counters
4030//	  r17 = bitmask		enable counters specified in bit mask (1=enable)
4031//
4032//	r16 = 2	Mux select for one or more cpu's
4033//	  r17 = Mux selection (cpu specific)
4034//    		<24:19>  	 bc_ctl<pm_mux_sel> field (see spec)
4035//		<31>,<7:4>,<3:0> pmctr <sel0>,<sel1>,<sel2> fields (see spec)
4036//
4037//	r16 = 3	Options
4038//	  r17 = (cpu specific)
4039//		<0> = 0 	log all processes
4040//		<0> = 1		log only selected processes
4041//		<30,9,8> 		mode select - ku,kp,kk
4042//
4043//	r16 = 4	Interrupt frequency select
4044//	  r17 = (cpu specific)	indicates interrupt frequencies desired for each
4045//				counter, with "zero interrupts" being an option
4046//				frequency info in r17 bits as defined by PMCTR_CTL<FRQx> below
4047//
4048//	r16 = 5	Read Counters
4049//	  r17 = na
4050//	  r0  = value (same format as ev5 pmctr)
4051//	        <0> = 0		Read failed
4052//	        <0> = 1		Read succeeded
4053//
4054//	r16 = 6	Write Counters
4055//	  r17 = value (same format as ev5 pmctr; all counters written simultaneously)
4056//
4057//	r16 = 7	Enable performance monitoring for one or more cpu's and reset counter to 0
4058//	  r17 = 0		enable no counters
4059//	  r17 = bitmask		enable & clear counters specified in bit mask (1=enable & clear)
4060//
4061//=============================================================================
4062//Assumptions:
4063//PMCTR_CTL:
4064//
4065//       <15:14>         CTL0 -- encoded frequency select and enable - CTR0
4066//       <13:12>         CTL1 --			"		   - CTR1
4067//       <11:10>         CTL2 --			"		   - CTR2
4068//
4069//       <9:8>           FRQ0 -- frequency select for CTR0 (no enable info)
4070//       <7:6>           FRQ1 -- frequency select for CTR1
4071//       <5:4>           FRQ2 -- frequency select for CTR2
4072//
4073//       <0>		all vs. select processes (0=all,1=select)
4074//
4075//     where
4076//	FRQx<1:0>
4077//	     0 1	disable interrupt
4078//	     1 0	frequency = 65536 (16384 for ctr2)
4079//	     1 1	frequency = 256
4080//	note:  FRQx<1:0> = 00 will keep counters from ever being enabled.
4081//
4082//=============================================================================
4083//
4084        CALL_PAL_PRIV(0x0039)
4085// unsupported in Hudson code .. pboyle Nov/95
4086CALL_PAL_Wrperfmon:
4087#if perfmon_debug == 0
4088        // "real" performance monitoring code
4089        cmpeq	r16, 1, r0		// check for enable
4090        bne	r0, perfmon_en		// br if requested to enable
4091
4092        cmpeq	r16, 2, r0		// check for mux ctl
4093        bne	r0, perfmon_muxctl	// br if request to set mux controls
4094
4095        cmpeq	r16, 3, r0		// check for options
4096        bne	r0, perfmon_ctl		// br if request to set options
4097
4098        cmpeq	r16, 4, r0		// check for interrupt frequency select
4099        bne	r0, perfmon_freq	// br if request to change frequency select
4100
4101        cmpeq	r16, 5, r0		// check for counter read request
4102        bne	r0, perfmon_rd		// br if request to read counters
4103
4104        cmpeq	r16, 6, r0		// check for counter write request
4105        bne	r0, perfmon_wr		// br if request to write counters
4106
4107        cmpeq	r16, 7, r0		// check for counter clear/enable request
4108        bne	r0, perfmon_enclr	// br if request to clear/enable counters
4109
4110        beq	r16, perfmon_dis	// br if requested to disable (r16=0)
4111        br	r31, perfmon_unknown	// br if unknown request
4112#else
4113
4114        br	r31, pal_perfmon_debug
4115#endif
4116
4117// .sbttl	"rdusp - PALcode for rdusp instruction"
4118//+
4119//
4120// Entry:
4121//	Vectored into via hardware PALcode instruction dispatch.
4122//
4123// Function:
4124//	v0 (r0) <- usp
4125//-
4126
4127        CALL_PAL_PRIV(PAL_RDUSP_ENTRY)
4128Call_Pal_Rdusp:
4129        nop
4130        mfpr	r0, pt_usp
4131        hw_rei
4132
4133
4134        CALL_PAL_PRIV(0x003B)
4135CallPal_OpcDec3B:
4136        br	r31, osfpal_calpal_opcdec
4137
4138// .sbttl	"whami - PALcode for whami instruction"
4139//+
4140//
4141// Entry:
4142//	Vectored into via hardware PALcode instruction dispatch.
4143//
4144// Function:
4145//	v0 (r0) <- whami
4146//-
4147        CALL_PAL_PRIV(PAL_WHAMI_ENTRY)
4148Call_Pal_Whami:
4149        nop
4150        mfpr    r0, pt_whami            // Get Whami
4151        extbl	r0, 1, r0		// Isolate just whami bits
4152        hw_rei
4153
4154// .sbttl	"retsys - PALcode for retsys instruction"
4155//
4156// Entry:
4157//	Vectored into via hardware PALcode instruction dispatch.
4158//       00(sp) contains return pc
4159//       08(sp) contains r29
4160//
4161// Function:
4162//	Return from system call.
4163//       mode switched from kern to user.
4164//       stacks swapped, ugp, upc restored.
4165//       r23, r25 junked
4166//-
4167
4168        CALL_PAL_PRIV(PAL_RETSYS_ENTRY)
4169Call_Pal_Retsys:
4170        lda	r25, osfsf_c_size(sp) 	// pop stack
4171        bis	r25, r31, r14		// touch r25 & r14 to stall mf exc_addr
4172
4173        mfpr	r14, exc_addr		// save exc_addr in case of fault
4174        ldq	r23, osfsf_pc(sp) 	// get pc
4175
4176        ldq	r29, osfsf_gp(sp) 	// get gp
4177        stl_c	r31, -4(sp)		// clear lock_flag
4178
4179        lda	r11, 1<<osfps_v_mode(r31)// new PS:mode=user
4180        mfpr	r30, pt_usp		// get users stack
4181
4182        bic	r23, 3, r23		// clean return pc
4183        mtpr	r31, ev5__ipl		// zero ibox IPL - 2 bubbles to hw_rei
4184
4185        mtpr	r11, ev5__dtb_cm	// set Mbox current mode - no virt ref for 2 cycles
4186        mtpr	r11, ev5__ps		// set Ibox current mode - 2 bubble to hw_rei
4187
4188        mtpr	r23, exc_addr		// set return address - 1 bubble to hw_rei
4189        mtpr	r25, pt_ksp		// save kern stack
4190
4191        rc	r31			// clear inter_flag
4192//	pvc_violate 248			// possible hidden mt->mf pt violation ok in callpal
4193        hw_rei_spe			// and back
4194
4195
4196        CALL_PAL_PRIV(0x003E)
4197CallPal_OpcDec3E:
4198        br	r31, osfpal_calpal_opcdec
4199
4200// .sbttl	"rti - PALcode for rti instruction"
4201//+
4202//
4203// Entry:
4204//	Vectored into via hardware PALcode instruction dispatch.
4205//
4206// Function:
4207//	00(sp) -> ps
4208//	08(sp) -> pc
4209//	16(sp) -> r29 (gp)
4210//	24(sp) -> r16 (a0)
4211//	32(sp) -> r17 (a1)
4212//	40(sp) -> r18 (a3)
4213//-
4214
4215        CALL_PAL_PRIV(PAL_RTI_ENTRY)
4216#ifdef SIMOS
4217        /* called once by platform_tlaser */
4218        .globl Call_Pal_Rti
4219#endif
4220Call_Pal_Rti:
4221        lda	r25, osfsf_c_size(sp)	// get updated sp
4222        bis	r25, r31, r14		// touch r14,r25 to stall mf exc_addr
4223
4224        mfpr	r14, exc_addr		// save PC in case of fault
4225        rc	r31			// clear intr_flag
4226
4227        ldq	r12, -6*8(r25)		// get ps
4228        ldq	r13, -5*8(r25)		// pc
4229
4230        ldq	r18, -1*8(r25)		// a2
4231        ldq	r17, -2*8(r25)		// a1
4232
4233        ldq	r16, -3*8(r25)		// a0
4234        ldq	r29, -4*8(r25)		// gp
4235
4236        bic	r13, 3, r13		// clean return pc
4237        stl_c	r31, -4(r25)		// clear lock_flag
4238
4239        and	r12, osfps_m_mode, r11	// get mode
4240        mtpr	r13, exc_addr		// set return address
4241
4242        beq	r11, rti_to_kern	// br if rti to Kern
4243        br	r31, rti_to_user	// out of call_pal space
4244
4245
4246// .sbttl  "Start the Unprivileged CALL_PAL Entry Points"
4247// .sbttl	"bpt- PALcode for bpt instruction"
4248//+
4249//
4250// Entry:
4251//	Vectored into via hardware PALcode instruction dispatch.
4252//
4253// Function:
4254//	Build stack frame
4255//	a0 <- code
4256//	a1 <- unpred
4257//	a2 <- unpred
4258//	vector via entIF
4259//
4260//-
4261//
4262        .text	1
4263//	. = 0x3000
4264        CALL_PAL_UNPRIV(PAL_BPT_ENTRY)
4265Call_Pal_Bpt:
4266        sll	r11, 63-osfps_v_mode, r25 // Shift mode up to MS bit
4267        mtpr	r31, ev5__ps		// Set Ibox current mode to kernel
4268
4269        bis	r11, r31, r12		// Save PS for stack write
4270        bge	r25, CALL_PAL_bpt_10_		// no stack swap needed if cm=kern
4271
4272        mtpr	r31, ev5__dtb_cm	// Set Mbox current mode to kernel -
4273                                        //     no virt ref for next 2 cycles
4274        mtpr	r30, pt_usp		// save user stack
4275
4276        bis	r31, r31, r11		// Set new PS
4277        mfpr	r30, pt_ksp
4278
4279CALL_PAL_bpt_10_:
4280        lda	sp, 0-osfsf_c_size(sp)// allocate stack space
4281        mfpr	r14, exc_addr		// get pc
4282
4283        stq	r16, osfsf_a0(sp)	// save regs
4284        bis	r31, osf_a0_bpt, r16	// set a0
4285
4286        stq	r17, osfsf_a1(sp)	// a1
4287        br	r31, bpt_bchk_common	// out of call_pal space
4288
4289
4290// .sbttl	"bugchk- PALcode for bugchk instruction"
4291//+
4292//
4293// Entry:
4294//	Vectored into via hardware PALcode instruction dispatch.
4295//
4296// Function:
4297//	Build stack frame
4298//	a0 <- code
4299//	a1 <- unpred
4300//	a2 <- unpred
4301//	vector via entIF
4302//
4303//-
4304//
4305        CALL_PAL_UNPRIV(PAL_BUGCHK_ENTRY)
4306Call_Pal_Bugchk:
4307        sll	r11, 63-osfps_v_mode, r25 // Shift mode up to MS bit
4308        mtpr	r31, ev5__ps		// Set Ibox current mode to kernel
4309
4310        bis	r11, r31, r12		// Save PS for stack write
4311        bge	r25, CALL_PAL_bugchk_10_		// no stack swap needed if cm=kern
4312
4313        mtpr	r31, ev5__dtb_cm	// Set Mbox current mode to kernel -
4314                                        //     no virt ref for next 2 cycles
4315        mtpr	r30, pt_usp		// save user stack
4316
4317        bis	r31, r31, r11		// Set new PS
4318        mfpr	r30, pt_ksp
4319
4320CALL_PAL_bugchk_10_:
4321        lda	sp, 0-osfsf_c_size(sp)// allocate stack space
4322        mfpr	r14, exc_addr		// get pc
4323
4324        stq	r16, osfsf_a0(sp)	// save regs
4325        bis	r31, osf_a0_bugchk, r16	// set a0
4326
4327        stq	r17, osfsf_a1(sp)	// a1
4328        br	r31, bpt_bchk_common	// out of call_pal space
4329
4330
4331        CALL_PAL_UNPRIV(0x0082)
4332CallPal_OpcDec82:
4333        br	r31, osfpal_calpal_opcdec
4334
4335// .sbttl	"callsys - PALcode for callsys instruction"
4336//+
4337//
4338// Entry:
4339//	Vectored into via hardware PALcode instruction dispatch.
4340//
4341// Function:
4342// 	Switch mode to kernel and build a callsys stack frame.
4343//       sp = ksp
4344//       gp = kgp
4345//	t8 - t10 (r22-r24) trashed
4346//
4347//-
4348//
4349        CALL_PAL_UNPRIV(PAL_CALLSYS_ENTRY)
4350Call_Pal_Callsys:
4351
4352        and	r11, osfps_m_mode, r24	// get mode
4353        mfpr	r22, pt_ksp		// get ksp
4354
4355        beq	r24, sys_from_kern 	// sysCall from kern is not allowed
4356        mfpr	r12, pt_entsys		// get address of callSys routine
4357
4358//+
4359// from here on we know we are in user going to Kern
4360//-
4361        mtpr	r31, ev5__dtb_cm	// set Mbox current mode - no virt ref for 2 cycles
4362        mtpr	r31, ev5__ps		// set Ibox current mode - 2 bubble to hw_rei
4363
4364        bis	r31, r31, r11		// PS=0 (mode=kern)
4365        mfpr	r23, exc_addr		// get pc
4366
4367        mtpr	r30, pt_usp		// save usp
4368        lda	sp, 0-osfsf_c_size(r22)// set new sp
4369
4370        stq	r29, osfsf_gp(sp)	// save user gp/r29
4371        stq	r24, osfsf_ps(sp)	// save ps
4372
4373        stq	r23, osfsf_pc(sp)	// save pc
4374        mtpr	r12, exc_addr		// set address
4375                                        // 1 cycle to hw_rei
4376
4377        mfpr	r29, pt_kgp		// get the kern gp/r29
4378
4379        hw_rei_spe			// and off we go!
4380
4381
4382        CALL_PAL_UNPRIV(0x0084)
4383CallPal_OpcDec84:
4384        br	r31, osfpal_calpal_opcdec
4385
4386        CALL_PAL_UNPRIV(0x0085)
4387CallPal_OpcDec85:
4388        br	r31, osfpal_calpal_opcdec
4389
4390// .sbttl	"imb - PALcode for imb instruction"
4391//+
4392//
4393// Entry:
4394//	Vectored into via hardware PALcode instruction dispatch.
4395//
4396// Function:
4397//       Flush the writebuffer and flush the Icache
4398//
4399//-
4400//
4401        CALL_PAL_UNPRIV(PAL_IMB_ENTRY)
4402Call_Pal_Imb:
4403        mb                              // Clear the writebuffer
4404        mfpr    r31, ev5__mcsr          // Sync with clear
4405        nop
4406        nop
4407        br      r31, pal_ic_flush           // Flush Icache
4408
4409
4410// .sbttl	"CALL_PAL OPCDECs"
4411
4412        CALL_PAL_UNPRIV(0x0087)
4413CallPal_OpcDec87:
4414        br	r31, osfpal_calpal_opcdec
4415
4416        CALL_PAL_UNPRIV(0x0088)
4417CallPal_OpcDec88:
4418        br	r31, osfpal_calpal_opcdec
4419
4420        CALL_PAL_UNPRIV(0x0089)
4421CallPal_OpcDec89:
4422        br	r31, osfpal_calpal_opcdec
4423
4424        CALL_PAL_UNPRIV(0x008A)
4425CallPal_OpcDec8A:
4426        br	r31, osfpal_calpal_opcdec
4427
4428        CALL_PAL_UNPRIV(0x008B)
4429CallPal_OpcDec8B:
4430        br	r31, osfpal_calpal_opcdec
4431
4432        CALL_PAL_UNPRIV(0x008C)
4433CallPal_OpcDec8C:
4434        br	r31, osfpal_calpal_opcdec
4435
4436        CALL_PAL_UNPRIV(0x008D)
4437CallPal_OpcDec8D:
4438        br	r31, osfpal_calpal_opcdec
4439
4440        CALL_PAL_UNPRIV(0x008E)
4441CallPal_OpcDec8E:
4442        br	r31, osfpal_calpal_opcdec
4443
4444        CALL_PAL_UNPRIV(0x008F)
4445CallPal_OpcDec8F:
4446        br	r31, osfpal_calpal_opcdec
4447
4448        CALL_PAL_UNPRIV(0x0090)
4449CallPal_OpcDec90:
4450        br	r31, osfpal_calpal_opcdec
4451
4452        CALL_PAL_UNPRIV(0x0091)
4453CallPal_OpcDec91:
4454        br	r31, osfpal_calpal_opcdec
4455
4456        CALL_PAL_UNPRIV(0x0092)
4457CallPal_OpcDec92:
4458        br	r31, osfpal_calpal_opcdec
4459
4460        CALL_PAL_UNPRIV(0x0093)
4461CallPal_OpcDec93:
4462        br	r31, osfpal_calpal_opcdec
4463
4464        CALL_PAL_UNPRIV(0x0094)
4465CallPal_OpcDec94:
4466        br	r31, osfpal_calpal_opcdec
4467
4468        CALL_PAL_UNPRIV(0x0095)
4469CallPal_OpcDec95:
4470        br	r31, osfpal_calpal_opcdec
4471
4472        CALL_PAL_UNPRIV(0x0096)
4473CallPal_OpcDec96:
4474        br	r31, osfpal_calpal_opcdec
4475
4476        CALL_PAL_UNPRIV(0x0097)
4477CallPal_OpcDec97:
4478        br	r31, osfpal_calpal_opcdec
4479
4480        CALL_PAL_UNPRIV(0x0098)
4481CallPal_OpcDec98:
4482        br	r31, osfpal_calpal_opcdec
4483
4484        CALL_PAL_UNPRIV(0x0099)
4485CallPal_OpcDec99:
4486        br	r31, osfpal_calpal_opcdec
4487
4488        CALL_PAL_UNPRIV(0x009A)
4489CallPal_OpcDec9A:
4490        br	r31, osfpal_calpal_opcdec
4491
4492        CALL_PAL_UNPRIV(0x009B)
4493CallPal_OpcDec9B:
4494        br	r31, osfpal_calpal_opcdec
4495
4496        CALL_PAL_UNPRIV(0x009C)
4497CallPal_OpcDec9C:
4498        br	r31, osfpal_calpal_opcdec
4499
4500        CALL_PAL_UNPRIV(0x009D)
4501CallPal_OpcDec9D:
4502        br	r31, osfpal_calpal_opcdec
4503
4504// .sbttl	"rdunique - PALcode for rdunique instruction"
4505//+
4506//
4507// Entry:
4508//	Vectored into via hardware PALcode instruction dispatch.
4509//
4510// Function:
4511//	v0 (r0) <- unique
4512//
4513//-
4514//
4515        CALL_PAL_UNPRIV(PAL_RDUNIQUE_ENTRY)
4516CALL_PALrdunique_:
4517        mfpr	r0, pt_pcbb		// get pcb pointer
        ldqp	r0, osfpcb_q_unique(r0) // read the unique value
4519
4520        hw_rei
4521
4522// .sbttl	"wrunique - PALcode for wrunique instruction"
4523//+
4524//
4525// Entry:
4526//	Vectored into via hardware PALcode instruction dispatch.
4527//
4528// Function:
4529//	unique <- a0 (r16)
4530//
4531//-
4532//
4533CALL_PAL_UNPRIV(PAL_WRUNIQUE_ENTRY)
4534CALL_PAL_Wrunique:
4535        nop
4536        mfpr	r12, pt_pcbb		// get pcb pointer
4537        stqp	r16, osfpcb_q_unique(r12)// get new value
4538        nop				// Pad palshadow write
4539        hw_rei				// back
4540
4541// .sbttl	"CALL_PAL OPCDECs"
4542
4543        CALL_PAL_UNPRIV(0x00A0)
4544CallPal_OpcDecA0:
4545        br	r31, osfpal_calpal_opcdec
4546
4547        CALL_PAL_UNPRIV(0x00A1)
4548CallPal_OpcDecA1:
4549        br	r31, osfpal_calpal_opcdec
4550
4551        CALL_PAL_UNPRIV(0x00A2)
4552CallPal_OpcDecA2:
4553        br	r31, osfpal_calpal_opcdec
4554
4555        CALL_PAL_UNPRIV(0x00A3)
4556CallPal_OpcDecA3:
4557        br	r31, osfpal_calpal_opcdec
4558
4559        CALL_PAL_UNPRIV(0x00A4)
4560CallPal_OpcDecA4:
4561        br	r31, osfpal_calpal_opcdec
4562
4563        CALL_PAL_UNPRIV(0x00A5)
4564CallPal_OpcDecA5:
4565        br	r31, osfpal_calpal_opcdec
4566
4567        CALL_PAL_UNPRIV(0x00A6)
4568CallPal_OpcDecA6:
4569        br	r31, osfpal_calpal_opcdec
4570
4571        CALL_PAL_UNPRIV(0x00A7)
4572CallPal_OpcDecA7:
4573        br	r31, osfpal_calpal_opcdec
4574
4575        CALL_PAL_UNPRIV(0x00A8)
4576CallPal_OpcDecA8:
4577        br	r31, osfpal_calpal_opcdec
4578
4579        CALL_PAL_UNPRIV(0x00A9)
4580CallPal_OpcDecA9:
4581        br	r31, osfpal_calpal_opcdec
4582
4583
4584// .sbttl	"gentrap - PALcode for gentrap instruction"
4585//+
4586// CALL_PAL_gentrap:
4587// Entry:
4588//	Vectored into via hardware PALcode instruction dispatch.
4589//
4590// Function:
4591//	Build stack frame
4592//	a0 <- code
4593//	a1 <- unpred
4594//	a2 <- unpred
4595//	vector via entIF
4596//
4597//-
4598
4599        CALL_PAL_UNPRIV(0x00AA)
4600// unsupported in Hudson code .. pboyle Nov/95
4601CALL_PAL_gentrap:
4602        sll	r11, 63-osfps_v_mode, r25 // Shift mode up to MS bit
4603        mtpr	r31, ev5__ps		// Set Ibox current mode to kernel
4604
4605        bis	r11, r31, r12			// Save PS for stack write
4606        bge	r25, CALL_PAL_gentrap_10_	// no stack swap needed if cm=kern
4607
4608        mtpr	r31, ev5__dtb_cm	// Set Mbox current mode to kernel -
4609                                        //     no virt ref for next 2 cycles
4610        mtpr	r30, pt_usp		// save user stack
4611
4612        bis	r31, r31, r11		// Set new PS
4613        mfpr	r30, pt_ksp
4614
4615CALL_PAL_gentrap_10_:
4616        lda	sp, 0-osfsf_c_size(sp)// allocate stack space
4617        mfpr	r14, exc_addr		// get pc
4618
4619        stq	r16, osfsf_a0(sp)	// save regs
4620        bis	r31, osf_a0_gentrap, r16// set a0
4621
4622        stq	r17, osfsf_a1(sp)	// a1
4623        br	r31, bpt_bchk_common	// out of call_pal space
4624
4625
4626// .sbttl	"CALL_PAL OPCDECs"
4627
4628        CALL_PAL_UNPRIV(0x00AB)
4629CallPal_OpcDecAB:
4630        br	r31, osfpal_calpal_opcdec
4631
4632        CALL_PAL_UNPRIV(0x00AC)
4633CallPal_OpcDecAC:
4634        br	r31, osfpal_calpal_opcdec
4635
4636        CALL_PAL_UNPRIV(0x00AD)
4637CallPal_OpcDecAD:
4638        br	r31, osfpal_calpal_opcdec
4639
4640        CALL_PAL_UNPRIV(0x00AE)
4641CallPal_OpcDecAE:
4642        br	r31, osfpal_calpal_opcdec
4643
4644        CALL_PAL_UNPRIV(0x00AF)
4645CallPal_OpcDecAF:
4646        br	r31, osfpal_calpal_opcdec
4647
4648        CALL_PAL_UNPRIV(0x00B0)
4649CallPal_OpcDecB0:
4650        br	r31, osfpal_calpal_opcdec
4651
4652        CALL_PAL_UNPRIV(0x00B1)
4653CallPal_OpcDecB1:
4654        br	r31, osfpal_calpal_opcdec
4655
4656        CALL_PAL_UNPRIV(0x00B2)
4657CallPal_OpcDecB2:
4658        br	r31, osfpal_calpal_opcdec
4659
4660        CALL_PAL_UNPRIV(0x00B3)
4661CallPal_OpcDecB3:
4662        br	r31, osfpal_calpal_opcdec
4663
4664        CALL_PAL_UNPRIV(0x00B4)
4665CallPal_OpcDecB4:
4666        br	r31, osfpal_calpal_opcdec
4667
4668        CALL_PAL_UNPRIV(0x00B5)
4669CallPal_OpcDecB5:
4670        br	r31, osfpal_calpal_opcdec
4671
4672        CALL_PAL_UNPRIV(0x00B6)
4673CallPal_OpcDecB6:
4674        br	r31, osfpal_calpal_opcdec
4675
4676        CALL_PAL_UNPRIV(0x00B7)
4677CallPal_OpcDecB7:
4678        br	r31, osfpal_calpal_opcdec
4679
4680        CALL_PAL_UNPRIV(0x00B8)
4681CallPal_OpcDecB8:
4682        br	r31, osfpal_calpal_opcdec
4683
4684        CALL_PAL_UNPRIV(0x00B9)
4685CallPal_OpcDecB9:
4686        br	r31, osfpal_calpal_opcdec
4687
4688        CALL_PAL_UNPRIV(0x00BA)
4689CallPal_OpcDecBA:
4690        br	r31, osfpal_calpal_opcdec
4691
4692        CALL_PAL_UNPRIV(0x00BB)
4693CallPal_OpcDecBB:
4694        br	r31, osfpal_calpal_opcdec
4695
4696        CALL_PAL_UNPRIV(0x00BC)
4697CallPal_OpcDecBC:
4698        br	r31, osfpal_calpal_opcdec
4699
4700        CALL_PAL_UNPRIV(0x00BD)
4701CallPal_OpcDecBD:
4702        br	r31, osfpal_calpal_opcdec
4703
4704        CALL_PAL_UNPRIV(0x00BE)
4705CallPal_OpcDecBE:
4706        br	r31, osfpal_calpal_opcdec
4707
4708        CALL_PAL_UNPRIV(0x00BF)
4709CallPal_OpcDecBF:
4710        // MODIFIED BY EGH 2/25/04
4711        br	r31, copypal_impl
4712
4713
4714/*======================================================================*/
4715/*                   OSF/1 CALL_PAL CONTINUATION AREA                   */
4716/*======================================================================*/
4717
4718        .text	2
4719
4720        . = 0x4000
4721
4722
4723// .sbttl	"Continuation of MTPR_PERFMON"
4724        ALIGN_BLOCK
4725#if perfmon_debug == 0
4726          // "real" performance monitoring code
4727// mux ctl
4728perfmon_muxctl:
4729        lda     r8, 1(r31) 			// get a 1
4730        sll     r8, pmctr_v_sel0, r8		// move to sel0 position
4731        or      r8, ((0xf<<pmctr_v_sel1) | (0xf<<pmctr_v_sel2)), r8	// build mux select mask
4732        and	r17, r8, r25			// isolate pmctr mux select bits
4733        mfpr	r0, ev5__pmctr
4734        bic	r0, r8, r0			// clear old mux select bits
4735        or	r0,r25, r25			// or in new mux select bits
4736        mtpr	r25, ev5__pmctr
4737
4738        // ok, now tackle cbox mux selects
4739        ldah    r14, 0xfff0(r31)
4740        zap     r14, 0xE0, r14                 // Get Cbox IPR base
4741//orig	get_bc_ctl_shadow	r16		// bc_ctl returned in lower longword
4742// adapted from ev5_pal_macros.mar
4743        mfpr	r16, pt_impure
4744        lda	r16, CNS_Q_IPR(r16)
4745        RESTORE_SHADOW(r16,CNS_Q_BC_CTL,r16);
4746
4747        lda	r8, 0x3F(r31)			// build mux select mask
4748        sll	r8, bc_ctl_v_pm_mux_sel, r8
4749
4750        and	r17, r8, r25			// isolate bc_ctl mux select bits
4751        bic	r16, r8, r16			// isolate old mux select bits
4752        or	r16, r25, r25			// create new bc_ctl
4753        mb					// clear out cbox for future ipr write
4754        stqp	r25, ev5__bc_ctl(r14)		// store to cbox ipr
4755        mb					// clear out cbox for future ipr write
4756
4757//orig	update_bc_ctl_shadow	r25, r16	// r25=value, r16-overwritten with adjusted impure ptr
4758// adapted from ev5_pal_macros.mar
4759        mfpr	r16, pt_impure
4760        lda	r16, CNS_Q_IPR(r16)
4761        SAVE_SHADOW(r25,CNS_Q_BC_CTL,r16);
4762
4763        br 	r31, perfmon_success
4764
4765
4766// requested to disable perf monitoring
4767perfmon_dis:
4768        mfpr	r14, ev5__pmctr		// read ibox pmctr ipr
4769perfmon_dis_ctr0:			// and begin with ctr0
4770        blbc	r17, perfmon_dis_ctr1	// do not disable ctr0
4771        lda 	r8, 3(r31)
4772        sll	r8, pmctr_v_ctl0, r8
4773        bic	r14, r8, r14		// disable ctr0
4774perfmon_dis_ctr1:
4775        srl	r17, 1, r17
4776        blbc	r17, perfmon_dis_ctr2	// do not disable ctr1
4777        lda 	r8, 3(r31)
4778        sll	r8, pmctr_v_ctl1, r8
4779        bic	r14, r8, r14		// disable ctr1
4780perfmon_dis_ctr2:
4781        srl	r17, 1, r17
4782        blbc	r17, perfmon_dis_update	// do not disable ctr2
4783        lda 	r8, 3(r31)
4784        sll	r8, pmctr_v_ctl2, r8
4785        bic	r14, r8, r14		// disable ctr2
4786perfmon_dis_update:
4787        mtpr	r14, ev5__pmctr		// update pmctr ipr
4788//;the following code is not needed for ev5 pass2 and later, but doesn't hurt anything to leave in
4789// adapted from ev5_pal_macros.mar
4790//orig	get_pmctr_ctl	r8, r25		// pmctr_ctl bit in r8.  adjusted impure pointer in r25
4791        mfpr	r25, pt_impure
4792        lda	r25, CNS_Q_IPR(r25)
4793        RESTORE_SHADOW(r8,CNS_Q_PM_CTL,r25);
4794
4795        lda	r17, 0x3F(r31)		// build mask
4796        sll	r17, pmctr_v_ctl2, r17 // shift mask to correct position
4797        and 	r14, r17, r14		// isolate ctl bits
4798        bic	r8, r17, r8		// clear out old ctl bits
4799        or	r14, r8, r14		// create shadow ctl bits
4800//orig	store_reg1 pmctr_ctl, r14, r25, ipr=1	// update pmctr_ctl register
4801//adjusted impure pointer still in r25
4802        SAVE_SHADOW(r14,CNS_Q_PM_CTL,r25);
4803
4804        br 	r31, perfmon_success
4805
4806
4807// requested to enable perf monitoring
4808//;the following code can be greatly simplified for pass2, but should work fine as is.
4809
4810
4811perfmon_enclr:
4812        lda	r9, 1(r31)		// set enclr flag
4813        br perfmon_en_cont
4814
4815perfmon_en:
4816        bis	r31, r31, r9		// clear enclr flag
4817
4818perfmon_en_cont:
4819        mfpr	r8, pt_pcbb		// get PCB base
4820//orig	get_pmctr_ctl r25, r25
4821        mfpr	r25, pt_impure
4822        lda	r25, CNS_Q_IPR(r25)
4823        RESTORE_SHADOW(r25,CNS_Q_PM_CTL,r25);
4824
4825        ldqp	r16, osfpcb_q_fen(r8)	// read DAT/PME/FEN quadword
4826        mfpr	r14, ev5__pmctr		// read ibox pmctr ipr
4827        srl 	r16, osfpcb_v_pme, r16	// get pme bit
4828        mfpr	r13, icsr
4829        and	r16,  1, r16		// isolate pme bit
4830
4831        // this code only needed in pass2 and later
4832//orig	sget_addr	r12, 1<<icsr_v_pmp, r31
4833        lda	r12, 1<<icsr_v_pmp(r31)		// pb
4834        bic	r13, r12, r13		// clear pmp bit
4835        sll	r16, icsr_v_pmp, r12	// move pme bit to icsr<pmp> position
4836        or	r12, r13, r13		// new icsr with icsr<pmp> bit set/clear
4837        ev5_pass2 	mtpr	r13, icsr		// update icsr
4838
4839#if ev5_p1 != 0
4840        lda	r12, 1(r31)
4841        cmovlbc	r25, r12, r16		// r16<0> set if either pme=1 or sprocess=0 (sprocess in bit 0 of r25)
4842#else
4843        bis	r31, 1, r16		// set r16<0> on pass2 to update pmctr always (icsr provides real enable)
4844#endif
4845
4846        sll	r25, 6, r25		// shift frequency bits into pmctr_v_ctl positions
4847        bis	r14, r31, r13		// copy pmctr
4848
4849perfmon_en_ctr0:			// and begin with ctr0
4850        blbc	r17, perfmon_en_ctr1	// do not enable ctr0
4851
4852        blbc	r9, perfmon_en_noclr0	// enclr flag set, clear ctr0 field
4853        lda	r8, 0xffff(r31)
4854        zapnot  r8, 3, r8		// ctr0<15:0> mask
4855        sll	r8, pmctr_v_ctr0, r8
4856        bic	r14, r8, r14		// clear ctr bits
4857        bic	r13, r8, r13		// clear ctr bits
4858
4859perfmon_en_noclr0:
4860//orig	get_addr r8, 3<<pmctr_v_ctl0, r31
4861        LDLI(r8, (3<<pmctr_v_ctl0))
4862        and 	r25, r8, r12		//isolate frequency select bits for ctr0
4863        bic	r14, r8, r14		// clear ctl0 bits in preparation for enabling
4864        or	r14,r12,r14		// or in new ctl0 bits
4865
4866perfmon_en_ctr1:			// enable ctr1
4867        srl	r17, 1, r17		// get ctr1 enable
4868        blbc	r17, perfmon_en_ctr2	// do not enable ctr1
4869
4870        blbc	r9, perfmon_en_noclr1   // if enclr flag set, clear ctr1 field
4871        lda	r8, 0xffff(r31)
4872        zapnot  r8, 3, r8		// ctr1<15:0> mask
4873        sll	r8, pmctr_v_ctr1, r8
4874        bic	r14, r8, r14		// clear ctr bits
4875        bic	r13, r8, r13		// clear ctr bits
4876
4877perfmon_en_noclr1:
4878//orig	get_addr r8, 3<<pmctr_v_ctl1, r31
4879        LDLI(r8, (3<<pmctr_v_ctl1))
4880        and 	r25, r8, r12		//isolate frequency select bits for ctr1
4881        bic	r14, r8, r14		// clear ctl1 bits in preparation for enabling
4882        or	r14,r12,r14		// or in new ctl1 bits
4883
4884perfmon_en_ctr2:			// enable ctr2
4885        srl	r17, 1, r17		// get ctr2 enable
4886        blbc	r17, perfmon_en_return	// do not enable ctr2 - return
4887
4888        blbc	r9, perfmon_en_noclr2	// if enclr flag set, clear ctr2 field
4889        lda	r8, 0x3FFF(r31)		// ctr2<13:0> mask
4890        sll	r8, pmctr_v_ctr2, r8
4891        bic	r14, r8, r14		// clear ctr bits
4892        bic	r13, r8, r13		// clear ctr bits
4893
4894perfmon_en_noclr2:
4895//orig	get_addr r8, 3<<pmctr_v_ctl2, r31
4896        LDLI(r8, (3<<pmctr_v_ctl2))
4897        and 	r25, r8, r12		//isolate frequency select bits for ctr2
4898        bic	r14, r8, r14		// clear ctl2 bits in preparation for enabling
4899        or	r14,r12,r14		// or in new ctl2 bits
4900
4901perfmon_en_return:
4902        cmovlbs	r16, r14, r13		// if pme enabled, move enables into pmctr
4903                                        // else only do the counter clears
4904        mtpr	r13, ev5__pmctr		// update pmctr ipr
4905
4906//;this code not needed for pass2 and later, but does not hurt to leave it in
4907        lda	r8, 0x3F(r31)
4908//orig	get_pmctr_ctl r25, r12         	// read pmctr ctl; r12=adjusted impure pointer
4909        mfpr	r12, pt_impure
4910        lda	r12, CNS_Q_IPR(r12)
4911        RESTORE_SHADOW(r25,CNS_Q_PM_CTL,r12);
4912
4913        sll	r8, pmctr_v_ctl2, r8	// build ctl mask
4914        and	r8, r14, r14		// isolate new ctl bits
4915        bic	r25, r8, r25		// clear out old ctl value
4916        or	r25, r14, r14		// create new pmctr_ctl
4917//orig	store_reg1 pmctr_ctl, r14, r12, ipr=1
4918        SAVE_SHADOW(r14,CNS_Q_PM_CTL,r12); // r12 still has the adjusted impure ptr
4919
4920        br 	r31, perfmon_success
4921
4922
4923// options...
4924perfmon_ctl:
4925
4926// set mode
4927//orig	get_pmctr_ctl r14, r12         	// read shadow pmctr ctl; r12=adjusted impure pointer
4928        mfpr	r12, pt_impure
4929        lda	r12, CNS_Q_IPR(r12)
4930        RESTORE_SHADOW(r14,CNS_Q_PM_CTL,r12);
4931
4932//orig	get_addr r8, (1<<pmctr_v_killu) | (1<<pmctr_v_killp) | (1<<pmctr_v_killk), r31          // build mode mask for pmctr register
4933        LDLI(r8, ((1<<pmctr_v_killu) | (1<<pmctr_v_killp) | (1<<pmctr_v_killk)))
4934        mfpr	r0, ev5__pmctr
4935        and	r17, r8, r25			// isolate pmctr mode bits
4936        bic	r0, r8, r0			// clear old mode bits
4937        or	r0, r25, r25			// or in new mode bits
4938        mtpr	r25, ev5__pmctr
4939
4940//;the following code will only be used in pass2, but should not hurt anything if run in pass1.
4941        mfpr	r8, icsr
4942        lda	r25, 1<<icsr_v_pma(r31)		// set icsr<pma> if r17<0>=0
4943        bic 	r8, r25, r8			// clear old pma bit
4944        cmovlbs r17, r31, r25			// and clear icsr<pma> if r17<0>=1
4945        or	r8, r25, r8
4946        ev5_pass2 mtpr	r8, icsr		// 4 bubbles to hw_rei
4947        mfpr	r31, pt0			// pad icsr write
4948        mfpr	r31, pt0			// pad icsr write
4949
4950//;the following code not needed for pass2 and later, but should work anyway.
4951        bis     r14, 1, r14       		// set for select processes
4952        blbs	r17, perfmon_sp			// branch if select processes
4953        bic	r14, 1, r14			// all processes
4954perfmon_sp:
4955//orig	store_reg1 pmctr_ctl, r14, r12, ipr=1   // update pmctr_ctl register
4956        SAVE_SHADOW(r14,CNS_Q_PM_CTL,r12); // r12 still has the adjusted impure ptr
4957        br 	r31, perfmon_success
4958
4959// counter frequency select
4960perfmon_freq:
4961//orig	get_pmctr_ctl r14, r12         	// read shadow pmctr ctl; r12=adjusted impure pointer
4962        mfpr	r12, pt_impure
4963        lda	r12, CNS_Q_IPR(r12)
4964        RESTORE_SHADOW(r14,CNS_Q_PM_CTL,r12);
4965
4966        lda	r8, 0x3F(r31)
4967//orig	sll	r8, pmctr_ctl_v_frq2, r8		// build mask for frequency select field
4968// I guess this should be a shift of 4 bits from the above control register structure	.. pb
4969#define	pmctr_ctl_v_frq2_SHIFT 4
4970        sll	r8, pmctr_ctl_v_frq2_SHIFT, r8		// build mask for frequency select field
4971
4972        and 	r8, r17, r17
4973        bic 	r14, r8, r14				// clear out old frequency select bits
4974
4975        or 	r17, r14, r14				// or in new frequency select info
4976//orig	store_reg1 pmctr_ctl, r14, r12, ipr=1   // update pmctr_ctl register
4977        SAVE_SHADOW(r14,CNS_Q_PM_CTL,r12); // r12 still has the adjusted impure ptr
4978
4979        br 	r31, perfmon_success
4980
4981// read counters
4982perfmon_rd:
4983        mfpr	r0, ev5__pmctr
4984        or	r0, 1, r0	// or in return status
4985        hw_rei			// back to user
4986
4987// write counters
4988perfmon_wr:
4989        mfpr	r14, ev5__pmctr
4990        lda	r8, 0x3FFF(r31)		// ctr2<13:0> mask
4991        sll	r8, pmctr_v_ctr2, r8
4992
4993//orig	get_addr r9, 0xFFFFFFFF, r31, verify=0	// ctr2<15:0>,ctr1<15:0> mask
4994        LDLI(r9, (0xFFFFFFFF))
4995        sll	r9, pmctr_v_ctr1, r9
4996        or	r8, r9, r8		// or ctr2, ctr1, ctr0 mask
4997        bic	r14, r8, r14		// clear ctr fields
4998        and	r17, r8, r25		// clear all but ctr  fields
4999        or	r25, r14, r14		// write ctr fields
5000        mtpr	r14, ev5__pmctr		// update pmctr ipr
5001
5002        mfpr	r31, pt0		// pad pmctr write (needed only to keep PVC happy)
5003
5004perfmon_success:
5005        or      r31, 1, r0                     // set success
5006        hw_rei					// back to user
5007
5008perfmon_unknown:
5009        or	r31, r31, r0		// set fail
5010        hw_rei				// back to user
5011
5012#else
5013
5014// end of "real code", start of debug code
5015
5016//+
5017// Debug environment:
5018// (in pass2, always set icsr<pma> to ensure master counter enable is on)
5019// 	R16 = 0		Write to on-chip performance monitor ipr
5020//	   r17 = 	  on-chip ipr
5021//	   r0 = 	  return value of read of on-chip performance monitor ipr
5022//	R16 = 1		Setup Cbox mux selects
5023//	   r17 = 	  Cbox mux selects in same position as in bc_ctl ipr.
5024//	   r0 = 	  return value of read of on-chip performance monitor ipr
5025//
5026//-
5027pal_perfmon_debug:
5028        mfpr	r8, icsr
5029        lda	r9, 1<<icsr_v_pma(r31)
5030        bis	r8, r9, r8
5031        mtpr	r8, icsr
5032
5033        mfpr	r0,  ev5__pmctr		// read old value
5034        bne	r16, cbox_mux_sel
5035
5036        mtpr	r17, ev5__pmctr		// update pmctr ipr
5037        br	r31, end_pm
5038
5039cbox_mux_sel:
5040        // ok, now tackle cbox mux selects
5041        ldah    r14, 0xfff0(r31)
5042        zap     r14, 0xE0, r14                 // Get Cbox IPR base
5043//orig	get_bc_ctl_shadow	r16		// bc_ctl returned
5044        mfpr	r16, pt_impure
5045        lda	r16, CNS_Q_IPR(r16)
5046        RESTORE_SHADOW(r16,CNS_Q_BC_CTL,r16);
5047
5048        lda	r8, 0x3F(r31)			// build mux select mask
5049        sll	r8, BC_CTL_V_PM_MUX_SEL, r8
5050
5051        and	r17, r8, r25			// isolate bc_ctl mux select bits
5052        bic	r16, r8, r16			// isolate old mux select bits
5053        or	r16, r25, r25			// create new bc_ctl
5054        mb					// clear out cbox for future ipr write
5055        stqp	r25, ev5__bc_ctl(r14)		// store to cbox ipr
5056        mb					// clear out cbox for future ipr write
5057//orig	update_bc_ctl_shadow	r25, r16	// r25=value, r16-overwritten with adjusted impure ptr
5058        mfpr	r16, pt_impure
5059        lda	r16, CNS_Q_IPR(r16)
5060        SAVE_SHADOW(r25,CNS_Q_BC_CTL,r16);
5061
5062end_pm:	hw_rei
5063
5064#endif
5065
5066
5067//;The following code is a workaround for a cpu bug where Istream prefetches to
5068//;super-page address space in user mode may escape off-chip.
5069#if spe_fix != 0
5070
5071        ALIGN_BLOCK
5072hw_rei_update_spe:
5073        mfpr	r12, pt_misc			// get previous mode
5074        srl	r11, osfps_v_mode, r10		// isolate current mode bit
5075        and	r10, 1, r10
5076        extbl	r12, 7, r8			// get previous mode field
5077        and	r8, 1, r8	 		// isolate previous mode bit
5078        cmpeq	r10, r8, r8			// compare previous and current modes
5079        beq	r8, hw_rei_update_spe_5_
5080        hw_rei					// if same, just return
5081
5082hw_rei_update_spe_5_:
5083
5084#if fill_err_hack != 0
5085
5086        fill_error_hack
5087#endif
5088
5089        mfpr	r8, icsr			// get current icsr value
5090        ldah	r9, (2<<(icsr_v_spe-16))(r31)	// get spe bit mask
5091        bic	r8, r9, r8			// disable spe
5092        xor	r10, 1, r9			// flip mode for new spe bit
5093        sll	r9, icsr_v_spe+1, r9		// shift into position
5094        bis	r8, r9, r8			// enable/disable spe
5095        lda	r9, 1(r31)			// now update our flag
5096        sll	r9, pt_misc_v_cm, r9		// previous mode saved bit mask
5097        bic	r12, r9, r12			// clear saved previous mode
5098        sll	r10, pt_misc_v_cm, r9		// current mode saved bit mask
5099        bis	r12, r9, r12			// set saved current mode
5100        mtpr	r12, pt_misc			// update pt_misc
5101        mtpr	r8, icsr			// update icsr
5102
5103#if osf_chm_fix != 0
5104
5105
5106        blbc	r10, hw_rei_update_spe_10_			// branch if not user mode
5107
5108        mb					// ensure no outstanding fills
5109        lda	r12, 1<<dc_mode_v_dc_ena(r31)	// User mode
5110        mtpr	r12, dc_mode			// Turn on dcache
5111        mtpr	r31, dc_flush			// and flush it
5112        br	r31, pal_ic_flush
5113
5114hw_rei_update_spe_10_:	mfpr	r9, pt_pcbb			// Kernel mode
        ldqp	r9, osfpcb_q_fen(r9)		// get FEN
5116        blbc	r9, pal_ic_flush		// return if FP disabled
5117        mb					// ensure no outstanding fills
5118        mtpr	r31, dc_mode			// turn off dcache
5119#endif
5120
5121
5122        br	r31, pal_ic_flush		// Pal restriction - must flush Icache if changing ICSR<SPE>
5123#endif
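
// .sbttl	"copypal - bcopy used by the CALL_PAL 0xBF entry above"
//+
//
// Entry:
//	Reached from CallPal_OpcDecBF (CALL_PAL 0xBF), per the EGH note above.
//	r16/a0 = destination virtual address
//	r17/a1 = source virtual address
//	r18/a2 = length in bytes
//
// Function:
//	Forward copy of a2 bytes from a1 to a0, a quadword at a time, with
//	separate paths for equally- and differently-aligned src/dst and
//	masked merges for the partial quadwords at either end.
//	r0/v0 returns the original destination; r8-r10, r12-r14 and r25
//	are used as scratch and r16-r18 are advanced.
//-
//
// Architectural effect only (the byte loop below is just a model; the real
// code uses ldq_u/extq/insq/msk merging, and like the real code it does not
// handle overlapping regions):
//
//	unsigned char *copypal_model(unsigned char *dst, const unsigned char *src, long len)
//	{
//	    unsigned char *ret = dst;
//	    while (len-- > 0)
//	        *dst++ = *src++;
//	    return ret;
//	}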
5124
5125
copypal_impl:
        mov r16, r0		// return the original dest (a0) in v0
        ble r18, finished	# if len <= 0 we are finished
        ldq_u r8, 0(r17)	// fetch first source quadword
        xor r17, r16, r9	// do src and dst share quadword alignment?
        and r9, 7, r9
        and r16, 7, r10		// dst offset within its quadword
        bne r9, unaligned	// no - take the shifting (unaligned) path
        beq r10, aligned	// yes, and both are already quadword aligned
        ldq_u r9, 0(r16)	// partially aligned: merge leading dst bytes
5136        addq r18, r10, r18
5137        mskqh r8, r17, r8
5138        mskql r9, r17, r9
5139        bis r8, r9, r8
aligned:			// src and dst equally aligned: copy whole quadwords
5141        subq r18, 1, r10
5142        bic r10, 7, r10
5143        and r18, 7, r18
5144        beq r10, aligned_done
5145loop:
5146        stq_u r8, 0(r16)
5147        ldq_u r8, 8(r17)
5148        subq r10, 8, r10
5149        lda r16,8(r16)
5150        lda r17,8(r17)
5151        bne r10, loop
5152aligned_done:
5153        bne r18, few_left
5154        stq_u r8, 0(r16)
5155        br r31, finished
few_left:			// merge and store the final partial quadword
5157        mskql r8, r18, r10
5158        ldq_u r9, 0(r16)
5159        mskqh r9, r18, r9
5160        bis r10, r9, r10
5161        stq_u r10, 0(r16)
5162        br r31, finished
unaligned:			// src and dst differently aligned: extract, shift and merge
5164        addq r17, r18, r25
5165        cmpule r18, 8, r9
5166        bne r9, unaligned_few_left
5167        beq r10, unaligned_dest_aligned
5168        and r16, 7, r10
5169        subq r31, r10, r10
5170        addq r10, 8, r10
5171        ldq_u r9, 7(r17)
5172        extql r8, r17, r8
5173        extqh r9, r17, r9
5174        bis r8, r9, r12
5175        insql r12, r16, r12
5176        ldq_u r13, 0(r16)
5177        mskql r13, r16, r13
5178        bis r12, r13, r12
5179        stq_u r12, 0(r16)
5180        addq r16, r10, r16
5181        addq r17, r10, r17
5182        subq r18, r10, r18
5183        ldq_u r8, 0(r17)
5184unaligned_dest_aligned:
5185        subq r18, 1, r10
5186        bic r10, 7, r10
5187        and r18, 7, r18
5188        beq r10, unaligned_partial_left
5189unaligned_loop:
5190        ldq_u r9, 7(r17)
5191        lda r17, 8(r17)
5192        extql r8, r17, r12
5193        extqh r9, r17, r13
5194        subq r10, 8, r10
5195        bis r12, r13, r13
5196        stq r13, 0(r16)
5197        lda r16, 8(r16)
5198        beq r10, unaligned_second_partial_left
5199        ldq_u r8, 7(r17)
5200        lda r17, 8(r17)
5201        extql r9, r17, r12
5202        extqh r8, r17, r13
5203        bis r12, r13, r13
5204        subq r10, 8, r10
5205        stq r13, 0(r16)
5206        lda r16, 8(r16)
5207        bne r10, unaligned_loop
5208unaligned_partial_left:
5209        mov r8, r9
5210unaligned_second_partial_left:
5211        ldq_u r8, -1(r25)
5212        extql r9, r17, r9
5213        extqh r8, r17, r8
5214        bis r8, r9, r8
5215        bne r18, few_left
5216        stq_u r8, 0(r16)
5217        br r31, finished
5218unaligned_few_left:
5219        ldq_u r9, -1(r25)
5220        extql r8, r17, r8
5221        extqh r9, r17, r9
5222        bis r8, r9, r8
5223        insqh r8, r16, r9
5224        insql r8, r16, r8
5225        lda r12, -1(r31)
5226        mskql r12, r18, r13
5227        cmovne r13, r13, r12
5228        insqh r12, r16, r13
5229        insql r12, r16, r12
5230        addq r16, r18, r10
5231        ldq_u r14, 0(r16)
5232        ldq_u r25, -1(r10)
5233        bic r14, r12, r14
5234        bic r25, r13, r25
5235        and r8, r12, r8
5236        and r9, r13, r9
5237        bis r8, r14, r8
5238        bis r9, r25, r9
5239        stq_u r9, -1(r10)
5240        stq_u r8, 0(r16)
5241finished:
5242        hw_rei
5243