1/*
2 * Copyright (c) 2003, 2004
3 * The Regents of The University of Michigan
4 * All Rights Reserved
5 *
6 * This code is part of the M5 simulator, developed by Nathan Binkert,
7 * Erik Hallnor, Steve Raasch, and Steve Reinhardt, with contributions
8 * from Ron Dreslinski, Dave Greene, Lisa Hsu, Ali Saidi, and Andrew
9 * Schultz.
10 *
11 * Permission is granted to use, copy, create derivative works and
12 * redistribute this software and such derivative works for any
13 * purpose, so long as the copyright notice above, this grant of
14 * permission, and the disclaimer below appear in all copies made; and
15 * so long as the name of The University of Michigan is not used in
16 * any advertising or publicity pertaining to the use or distribution
17 * of this software without specific, written prior authorization.
18 *
19 * THIS SOFTWARE IS PROVIDED AS IS, WITHOUT REPRESENTATION FROM THE
20 * UNIVERSITY OF MICHIGAN AS TO ITS FITNESS FOR ANY PURPOSE, AND
21 * WITHOUT WARRANTY BY THE UNIVERSITY OF MICHIGAN OF ANY KIND, EITHER
22 * EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION THE IMPLIED
23 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
24 * PURPOSE. THE REGENTS OF THE UNIVERSITY OF MICHIGAN SHALL NOT BE
25 * LIABLE FOR ANY DAMAGES, INCLUDING DIRECT, SPECIAL, INDIRECT,
26 * INCIDENTAL, OR CONSEQUENTIAL DAMAGES, WITH RESPECT TO ANY CLAIM
27 * ARISING OUT OF OR IN CONNECTION WITH THE USE OF THE SOFTWARE, EVEN
28 * IF IT HAS BEEN OR IS HEREAFTER ADVISED OF THE POSSIBILITY OF SUCH
29 * DAMAGES.
30 */
31
32/*
33Copyright 1992, 1993, 1994, 1995 Hewlett-Packard Development Company, L.P.
34
35Permission is hereby granted, free of charge, to any person obtaining a copy of
36this software and associated documentation files (the "Software"), to deal in
37the Software without restriction, including without limitation the rights to
38use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
39of the Software, and to permit persons to whom the Software is furnished to do
40so, subject to the following conditions:
41
42The above copyright notice and this permission notice shall be included in all
43copies or substantial portions of the Software.
44
45THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
46IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
47FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
48AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
49LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
50OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
51SOFTWARE.
52*/
53
1// modified to use the Hudson style "impure.h" instead of ev5_impure.sdl
2// since we don't have a mechanism to expand the data structures.... pb Nov/95
3
// build_fixed_image: not sure what this means
// real_mm to be replaced during rewrite
// remove_save_state and remove_restore_state can be removed to save space ??
7
8
9#include "ev5_defs.h"
10#include "ev5_impure.h"
11#include "ev5_alpha_defs.h"
12#include "ev5_paldef.h"
13#include "ev5_osfalpha_defs.h"
14#include "fromHudsonMacros.h"
15#include "fromHudsonOsf.h"
16#include "dc21164FromGasSources.h"
17
18#ifdef SIMOS
19#define DEBUGSTORE(c) nop
20#else
21#define DEBUGSTORE(c) \
22 lda r13, c(zero) ; \
23 bsr r25, debugstore
24#endif
25
26#define DEBUG_EXC_ADDR()\
27 bsr r25, put_exc_addr; \
28 DEBUGSTORE(13) ; \
29 DEBUGSTORE(10)
30
31#define egore 0
32#define acore 0
33#define beh_model 0
34#define ev5_p2 1
35#define ev5_p1 0
36#define ldvpte_bug_fix 1
37#define osf_chm_fix 0
38
39// Do we want to do this?? pb
40#define spe_fix 0
41// Do we want to do this?? pb
42#define build_fixed_image 0
43
44#define ev5_pass2
45#define enable_p4_fixups 0
46#define osf_svmin 1
47#define enable_physical_console 0
48#define fill_err_hack 0
49#define icflush_on_tbix 0
50#define max_cpuid 1
51#define perfmon_debug 0
52#define rawhide_system 0
53#define rax_mode 0
54
55
56// This is the fix for the user-mode super page references causing the machine to crash.
57#if (spe_fix == 1) && (build_fixed_image==1)
58#define hw_rei_spe br r31, hw_rei_update_spe
59#else
60#define hw_rei_spe hw_rei
61#endif
62
63
64// redefine a few of the distribution-code names to match the Hudson gas names.
65// opcodes
66#define ldqp ldq_p
67#define stqp stq_p
68#define ldlp ldl_p
69#define stlp stl_p
70
71#define r0 $0
72#define r1 $1
73#define r2 $2
74#define r3 $3
75#define r4 $4
76#define r5 $5
77#define r6 $6
78#define r7 $7
79#define r8 $8
80#define r9 $9
81#define r10 $10
82#define r11 $11
83#define r12 $12
84#define r13 $13
85#define r14 $14
86#define r15 $15
87#define r16 $16
88#define r17 $17
89#define r18 $18
90#define r19 $19
91#define r20 $20
92#define r21 $21
93#define r22 $22
94#define r23 $23
95#define r24 $24
96#define r25 $25
97#define r26 $26
98#define r27 $27
99#define r28 $28
100#define r29 $29
101#define r30 $30
102#define r31 $31
103
104// .title "EV5 OSF PAL"
105// .ident "V1.18"
106//
107//****************************************************************************
108//* *
109//* Copyright (c) 1992, 1993, 1994, 1995 *
110//* by DIGITAL Equipment Corporation, Maynard, Mass. *
111//* *
112//* This software is furnished under a license and may be used and copied *
113//* only in accordance with the terms of such license and with the *
114//* inclusion of the above copyright notice. This software or any other *
115//* copies thereof may not be provided or otherwise made available to any *
116//* other person. No title to and ownership of the software is hereby *
117//* transferred. *
118//* *
119//* The information in this software is subject to change without notice *
120//* and should not be construed as a commitment by DIGITAL Equipment *
121//* Corporation. *
122//* *
123//* DIGITAL assumes no responsibility for the use or reliability of its *
124//* software on equipment which is not supplied by DIGITAL. *
125//* *
126//****************************************************************************
127
128// .sbttl "Edit History"
129//+
130// Who Rev When What
131// ------------ --- ----------- --------------------------------
132// DB 0.0 03-Nov-1992 Start
133// DB 0.1 28-Dec-1992 add swpctx
134// DB 0.2 05-Jan-1993 Bug: PVC found mtpr dtb_CM -> virt ref bug
135// DB 0.3 11-Jan-1993 rearrange trap entry points
136// DB 0.4 01-Feb-1993 add tbi
137// DB 0.5 04-Feb-1993 real MM, kludge reset flow, kludge swppal
138// DB 0.6 09-Feb-1993 Bug: several stack pushers used r16 for pc (should be r14)
139// DB 0.7 10-Feb-1993 Bug: pushed wrong PC (+8) on CALL_PAL OPCDEC
140// Bug: typo on register number for store in wrunique
141// Bug: rti to kern uses r16 as scratch
142// Bug: callsys saving wrong value in pt_usp
143// DB 0.8 16-Feb-1993 PVC: fix possible pt write->read bug in wrkgp, wrusp
144// DB 0.9 18-Feb-1993 Bug: invalid_dpte_handler shifted pte twice
145// Bug: rti stl_c could corrupt the stack
146// Bug: unaligned returning wrong value in r17 (or should be and)
147// DB 0.10 19-Feb-1993 Add draina, rd/wrmces, cflush, cserve, interrupt
148// DB 0.11 23-Feb-1993 Turn caches on in reset flow
149// DB 0.12 10-Mar-1993 Bug: wrong value for icsr for FEN in kern mode flow
150// DB 0.13 15-Mar-1993 Bug: wrong value pushed for PC in invalid_dpte_handler if stack push tbmisses
151// DB 0.14 23-Mar-1993 Add impure pointer paltemp, reshuffle some other paltemps to match VMS
152// DB 0.15 15-Apr-1993 Combine paltemps for WHAMI and MCES
153// DB 0.16 12-May-1993 Update reset
154// New restriction: no mfpr exc_addr in cycle 1 of call_pal flows
155// Bug: in wrmces, not clearing DPC, DSC
156// Update swppal
157// Add pal bugchecks, pal_save_state, pal_restore_state
158// DB 0.17 24-May-1993 Add dfault_in_pal flow; fixup stack builder to have common state for pc/ps.
159// New restriction: No hw_rei_stall in 0,1,2 after mtpr itb_asn
160// DB 0.18 26-May-1993 PVC fixes
161// JM 0.19 01-jul-1993 Bug: OSFPAL_CALPAL_OPCDEC, TRAP_OPCDEC -- move mt exc_addr after stores
162// JM 0.20 07-jul-1993 Update cns_ and mchk_ names for impure.mar conversion to .sdl
163// Bug: exc_addr was being loaded before stores that could dtb_miss in the following
164// routines: TRAP_FEN,FEN_TO_OPCDEC,CALL_PAL_CALLSYS,RTI_TO_KERN
165// JM 0.21 26-jul-1993 Bug: move exc_addr load after ALL stores in the following routines:
166// TRAP_IACCVIO::,TRAP_OPCDEC::,TRAP_ARITH::,TRAP_FEN::
167// dfault_trap_cont:,fen_to_opcdec:,invalid_dpte_handler:
168// osfpal_calpal_opcdec:,CALL_PAL_callsys::,TRAP_UNALIGN::
169// Bugs from PVC: trap_unalign - mt pt0 ->mf pt0 within 2 cycles
170// JM 0.22 28-jul-1993 Add WRIPIR instruction
171// JM 0.23 05-aug-1993 Bump version number for release
172// JM 0.24 11-aug-1993 Bug: call_pal_swpipl - palshadow write -> hw_rei violation
173// JM 0.25 09-sep-1993 Disable certain "hidden" pvc checks in call_pals;
174// New restriction: No hw_rei_stall in 0,1,2,3,4 after mtpr itb_asn - affects HALT(raxmode),
175// and SWPCTX
176// JM 0.26 07-oct-1993 Re-implement pal_version
177// JM 0.27 12-oct-1993 One more time: change pal_version format to conform to SRM
178// JM 0.28 14-oct-1993 Change ic_flush routine to pal_ic_flush
179// JM 0.29 19-oct-1993 BUG(?): dfault_in_pal: use exc_addr to check for dtbmiss,itbmiss check instead
180// of mm_stat<opcode>. mm_stat contains original opcode, not hw_ld.
181// JM 0.30 28-oct-1993 BUG: PVC violation - mf exc_addr in first cycles of call_pal in rti,retsys
182// JM 0.31 15-nov-1993 BUG: WRFEN trashing r0
183// JM 0.32 21-nov-1993 BUG: dtb_ldq,itb_ldq (used in dfault_in_pal) not defined when real_mm=0
184// JM 0.33 24-nov-1993 save/restore_state -
185// BUG: use ivptbr to restore mvptbr
//                              BUG: adjust hw_ld/st base/offsets to accommodate 10-bit offset limit
//                              CHANGE: Load 2 pages into dtb to accommodate compressed logout area/multiprocessors
188// JM 0.34 20-dec-1993 BUG: set r11<mode> to kernel for ksnv halt case
189// BUG: generate ksnv halt when tb miss on kernel stack accesses
190// save exc_addr in r14 for invalid_dpte stack builder
191// JM 0.35 30-dec-1993 BUG: PVC violation in trap_arith - mt exc_sum in shadow of store with mf exc_mask in
192// the same shadow
193// JM 0.36 6-jan-1994 BUG: fen_to_opcdec - savePC should be PC+4, need to save old PS, update new PS
//                              New palcode restriction: mt icsr<fpe,hwe> --> 3 bubbles to hw_rei --affects wrfen
195// JM 0.37 25-jan-1994 BUG: PVC violations in restore_state - mt dc_mode/maf_mode ->mbox instructions
196// Hide impure area manipulations in macros
197// BUG: PVC violation in save and restore state-- move mt icsr out of shadow of ld/st
198// Add some pvc_violate statements
199// JM 0.38 1-feb-1994 Changes to save_state: save pt1; don't save r31,f31; update comments to reflect reality;
200// Changes to restore_state: restore pt1, icsr; don't restore r31,f31; update comments
201// Add code to ensure fen bit set in icsr before ldt
202// conditionally compile rax_more_reset out.
203// move ldqp,stqp macro definitions to ev5_pal_macros.mar and add .mcall's for them here
204// move rax reset stuff to ev5_osf_system_pal.m64
205// JM 0.39 7-feb-1994 Move impure pointer to pal scratch space. Use former pt_impure for bc_ctl shadow
206// and performance monitoring bits
207// Change to save_state routine to save more iprs.
208// JM 0.40 19-feb-1994 Change algorithm in save/restore_state routines; add f31,r31 back in
209// JM 0.41 21-feb-1994 Add flags to compile out save/restore state (not needed in some systems)
210// remove_save_state,remove_restore_state;fix new pvc violation in save_state
211// JM 0.42 22-feb-1994 BUG: save_state overwriting r3
212// JM 0.43 24-feb-1994 BUG: save_state saving wrong icsr
213// JM 0.44 28-feb-1994 Remove ic_flush from wr_tbix instructions
214// JM 0.45 15-mar-1994 BUG: call_pal_tbi trashes a0 prior to range check (instruction order problem)
215// New pal restriction in pal_restore_state: icsr<fpe>->floating instr = 3 bubbles
216// Add exc_sum and exc_mask to pal_save_state (not restore)
217// JM 0.46 22-apr-1994 Move impure pointer back into paltemp; Move bc_ctl shadow and pmctr_ctl into impure
218// area.
219// Add performance counter support to swpctx and wrperfmon
220// JM 0.47 9-may-1994 Bump version # (for ev5_osf_system_pal.m64 sys_perfmon fix)
221// JM 0.48 13-jun-1994 BUG: trap_interrupt --> put new ev5 ipl at 30 for all osfipl6 interrupts
222// JM 0.49 8-jul-1994 BUG: In the unlikely (impossible?) event that the branch to pal_pal_bug_check is
223// taken in the interrupt flow, stack is pushed twice.
224// SWPPAL - update to support ECO 59 to allow 0 as a valid address
225// Add itb flush to save/restore state routines
226// Change hw_rei to hw_rei_stall in ic_flush routine. Shouldn't be necessary, but
227// conforms to itbia restriction.
228// Added enable_physical_console flag (for enter/exit console routines only)
229// JM 0.50 29-jul-1994 Add code to dfault & invalid_dpte_handler to ignore exceptions on a
230// load to r31/f31. changed dfault_fetch_err to dfault_fetch_ldr31_err and
231// nmiss_fetch_err to nmiss_fetch_ldr31_err.
232// JM 1.00 1-aug-1994 Add pass2 support (swpctx)
233// JM 1.01 2-aug-1994 swppal now passes bc_ctl/bc_config in r1/r2
234// JM 1.02 15-sep-1994 BUG: swpctx missing shift of pme bit to correct position in icsr (pass2)
235// Moved perfmon code here from system file.
236// BUG: pal_perfmon - enable function not saving correct enables when pme not set (pass1)
237// JM 1.03 3-oct-1994 Added (pass2 only) code to wrperfmon enable function to look at pme bit.
238// JM 1.04 14-oct-1994 BUG: trap_interrupt - ISR read (and saved) before INTID -- INTID can change
239// after ISR read, but we won't catch the ISR update. reverse order
240// JM 1.05 17-nov-1994 Add code to dismiss UNALIGN trap if LD r31/F31
241// JM 1.06 28-nov-1994 BUG: missing mm_stat shift for store case in trap_unalign (new bug due to "dismiss" code)
242// JM 1.07 1-dec-1994 EV5 PASS1,2,3 BUG WORKAROUND: Add flag LDVPTE_BUG_FIX. In DTBMISS_DOUBLE, branch to
243// DTBMISS_SINGLE if not in palmode.
244// JM 1.08 9-jan-1995 Bump version number for change to EV5_OSF_SYSTEM_PAL.M64 - ei_stat fix in mchk logout frame
245// JM 1.09 2-feb-1995 Add flag "spe_fix" and accompanying code to workaround pre-pass4 bug: Disable Ibox
246// superpage mode in User mode and re-enable in kernel mode.
247// EV5_OSF_SYSTEM_PAL.M64 and EV5_PALDEF.MAR (added pt_misc_v_cm) also changed to support this.
248// JM 1.10 24-feb-1995 Set ldvpte_bug_fix regardless of ev5 pass. set default to ev5_p2
249// ES 1.11 10-mar-1995 Add flag "osf_chm_fix" to enable dcache in user mode only to avoid
250// cpu bug.
251// JM 1.12 17-mar-1995 BUG FIX: Fix F0 corruption problem in pal_restore_state
252// ES 1.13 17-mar-1995 Refine osf_chm_fix
253// ES 1.14 20-mar-1995 Don't need as many stalls before hw_rei_stall in chm_fix
254// ES 1.15 21-mar-1995 Add a stall to avoid a pvc violation in pal_restore_state
255// Force pvc checking of exit_console
// ES    1.16    26-apr-1995 In the wrperfmon disable function, correct meaning of R17<2:0> to ctl2,ctl1,ctl0
257// ES 1.17 01-may-1995 In hw_rei_update_spe code, in the osf_chm fix, use bic and bis (self-correcting)
258// instead of xor to maintain previous mode in pt_misc
259// ES 1.18 14-jul-1995 In wrperfmon enable on pass2, update pmctr even if current process does
260// not have pme set. The bits in icsr maintain the master enable state.
261// In sys_reset, add icsr<17>=1 for ev56 byte/word eco enable
262//
263#define vmaj 1
264#define vmin 18
265#define vms_pal 1
266#define osf_pal 2
267#define pal_type osf_pal
268#define osfpal_version_l ((pal_type<<16) | (vmaj<<8) | (vmin<<0))
269//-
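// For reference, a minimal C-style sketch of how the two version longwords
// stored at reset + 8 are packed and unpacked (variable names here are
// illustrative only, not taken from the headers):
//
//   uint32_t version_l = (pal_type << 16) | (vmaj << 8) | (vmin << 0);
//   uint32_t version_h = (max_cpuid << 16) | (osf_svmin << 0);
//
//   int type  = (version_l >> 16) & 0xff;   // 2 = OSF PAL
//   int major = (version_l >>  8) & 0xff;   // 1
//   int minor = (version_l >>  0) & 0xff;   // 18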
270
271// .sbttl "PALtemp register usage"
272
273//+
274// The EV5 Ibox holds 24 PALtemp registers. This maps the OSF PAL usage
275// for these PALtemps:
276//
277// pt0 local scratch
278// pt1 local scratch
279// pt2 entUna pt_entUna
280// pt3 CPU specific impure area pointer pt_impure
281// pt4 memory management temp
282// pt5 memory management temp
283// pt6 memory management temp
284// pt7 entIF pt_entIF
285// pt8 intmask pt_intmask
286// pt9 entSys pt_entSys
287// pt10
288// pt11 entInt pt_entInt
289// pt12 entArith pt_entArith
290// pt13 reserved for system specific PAL
291// pt14 reserved for system specific PAL
292// pt15 reserved for system specific PAL
293// pt16 MISC: scratch ! WHAMI<7:0> ! 0 0 0 MCES<4:0> pt_misc, pt_whami, pt_mces
294// pt17 sysval pt_sysval
295// pt18 usp pt_usp
296// pt19 ksp pt_ksp
297// pt20 PTBR pt_ptbr
298// pt21 entMM pt_entMM
299// pt22 kgp pt_kgp
300// pt23 PCBB pt_pcbb
301//
302//-
303
304// .sbttl "PALshadow register usage"
305//
306//+
307//
308// EV5 shadows R8-R14 and R25 when in PALmode and ICSR<shadow_enable> = 1.
309// This maps the OSF PAL usage of R8 - R14 and R25:
310//
311// r8 ITBmiss/DTBmiss scratch
312// r9 ITBmiss/DTBmiss scratch
313// r10 ITBmiss/DTBmiss scratch
314// r11 PS
315// r12 local scratch
316// r13 local scratch
317// r14 local scratch
318// r25 local scratch
319//
320//
321//-
322
323// .sbttl "ALPHA symbol definitions"
324// _OSF_PSDEF GLOBAL
325// _OSF_PTEDEF GLOBAL
326// _OSF_VADEF GLOBAL
327// _OSF_PCBDEF GLOBAL
328// _OSF_SFDEF GLOBAL
329// _OSF_MMCSR_DEF GLOBAL
330// _SCBDEF GLOBAL
331// _FRMDEF GLOBAL
332// _EXSDEF GLOBAL
333// _OSF_A0_DEF GLOBAL
334// _MCESDEF GLOBAL
335
336// .sbttl "EV5 symbol definitions"
337
338// _EV5DEF
339// _PALTEMP
340// _MM_STAT_DEF
341// _EV5_MM
342// _EV5_IPLDEF
343
344// _HALT_CODES GLOBAL
345// _MCHK_CODES GLOBAL
346
347// _PAL_IMPURE
348// _PAL_LOGOUT
349
350
351
352
353// .sbttl "PALcode configuration options"
354
355// There are a number of options that may be assembled into this version of
356// PALcode. They should be adjusted in a prefix assembly file (i.e. do not edit
357// the following). The options that can be adjusted cause the resultant PALcode
358// to reflect the desired target system.
359
360
361#define osfpal 1 // This is the PALcode for OSF.
362
363#ifndef rawhide_system
364
365#define rawhide_system 0
366#endif
367
368
369#ifndef real_mm
370// Page table translation vs 1-1 mapping
371#define real_mm 1
372#endif
373
374
375#ifndef rax_mode
376
377#define rax_mode 0
378#endif
379
380#ifndef egore
381// End of reset flow starts a program at 200000(hex).
382#define egore 1
383#endif
384
385#ifndef acore
386// End of reset flow starts a program at 40000(hex).
387#define acore 0
388#endif
389
390
391// assume acore+egore+rax_mode lt 2 // Assertion checker
392
393#ifndef beh_model
394// EV5 behavioral model specific code
395#define beh_model 1
396#endif
397
398#ifndef init_cbox
399// Reset flow init of Bcache and Scache
400#define init_cbox 1
401#endif
402
#ifndef disable_crd
// Decides whether the reset flow will disable
// correctable read interrupts via ICSR.
#define disable_crd 0
#endif

409#ifndef perfmon_debug
410#define perfmon_debug 0
411#endif
412
413#ifndef icflush_on_tbix
414#define icflush_on_tbix 0
415#endif
416
417#ifndef remove_restore_state
418#define remove_restore_state 0
419#endif
420
421#ifndef remove_save_state
422#define remove_save_state 0
423#endif
424
425#ifndef enable_physical_console
426#define enable_physical_console 0
427#endif
428
429#ifndef ev5_p1
430#define ev5_p1 0
431#endif
432
433#ifndef ev5_p2
434#define ev5_p2 1
435#endif
436
437// assume ev5_p1+ev5_p2 eq 1
438
439#ifndef ldvpte_bug_fix
440#define ldvpte_bug_fix 1 // If set, fix ldvpte bug in dtbmiss_double flow.
441#endif
442
#ifndef spe_fix
// If set, disable super-page mode in user mode and re-enable
// it in kernel mode. Workaround for a cpu bug.
#define spe_fix 0
#endif
448#ifndef build_fixed_image
449#define build_fixed_image 0
450#endif
451
452
#ifndef fill_err_hack
// If set, disable fill_error mode in user mode and re-enable
// it in kernel mode. Workaround for a cpu bug.
#define fill_err_hack 0
#endif
459
460// .macro hw_rei_spe
461// .iif eq spe_fix, hw_rei
462//#if spe_fix != 0
463//
464//
465//#define hw_rei_chm_count hw_rei_chm_count + 1
466// p4_fixup_label \hw_rei_chm_count
467// .iif eq build_fixed_image, br r31, hw_rei_update_spe
468// .iif ne build_fixed_image, hw_rei
469//#endif
470//
471// .endm
472
473// Add flag "osf_chm_fix" to enable dcache in user mode only
474// to avoid cpu bug.
475
#ifndef osf_chm_fix
// If set, enable D-Cache in user mode only.
#define osf_chm_fix 0
#endif

#if osf_chm_fix != 0
#define hw_rei_chm_count 0
#endif
485
486#if osf_chm_fix != 0
487
488#define hw_rei_stall_chm_count 0
489#endif
490
#ifndef enable_p4_fixups
// If set, do EV5 Pass 4 fixups
#define enable_p4_fixups 0
#endif

// Only allow fixups if fix enabled
#if spe_fix == 0
#define osf_chm_fix 0
#endif

#if spe_fix == 0
#define enable_p4_fixups 0
#endif
508
509 //Turn off fill_errors and MEM_NEM in user mode
510// .macro fill_error_hack ?L10_, ?L20_, ?L30_, ?L40_
511// //save r22,r23,r24
512// stqp r22, 0x150(r31) //add
513// stqp r23, 0x158(r31) //contents
514// stqp r24, 0x160(r31) //bit mask
515//
516// lda r22, 0x82(r31)
517// ldah r22, 0x8740(r22)
518// sll r22, 8, r22
519// ldlp r23, 0x80(r22) // r23 <- contents of CIA_MASK
520// bis r23,r31,r23
521//
522// lda r24, 0x8(r31) // r24 <- MEM_NEM bit
523// beq r10, L10_ // IF user mode (r10<0> == 0) pal mode
524// bic r23, r24, r23 // set fillerr_en bit
525// br r31, L20_ // ELSE
526//L10_: bis r23, r24, r23 // clear fillerr_en bit
527//L20_: // ENDIF
528//
529// stlp r23, 0x80(r22) // write back the CIA_MASK register
530// mb
531// ldlp r23, 0x80(r22)
532// bis r23,r31,r23
533// mb
534//
535// lda r22, 1(r31) // r22 <- 87.4000.0100 ptr to CIA_CTRL
536// ldah r22, 0x8740(r22)
537// sll r22, 8, r22
538// ldlp r23, 0(r22) // r23 <- contents of CIA_CTRL
539// bis r23,r31,r23
540//
541//
542// lda r24, 0x400(r31) // r9 <- fillerr_en bit
543// beq r10, L30_ // IF user mode (r10<0> == 0) pal mode
544// bic r23, r24, r23 // set fillerr_en bit
545// br r31, L40_ // ELSE
546//L30_: bis r23, r24, r23 // clear fillerr_en bit
547//L40_: // ENDIF
548//
549// stlp r23, 0(r22) // write back the CIA_CTRL register
550// mb
551// ldlp r23, 0(r22)
552// bis r23,r31,r23
553// mb
554//
555// //restore r22,r23,r24
556// ldqp r22, 0x150(r31)
557// ldqp r23, 0x158(r31)
558// ldqp r24, 0x160(r31)
559//
560// .endm
561
562// multiprocessor support can be enabled for a max of n processors by
563// setting the following to the number of processors on the system.
564// Note that this is really the max cpuid.
565
566#ifndef max_cpuid
567#define max_cpuid 8
568#endif
569
570#ifndef osf_svmin // platform specific palcode version number
571#define osf_svmin 0
572#endif
573
574
575#define osfpal_version_h ((max_cpuid<<16) | (osf_svmin<<0))
576
577// .mcall ldqp // override macro64 definition with macro from library
578// .mcall stqp // override macro64 definition with macro from library
579
580
581// .psect _pal,mix
582// huh pb pal_base:
583// huh pb #define current_block_base . - pal_base
584
585// .sbttl "RESET - Reset Trap Entry Point"
586//+
587// RESET - offset 0000
588// Entry:
589// Vectored into via hardware trap on reset, or branched to
590// on swppal.
591//
592// r0 = whami
593// r1 = pal_base
594// r2 = base of scratch area
595// r3 = halt code
596//
597//
598// Function:
599//
600//-
601
602 .text 0
603 . = 0x0000
604 .globl Pal_Base
605Pal_Base:
606 HDW_VECTOR(PAL_RESET_ENTRY)
607Trap_Reset:
608 nop
609#ifdef SIMOS
610 /*
611 * store into r1
612 */
613 br r1,sys_reset
614#else
615 /* following is a srcmax change */
616
617 DEBUGSTORE(0x41)
618 /* The original code jumped using r1 as a linkage register to pass the base
619 of PALcode to the platform specific code. We use r1 to pass a parameter
620 from the SROM, so we hardcode the address of Pal_Base in platform.s
621 */
622 br r31, sys_reset
623#endif
624
625 // Specify PAL version info as a constant
626 // at a known location (reset + 8).
627
628 .long osfpal_version_l // <pal_type@16> ! <vmaj@8> ! <vmin@0>
629 .long osfpal_version_h // <max_cpuid@16> ! <osf_svmin@0>
630 .long 0
631 .long 0
632pal_impure_start:
633 .quad 0
634pal_debug_ptr:
635 .quad 0 // reserved for debug pointer ; 20
636#if beh_model == 0
637
638
639#if enable_p4_fixups != 0
640
641
642 .quad 0
643 .long p4_fixup_hw_rei_fixup_table
644#endif
645
646#else
647
648 .quad 0 //
649 .quad 0 //0x0030
650 .quad 0
651 .quad 0 //0x0040
652 .quad 0
653 .quad 0 //0x0050
654 .quad 0
655 .quad 0 //0x0060
656 .quad 0
657pal_enter_cns_address:
658 .quad 0 //0x0070 -- address to jump to from enter_console
659 .long <<sys_exit_console-pal_base>+1> //0x0078 -- offset to sys_exit_console (set palmode bit)
660#endif
661
662
663
664
665// .sbttl "IACCVIO- Istream Access Violation Trap Entry Point"
666
667//+
668// IACCVIO - offset 0080
669// Entry:
670// Vectored into via hardware trap on Istream access violation or sign check error on PC.
671//
672// Function:
673// Build stack frame
674// a0 <- Faulting VA
675// a1 <- MMCSR (1 for ACV)
676// a2 <- -1 (for ifetch fault)
677// vector via entMM
678//-
679
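// All of the trap entry points below push the same set of values onto the
// kernel stack before dispatching through an entry-point PALtemp.  A rough
// C-style picture of that frame (the real offsets come from the osfsf_*
// symbols in the included headers; this layout is only a sketch):
//
//   struct osf_stack_frame {        // osfsf_c_size bytes, allocated at sp
//       uint64_t ps;                // osfsf_ps - saved processor status
//       uint64_t pc;                // osfsf_pc - resume PC
//       uint64_t gp;                // osfsf_gp - saved global pointer
//       uint64_t a0, a1, a2;        // osfsf_a0/a1/a2 - arguments to the OS entry
//   };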
680 HDW_VECTOR(PAL_IACCVIO_ENTRY)
681Trap_Iaccvio:
682 DEBUGSTORE(0x42)
683 sll r11, 63-osfps_v_mode, r25 // Shift mode up to MS bit
684 mtpr r31, ev5__ps // Set Ibox current mode to kernel
685
686 bis r11, r31, r12 // Save PS
687 bge r25, TRAP_IACCVIO_10_ // no stack swap needed if cm=kern
688
689
690 mtpr r31, ev5__dtb_cm // Set Mbox current mode to kernel -
691 // no virt ref for next 2 cycles
692 mtpr r30, pt_usp // save user stack
693
694 bis r31, r31, r12 // Set new PS
695 mfpr r30, pt_ksp
696
697TRAP_IACCVIO_10_:
698 lda sp, 0-osfsf_c_size(sp)// allocate stack space
699 mfpr r14, exc_addr // get pc
700
701 stq r16, osfsf_a0(sp) // save regs
702 bic r14, 3, r16 // pass pc/va as a0
703
704 stq r17, osfsf_a1(sp) // a1
705 or r31, mmcsr_c_acv, r17 // pass mm_csr as a1
706
707 stq r18, osfsf_a2(sp) // a2
708 mfpr r13, pt_entmm // get entry point
709
710 stq r11, osfsf_ps(sp) // save old ps
711 bis r12, r31, r11 // update ps
712
713 stq r16, osfsf_pc(sp) // save pc
714 stq r29, osfsf_gp(sp) // save gp
715
716 mtpr r13, exc_addr // load exc_addr with entMM
717 // 1 cycle to hw_rei
718 mfpr r29, pt_kgp // get the kgp
719
720 subq r31, 1, r18 // pass flag of istream, as a2
721 hw_rei_spe
722
723
724// .sbttl "INTERRUPT- Interrupt Trap Entry Point"
725
726//+
727// INTERRUPT - offset 0100
728// Entry:
729// Vectored into via trap on hardware interrupt
730//
731// Function:
732// check for halt interrupt
733// check for passive release (current ipl geq requestor)
734// if necessary, switch to kernel mode
735// push stack frame, update ps (including current mode and ipl copies), sp, and gp
736// pass the interrupt info to the system module
737//
738//-
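// The flow below converts the EV5 interrupt id (INTID) into an OSF IPL before
// building the frame.  A C-style sketch of what the subq/srl/cmovge sequence
// computes (derived from the instructions below, shown only as a sketch):
//
//   osf_ipl = intid - 0x11;                 // straight offset for lower INTIDs
//   if (intid >= 0x1d)
//       osf_ipl = (intid - 0x11) >> 1;      // 0x1d,0x1e -> 6, 0x1f -> 7
//
// The new OSF IPL is then used as a byte index into pt_intmask (see the
// rti_to_kern flow) to find the EV5 IPL to load into the Ibox.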
739
740
741 HDW_VECTOR(PAL_INTERRUPT_ENTRY)
742Trap_Interrupt:
743 mfpr r13, ev5__intid // Fetch level of interruptor
744 mfpr r25, ev5__isr // Fetch interrupt summary register
745
746 srl r25, isr_v_hlt, r9 // Get HLT bit
747 mfpr r14, ev5__ipl
748
749 mtpr r31, ev5__dtb_cm // Set Mbox current mode to kern
750 blbs r9, sys_halt_interrupt // halt_interrupt if HLT bit set
751
752 cmple r13, r14, r8 // R8 = 1 if intid .less than or eql. ipl
        bne r8, sys_passive_release     // Passive release if current interrupt is lt or eq ipl
754
755 and r11, osfps_m_mode, r10 // get mode bit
756 beq r10, TRAP_INTERRUPT_10_ // Skip stack swap in kernel
757
758 mtpr r30, pt_usp // save user stack
759 mfpr r30, pt_ksp // get kern stack
760
761TRAP_INTERRUPT_10_:
762 lda sp, (0-osfsf_c_size)(sp)// allocate stack space
763 mfpr r14, exc_addr // get pc
764
765 stq r11, osfsf_ps(sp) // save ps
766 stq r14, osfsf_pc(sp) // save pc
767
768 stq r29, osfsf_gp(sp) // push gp
769 stq r16, osfsf_a0(sp) // a0
770
771// pvc_violate 354 // ps is cleared anyway, if store to stack faults.
772 mtpr r31, ev5__ps // Set Ibox current mode to kernel
773 stq r17, osfsf_a1(sp) // a1
774
775 stq r18, osfsf_a2(sp) // a2
776 subq r13, 0x11, r12 // Start to translate from EV5IPL->OSFIPL
777
778 srl r12, 1, r8 // 1d, 1e: ipl 6. 1f: ipl 7.
779 subq r13, 0x1d, r9 // Check for 1d, 1e, 1f
780
781 cmovge r9, r8, r12 // if .ge. 1d, then take shifted value
782 bis r12, r31, r11 // set new ps
783
784 mfpr r12, pt_intmask
785 and r11, osfps_m_ipl, r14 // Isolate just new ipl (not really needed, since all non-ipl bits zeroed already)
786
787#ifdef SIMOS
788 /*
789 * Lance had space problems. We don't.
790 */
791 extbl r12, r14, r14 // Translate new OSFIPL->EV5IPL
792 mfpr r29, pt_kgp // update gp
793 mtpr r14, ev5__ipl // load the new IPL into Ibox
794#else
795// Moved the following three lines to sys_interrupt to make room for debug
796// extbl r12, r14, r14 // Translate new OSFIPL->EV5IPL
797// mfpr r29, pt_kgp // update gp
798
799// mtpr r14, ev5__ipl // load the new IPL into Ibox
800#endif
801 br r31, sys_interrupt // Go handle interrupt
802
803
804
805// .sbttl "ITBMISS- Istream TBmiss Trap Entry Point"
806
807//+
808// ITBMISS - offset 0180
809// Entry:
810// Vectored into via hardware trap on Istream translation buffer miss.
811//
812// Function:
813// Do a virtual fetch of the PTE, and fill the ITB if the PTE is valid.
814// Can trap into DTBMISS_DOUBLE.
815// This routine can use the PALshadow registers r8, r9, and r10
816//
817//-
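// The miss handlers below only look at a few fields of the OSF/1 PTE fetched
// from the page tables.  Roughly (bit positions come from the osfpte_* symbols
// in the included headers; this is an informal sketch only):
//
//   valid = pte & 1;                      // <0> V - blbc branches to invalid handler
//   foe   = pte & osfpte_m_foe;           // fault-on-execute, istream only
//   kre   = (pte >> osfpte_v_kre) & 1;    // kernel read enable
//   ure   = (pte >> osfpte_v_ure) & 1;    // user read enable
//   pfn   = pte >> 32;                    // page frame number, <63:32>
//
// If V is set (and FOE is clear for the istream case), the PTE is written
// straight into the ITB/DTB together with the faulting VA.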
818
819 HDW_VECTOR(PAL_ITB_MISS_ENTRY)
820Trap_Itbmiss:
821#if real_mm == 0
822
823
824 // Simple 1-1 va->pa mapping
825
826 nop // Pad to align to E1
827 mfpr r8, exc_addr
828
829 srl r8, page_offset_size_bits, r9
830 sll r9, 32, r9
831
832 lda r9, 0x3301(r9) // Make PTE, V set, all KRE, URE, KWE, UWE
833 mtpr r9, itb_pte // E1
834
835 hw_rei_stall // Nital says I don't have to obey shadow wait rule here.
836#else
837
838 // Real MM mapping
839 nop
840 mfpr r8, ev5__ifault_va_form // Get virtual address of PTE.
841
842 nop
843 mfpr r10, exc_addr // Get PC of faulting instruction in case of DTBmiss.
844
845pal_itb_ldq:
846 ld_vpte r8, 0(r8) // Get PTE, traps to DTBMISS_DOUBLE in case of TBmiss
847 mtpr r10, exc_addr // Restore exc_address if there was a trap.
848
849 mfpr r31, ev5__va // Unlock VA in case there was a double miss
850 nop
851
852 and r8, osfpte_m_foe, r25 // Look for FOE set.
853 blbc r8, invalid_ipte_handler // PTE not valid.
854
855 nop
856 bne r25, foe_ipte_handler // FOE is set
857
858 nop
859 mtpr r8, ev5__itb_pte // Ibox remembers the VA, load the PTE into the ITB.
860
861 hw_rei_stall //
862
863#endif
864
865
866
867
868// .sbttl "DTBMISS_SINGLE - Dstream Single TBmiss Trap Entry Point"
869
870//+
871// DTBMISS_SINGLE - offset 0200
872// Entry:
873// Vectored into via hardware trap on Dstream single translation buffer miss.
874//
875// Function:
876// Do a virtual fetch of the PTE, and fill the DTB if the PTE is valid.
877// Can trap into DTBMISS_DOUBLE.
878// This routine can use the PALshadow registers r8, r9, and r10
879//-
880
881 HDW_VECTOR(PAL_DTB_MISS_ENTRY)
882Trap_Dtbmiss_Single:
883#if real_mm == 0
884 // Simple 1-1 va->pa mapping
885 mfpr r8, va // E0
886 srl r8, page_offset_size_bits, r9
887
888 sll r9, 32, r9
889 lda r9, 0x3301(r9) // Make PTE, V set, all KRE, URE, KWE, UWE
890
891 mtpr r9, dtb_pte // E0
892 nop // Pad to align to E0
893
894
895
896 mtpr r8, dtb_tag // E0
897 nop
898
899 nop // Pad tag write
900 nop
901
902 nop // Pad tag write
903 nop
904
905 hw_rei
906#else
907 mfpr r8, ev5__va_form // Get virtual address of PTE - 1 cycle delay. E0.
908 mfpr r10, exc_addr // Get PC of faulting instruction in case of error. E1.
909
910// DEBUGSTORE(0x45)
911// DEBUG_EXC_ADDR()
912 // Real MM mapping
913 mfpr r9, ev5__mm_stat // Get read/write bit. E0.
914 mtpr r10, pt6 // Stash exc_addr away
915
916pal_dtb_ldq:
917 ld_vpte r8, 0(r8) // Get PTE, traps to DTBMISS_DOUBLE in case of TBmiss
918 nop // Pad MF VA
919
920 mfpr r10, ev5__va // Get original faulting VA for TB load. E0.
921 nop
922
923 mtpr r8, ev5__dtb_pte // Write DTB PTE part. E0.
924 blbc r8, invalid_dpte_handler // Handle invalid PTE
925
926 mtpr r10, ev5__dtb_tag // Write DTB TAG part, completes DTB load. No virt ref for 3 cycles.
927 mfpr r10, pt6
928
929 // Following 2 instructions take 2 cycles
930 mtpr r10, exc_addr // Return linkage in case we trapped. E1.
931 mfpr r31, pt0 // Pad the write to dtb_tag
932
933 hw_rei // Done, return
934#endif
935
936
937
938
939// .sbttl "DTBMISS_DOUBLE - Dstream Double TBmiss Trap Entry Point"
940
941//+
942// DTBMISS_DOUBLE - offset 0280
943// Entry:
944// Vectored into via hardware trap on Double TBmiss from single miss flows.
945//
946// r8 - faulting VA
947// r9 - original MMstat
948// r10 - original exc_addr (both itb,dtb miss)
949// pt6 - original exc_addr (dtb miss flow only)
950// VA IPR - locked with original faulting VA
951//
952// Function:
953// Get PTE, if valid load TB and return.
954// If not valid then take TNV/ACV exception.
955//
956// pt4 and pt5 are reserved for this flow.
957//
958//
959//-
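// What the flow below does, as a C-style sketch: it walks the page tables
// physically, starting at pt_ptbr, because the virtual fetch of the level 3
// PTE just double-missed (names below are illustrative only):
//
//   idx1 = extract_upper_seg(r8) * 8;        // first sll/srl pair below
//   idx2 = extract_lower_seg(r8) * 8;        // second sll/srl pair
//   pte  = *(uint64_t *)(ptbr + idx1);       // level 2 PTE
//   if (!(pte & 1)) goto double_pte_inv;
//   base = (pte >> 32) << page_offset_size_bits;   // PFN -> next table address
//   pte  = *(uint64_t *)(base + idx2);       // level 3 PTE
//   if (!(pte & 1)) goto double_pte_inv;
//   write dtb_pte / dtb_tag and hw_rei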
960
961 HDW_VECTOR(PAL_DOUBLE_MISS_ENTRY)
962Trap_Dtbmiss_double:
963#if ldvpte_bug_fix != 0
964 mtpr r8, pt4 // save r8 to do exc_addr check
965 mfpr r8, exc_addr
966 blbc r8, Trap_Dtbmiss_Single //if not in palmode, should be in the single routine, dummy!
967 mfpr r8, pt4 // restore r8
968#endif
969 nop
970 mtpr r22, pt5 // Get some scratch space. E1.
971 // Due to virtual scheme, we can skip the first lookup and go
972 // right to fetch of level 2 PTE
973 sll r8, (64-((2*page_seg_size_bits)+page_offset_size_bits)), r22 // Clean off upper bits of VA
974 mtpr r21, pt4 // Get some scratch space. E1.
975
976 srl r22, 61-page_seg_size_bits, r22 // Get Va<seg1>*8
977 mfpr r21, pt_ptbr // Get physical address of the page table.
978
979 nop
980 addq r21, r22, r21 // Index into page table for level 2 PTE.
981
982 sll r8, (64-((1*page_seg_size_bits)+page_offset_size_bits)), r22 // Clean off upper bits of VA
983 ldqp r21, 0(r21) // Get level 2 PTE (addr<2:0> ignored)
984
985 srl r22, 61-page_seg_size_bits, r22 // Get Va<seg1>*8
986 blbc r21, double_pte_inv // Check for Invalid PTE.
987
988 srl r21, 32, r21 // extract PFN from PTE
989 sll r21, page_offset_size_bits, r21 // get PFN * 2^13 for add to <seg3>*8
990
991 addq r21, r22, r21 // Index into page table for level 3 PTE.
992 nop
993
994 ldqp r21, 0(r21) // Get level 3 PTE (addr<2:0> ignored)
995 blbc r21, double_pte_inv // Check for invalid PTE.
996
997 mtpr r21, ev5__dtb_pte // Write the PTE. E0.
998 mfpr r22, pt5 // Restore scratch register
999
1000 mtpr r8, ev5__dtb_tag // Write the TAG. E0. No virtual references in subsequent 3 cycles.
1001 mfpr r21, pt4 // Restore scratch register
1002
1003 nop // Pad write to tag.
1004 nop
1005
1006 nop // Pad write to tag.
1007 nop
1008
1009 hw_rei
1010
1011
1012
1013// .sbttl "UNALIGN -- Dstream unalign trap"
1014//+
1015// UNALIGN - offset 0300
1016// Entry:
1017// Vectored into via hardware trap on unaligned Dstream reference.
1018//
1019// Function:
1020// Build stack frame
1021// a0 <- Faulting VA
1022// a1 <- Opcode
1023// a2 <- src/dst register number
1024// vector via entUna
1025//-
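// The handler decodes MM_STAT to build a1/a2 and to decide whether the trap
// can simply be dismissed.  As a C-style sketch (field positions come from the
// mm_stat_* symbols; illustrative only):
//
//   ra     = (mm_stat >> mm_stat_v_ra) & 0x1f;                  // a2: src/dst register
//   opcode = (mm_stat >> mm_stat_v_opcode) & mm_stat_m_opcode;  // a1
//   wr     = mm_stat & 1;                      // set only for store or fetch_m
//   if (!wr && ra == 31)                       // unaligned load to r31/f31:
//       bump PC past the load and hw_rei;      // dismiss, nothing to deliver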
1026
1027 HDW_VECTOR(PAL_UNALIGN_ENTRY)
1028Trap_Unalign:
1029/* DEBUGSTORE(0x47)*/
1030 sll r11, 63-osfps_v_mode, r25 // Shift mode up to MS bit
1031 mtpr r31, ev5__ps // Set Ibox current mode to kernel
1032
1033 mfpr r8, ev5__mm_stat // Get mmstat --ok to use r8, no tbmiss
1034 mfpr r14, exc_addr // get pc
1035
1036 srl r8, mm_stat_v_ra, r13 // Shift Ra field to ls bits
1037 blbs r14, pal_pal_bug_check // Bugcheck if unaligned in PAL
1038
1039 blbs r8, UNALIGN_NO_DISMISS // lsb only set on store or fetch_m
1040 // not set, must be a load
1041 and r13, 0x1F, r8 // isolate ra
1042
1043 cmpeq r8, 0x1F, r8 // check for r31/F31
1044 bne r8, dfault_fetch_ldr31_err // if its a load to r31 or f31 -- dismiss the fault
1045
1046UNALIGN_NO_DISMISS:
1047 bis r11, r31, r12 // Save PS
1048 bge r25, UNALIGN_NO_DISMISS_10_ // no stack swap needed if cm=kern
1049
1050
1051 mtpr r31, ev5__dtb_cm // Set Mbox current mode to kernel -
1052 // no virt ref for next 2 cycles
1053 mtpr r30, pt_usp // save user stack
1054
1055 bis r31, r31, r12 // Set new PS
1056 mfpr r30, pt_ksp
1057
1058UNALIGN_NO_DISMISS_10_:
1059 mfpr r25, ev5__va // Unlock VA
1060 lda sp, 0-osfsf_c_size(sp)// allocate stack space
1061
1062 mtpr r25, pt0 // Stash VA
1063 stq r18, osfsf_a2(sp) // a2
1064
1065 stq r11, osfsf_ps(sp) // save old ps
1066 srl r13, mm_stat_v_opcode-mm_stat_v_ra, r25// Isolate opcode
1067
1068 stq r29, osfsf_gp(sp) // save gp
1069 addq r14, 4, r14 // inc PC past the ld/st
1070
1071 stq r17, osfsf_a1(sp) // a1
        and r25, mm_stat_m_opcode, r17 // Clean opcode for a1
1073
1074 stq r16, osfsf_a0(sp) // save regs
1075 mfpr r16, pt0 // a0 <- va/unlock
1076
1077 stq r14, osfsf_pc(sp) // save pc
1078 mfpr r25, pt_entuna // get entry point
1079
1080
1081 bis r12, r31, r11 // update ps
1082 br r31, unalign_trap_cont
1083
1084
1085
1086
1087// .sbttl "DFAULT - Dstream Fault Trap Entry Point"
1088
1089//+
1090// DFAULT - offset 0380
1091// Entry:
1092// Vectored into via hardware trap on dstream fault or sign check error on DVA.
1093//
1094// Function:
1095// Ignore faults on FETCH/FETCH_M
1096// Check for DFAULT in PAL
1097// Build stack frame
1098// a0 <- Faulting VA
1099// a1 <- MMCSR (1 for ACV, 2 for FOR, 4 for FOW)
1100// a2 <- R/W
1101// vector via entMM
1102//
1103//-
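// The a1/a2 values are derived in dfault_trap_cont from MM_STAT<3:0>
// (WR, ACV, FOR, FOW from bit 0 up).  A C-style sketch of that derivation
// (illustrative only):
//
//   a2 = mm_stat & 1;                   // 0 = read, 1 = write
//   a1 = (mm_stat & 0xf) >> 1;          // ACV -> 1, FOR -> 2, FOW -> 4
//   if (a1 & 1) a1 = 1;                 // ACV overrides FOR/FOW (cmovlbs)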
1104 HDW_VECTOR(PAL_D_FAULT_ENTRY)
1105Trap_Dfault:
1106// DEBUGSTORE(0x48)
1107 sll r11, 63-osfps_v_mode, r25 // Shift mode up to MS bit
1108 mtpr r31, ev5__ps // Set Ibox current mode to kernel
1109
1110 mfpr r13, ev5__mm_stat // Get mmstat
1111 mfpr r8, exc_addr // get pc, preserve r14
1112
1113 srl r13, mm_stat_v_opcode, r9 // Shift opcode field to ls bits
1114 blbs r8, dfault_in_pal
1115
1116 bis r8, r31, r14 // move exc_addr to correct place
1117 bis r11, r31, r12 // Save PS
1118
1119 mtpr r31, ev5__dtb_cm // Set Mbox current mode to kernel -
1120 // no virt ref for next 2 cycles
1121 and r9, mm_stat_m_opcode, r9 // Clean all but opcode
1122
1123 cmpeq r9, evx_opc_sync, r9 // Is the opcode fetch/fetchm?
1124 bne r9, dfault_fetch_ldr31_err // Yes, dismiss the fault
1125
1126 //dismiss exception if load to r31/f31
1127 blbs r13, dfault_no_dismiss // mm_stat<0> set on store or fetchm
1128
1129 // not a store or fetch, must be a load
1130 srl r13, mm_stat_v_ra, r9 // Shift rnum to low bits
1131
1132 and r9, 0x1F, r9 // isolate rnum
1133 nop
1134
1135 cmpeq r9, 0x1F, r9 // Is the rnum r31 or f31?
1136 bne r9, dfault_fetch_ldr31_err // Yes, dismiss the fault
1137
1138dfault_no_dismiss:
1139 and r13, 0xf, r13 // Clean extra bits in mm_stat
1140 bge r25, dfault_trap_cont // no stack swap needed if cm=kern
1141
1142
1143 mtpr r30, pt_usp // save user stack
1144 bis r31, r31, r12 // Set new PS
1145
1146 mfpr r30, pt_ksp
1147 br r31, dfault_trap_cont
1148
1149
1150
1151
1152
1153// .sbttl "MCHK - Machine Check Trap Entry Point"
1154
1155//+
1156// MCHK - offset 0400
1157// Entry:
1158// Vectored into via hardware trap on machine check.
1159//
1160// Function:
1161//
1162//-
1163
1164 HDW_VECTOR(PAL_MCHK_ENTRY)
1165Trap_Mchk:
1166 DEBUGSTORE(0x49)
1167 mtpr r31, ic_flush_ctl // Flush the Icache
1168 br r31, sys_machine_check
1169
1170
1171
1172
1173// .sbttl "OPCDEC - Illegal Opcode Trap Entry Point"
1174
1175//+
1176// OPCDEC - offset 0480
1177// Entry:
1178// Vectored into via hardware trap on illegal opcode.
1179//
1180// Build stack frame
1181// a0 <- code
1182// a1 <- unpred
1183// a2 <- unpred
1184// vector via entIF
1185//
1186//-
1187
1188 HDW_VECTOR(PAL_OPCDEC_ENTRY)
1189Trap_Opcdec:
1190 DEBUGSTORE(0x4a)
1191//simos DEBUG_EXC_ADDR()
1192 sll r11, 63-osfps_v_mode, r25 // Shift mode up to MS bit
1193 mtpr r31, ev5__ps // Set Ibox current mode to kernel
1194
1195 mfpr r14, exc_addr // get pc
1196 blbs r14, pal_pal_bug_check // check opcdec in palmode
1197
1198 bis r11, r31, r12 // Save PS
1199 bge r25, TRAP_OPCDEC_10_ // no stack swap needed if cm=kern
1200
1201
1202 mtpr r31, ev5__dtb_cm // Set Mbox current mode to kernel -
1203 // no virt ref for next 2 cycles
1204 mtpr r30, pt_usp // save user stack
1205
1206 bis r31, r31, r12 // Set new PS
1207 mfpr r30, pt_ksp
1208
1209TRAP_OPCDEC_10_:
1210 lda sp, 0-osfsf_c_size(sp)// allocate stack space
1211 addq r14, 4, r14 // inc pc
1212
1213 stq r16, osfsf_a0(sp) // save regs
1214 bis r31, osf_a0_opdec, r16 // set a0
1215
1216 stq r11, osfsf_ps(sp) // save old ps
1217 mfpr r13, pt_entif // get entry point
1218
1219 stq r18, osfsf_a2(sp) // a2
1220 stq r17, osfsf_a1(sp) // a1
1221
1222 stq r29, osfsf_gp(sp) // save gp
1223 stq r14, osfsf_pc(sp) // save pc
1224
1225 bis r12, r31, r11 // update ps
1226 mtpr r13, exc_addr // load exc_addr with entIF
1227 // 1 cycle to hw_rei, E1
1228
1229 mfpr r29, pt_kgp // get the kgp, E1
1230
1231 hw_rei_spe // done, E1
1232
1233
1234
1235
1236
1237
1238// .sbttl "ARITH - Arithmetic Exception Trap Entry Point"
1239
1240//+
1241// ARITH - offset 0500
1242// Entry:
// Vectored into via hardware trap on arithmetic exception.
1244//
1245// Function:
1246// Build stack frame
1247// a0 <- exc_sum
1248// a1 <- exc_mask
1249// a2 <- unpred
1250// vector via entArith
1251//
1252//-
1253 HDW_VECTOR(PAL_ARITH_ENTRY)
1254Trap_Arith:
1255 DEBUGSTORE(0x4b)
1256 and r11, osfps_m_mode, r12 // get mode bit
1257 mfpr r31, ev5__va // unlock mbox
1258
1259 bis r11, r31, r25 // save ps
1260 mfpr r14, exc_addr // get pc
1261
1262 nop
1263 blbs r14, pal_pal_bug_check // arith trap from PAL
1264
1265 mtpr r31, ev5__dtb_cm // Set Mbox current mode to kernel -
1266 // no virt ref for next 2 cycles
1267 beq r12, TRAP_ARITH_10_ // if zero we are in kern now
1268
1269 bis r31, r31, r25 // set the new ps
1270 mtpr r30, pt_usp // save user stack
1271
1272 nop
1273 mfpr r30, pt_ksp // get kern stack
1274
1275TRAP_ARITH_10_: lda sp, 0-osfsf_c_size(sp) // allocate stack space
1276 mtpr r31, ev5__ps // Set Ibox current mode to kernel
1277
1278 nop // Pad current mode write and stq
1279 mfpr r13, ev5__exc_sum // get the exc_sum
1280
1281 mfpr r12, pt_entarith
1282 stq r14, osfsf_pc(sp) // save pc
1283
1284 stq r17, osfsf_a1(sp)
1285 mfpr r17, ev5__exc_mask // Get exception register mask IPR - no mtpr exc_sum in next cycle
1286
1287 stq r11, osfsf_ps(sp) // save ps
1288 bis r25, r31, r11 // set new ps
1289
1290 stq r16, osfsf_a0(sp) // save regs
1291 srl r13, exc_sum_v_swc, r16// shift data to correct position
1292
1293 stq r18, osfsf_a2(sp)
1294// pvc_violate 354 // ok, but make sure reads of exc_mask/sum are not in same trap shadow
1295 mtpr r31, ev5__exc_sum // Unlock exc_sum and exc_mask
1296
1297 stq r29, osfsf_gp(sp)
1298 mtpr r12, exc_addr // Set new PC - 1 bubble to hw_rei - E1
1299
1300 mfpr r29, pt_kgp // get the kern gp - E1
1301 hw_rei_spe // done - E1
1302
1303
1304
1305
1306
1307
1308// .sbttl "FEN - Illegal Floating Point Operation Trap Entry Point"
1309
1310//+
1311// FEN - offset 0580
1312// Entry:
1313// Vectored into via hardware trap on illegal FP op.
1314//
1315// Function:
1316// Build stack frame
1317// a0 <- code
1318// a1 <- unpred
1319// a2 <- unpred
1320// vector via entIF
1321//
1322//-
1323
1324 HDW_VECTOR(PAL_FEN_ENTRY)
1325Trap_Fen:
1326 sll r11, 63-osfps_v_mode, r25 // Shift mode up to MS bit
1327 mtpr r31, ev5__ps // Set Ibox current mode to kernel
1328
1329 mfpr r14, exc_addr // get pc
1330 blbs r14, pal_pal_bug_check // check opcdec in palmode
1331
1332 mfpr r13, ev5__icsr
1333 nop
1334
1335 bis r11, r31, r12 // Save PS
1336 bge r25, TRAP_FEN_10_ // no stack swap needed if cm=kern
1337
1338 mtpr r31, ev5__dtb_cm // Set Mbox current mode to kernel -
1339 // no virt ref for next 2 cycles
1340 mtpr r30, pt_usp // save user stack
1341
1342 bis r31, r31, r12 // Set new PS
1343 mfpr r30, pt_ksp
1344
1345TRAP_FEN_10_:
1346 lda sp, 0-osfsf_c_size(sp)// allocate stack space
1347 srl r13, icsr_v_fpe, r25 // Shift FP enable to bit 0
1348
1349
1350 stq r16, osfsf_a0(sp) // save regs
1351 mfpr r13, pt_entif // get entry point
1352
1353 stq r18, osfsf_a2(sp) // a2
1354 stq r11, osfsf_ps(sp) // save old ps
1355
1356 stq r29, osfsf_gp(sp) // save gp
1357 bis r12, r31, r11 // set new ps
1358
1359 stq r17, osfsf_a1(sp) // a1
1360 blbs r25,fen_to_opcdec // If FP is enabled, this is really OPCDEC.
1361
1362 bis r31, osf_a0_fen, r16 // set a0
1363 stq r14, osfsf_pc(sp) // save pc
1364
1365 mtpr r13, exc_addr // load exc_addr with entIF
1366 // 1 cycle to hw_rei -E1
1367
1368 mfpr r29, pt_kgp // get the kgp -E1
1369
1370 hw_rei_spe // done -E1
1371
1372// FEN trap was taken, but the fault is really opcdec.
1373 ALIGN_BRANCH
1374fen_to_opcdec:
1375 addq r14, 4, r14 // save PC+4
1376 bis r31, osf_a0_opdec, r16 // set a0
1377
1378 stq r14, osfsf_pc(sp) // save pc
1379 mtpr r13, exc_addr // load exc_addr with entIF
1380 // 1 cycle to hw_rei
1381
1382 mfpr r29, pt_kgp // get the kgp
1383 hw_rei_spe // done
1384
1385
1386
1387// .sbttl "Misc handlers"
1388 // Start area for misc code.
1389//+
1390//dfault_trap_cont
1391// A dfault trap has been taken. The sp has been updated if necessary.
// Push a stack frame and vector via entMM.
1393//
1394// Current state:
1395// r12 - new PS
1396// r13 - MMstat
1397// VA - locked
1398//
1399//-
1400 ALIGN_BLOCK
1401dfault_trap_cont:
1402 lda sp, 0-osfsf_c_size(sp)// allocate stack space
1403 mfpr r25, ev5__va // Fetch VA/unlock
1404
1405 stq r18, osfsf_a2(sp) // a2
1406 and r13, 1, r18 // Clean r/w bit for a2
1407
1408 stq r16, osfsf_a0(sp) // save regs
1409 bis r25, r31, r16 // a0 <- va
1410
1411 stq r17, osfsf_a1(sp) // a1
1412 srl r13, 1, r17 // shift fault bits to right position
1413
1414 stq r11, osfsf_ps(sp) // save old ps
1415 bis r12, r31, r11 // update ps
1416
1417 stq r14, osfsf_pc(sp) // save pc
1418 mfpr r25, pt_entmm // get entry point
1419
1420 stq r29, osfsf_gp(sp) // save gp
        cmovlbs r17, 1, r17             // a1. ACV overrides FOR/FOW.
1422
1423 mtpr r25, exc_addr // load exc_addr with entMM
1424 // 1 cycle to hw_rei
1425 mfpr r29, pt_kgp // get the kgp
1426
1427 hw_rei_spe // done
1428
1429//+
1430//unalign_trap_cont
1431// An unalign trap has been taken. Just need to finish up a few things.
1432//
1433// Current state:
1434// r25 - entUna
1435// r13 - shifted MMstat
1436//
1437//-
1438 ALIGN_BLOCK
1439unalign_trap_cont:
1440 mtpr r25, exc_addr // load exc_addr with entUna
1441 // 1 cycle to hw_rei
1442
1443
1444 mfpr r29, pt_kgp // get the kgp
1445 and r13, mm_stat_m_ra, r18 // Clean Ra for a2
1446
1447 hw_rei_spe // done
1448
1449
1450
1451//+
1452// dfault_in_pal
1453// Dfault trap was taken, exc_addr points to a PAL PC.
1454// r9 - mmstat<opcode> right justified
1455// r8 - exception address
1456//
1457// These are the cases:
1458// opcode was STQ -- from a stack builder, KSP not valid halt
1459// r14 - original exc_addr
1460// r11 - original PS
1461// opcode was STL_C -- rti or retsys clear lock_flag by stack write,
1462// KSP not valid halt
1463// r11 - original PS
1464// r14 - original exc_addr
1465// opcode was LDQ -- retsys or rti stack read, KSP not valid halt
1466// r11 - original PS
1467// r14 - original exc_addr
1468// opcode was HW_LD -- itbmiss or dtbmiss, bugcheck due to fault on page tables
1469// r10 - original exc_addr
1470// r11 - original PS
1471//
1472//
1473//-
1474 ALIGN_BLOCK
1475dfault_in_pal:
1476 DEBUGSTORE(0x50)
1477 bic r8, 3, r8 // Clean PC
1478 mfpr r9, pal_base
1479
1480 mfpr r31, va // unlock VA
1481#if real_mm != 0
1482 // if not real_mm, should never get here from miss flows
1483
1484 subq r9, r8, r8 // pal_base - offset
1485
1486 lda r9, pal_itb_ldq-pal_base(r8)
1487 nop
1488
1489 beq r9, dfault_do_bugcheck
1490 lda r9, pal_dtb_ldq-pal_base(r8)
1491
1492 beq r9, dfault_do_bugcheck
1493#endif
1494
1495//
1496// KSP invalid halt case --
1497ksp_inval_halt:
1498 DEBUGSTORE(76)
1499 bic r11, osfps_m_mode, r11 // set ps to kernel mode
1500 mtpr r0, pt0
1501
1502 mtpr r31, dtb_cm // Make sure that the CM IPRs are all kernel mode
1503 mtpr r31, ips
1504
1505 mtpr r14, exc_addr // Set PC to instruction that caused trouble
1506//orig pvc_jsr updpcb, bsr=1
1507 bsr r0, pal_update_pcb // update the pcb
1508
1509 lda r0, hlt_c_ksp_inval(r31) // set halt code to hw halt
1510 br r31, sys_enter_console // enter the console
1511
1512 ALIGN_BRANCH
1513dfault_do_bugcheck:
1514 bis r10, r31, r14 // bugcheck expects exc_addr in r14
1515 br r31, pal_pal_bug_check
1516
1517
1518 ALIGN_BLOCK
1519//+
1520// dfault_fetch_ldr31_err - ignore faults on fetch(m) and loads to r31/f31
1521// On entry -
1522// r14 - exc_addr
1523// VA is locked
1524//
1525//-
1526dfault_fetch_ldr31_err:
1527 mtpr r11, ev5__dtb_cm
1528 mtpr r11, ev5__ps // Make sure ps hasn't changed
1529
1530 mfpr r31, va // unlock the mbox
1531 addq r14, 4, r14 // inc the pc to skip the fetch
1532
1533 mtpr r14, exc_addr // give ibox new PC
1534 mfpr r31, pt0 // pad exc_addr write
1535
1536 hw_rei
1537
1538
1539
1540 ALIGN_BLOCK
1541//+
1542// sys_from_kern
1543// callsys from kernel mode - OS bugcheck machine check
1544//
1545//-
1546sys_from_kern:
1547 mfpr r14, exc_addr // PC points to call_pal
1548 subq r14, 4, r14
1549
1550 lda r25, mchk_c_os_bugcheck(r31) // fetch mchk code
1551 br r31, pal_pal_mchk
1552
1553
1554// .sbttl "Continuation of long call_pal flows"
1555 ALIGN_BLOCK
1556//+
1557// wrent_tbl
1558// Table to write *int in paltemps.
1559// 4 instructions/entry
1560// r16 has new value
1561//
1562//-
1563wrent_tbl:
1564//orig pvc_jsr wrent, dest=1
1565 nop
1566 mtpr r16, pt_entint
1567
1568 mfpr r31, pt0 // Pad for mt->mf paltemp rule
1569 hw_rei
1570
1571
1572//orig pvc_jsr wrent, dest=1
1573 nop
1574 mtpr r16, pt_entarith
1575
1576 mfpr r31, pt0 // Pad for mt->mf paltemp rule
1577 hw_rei
1578
1579
1580//orig pvc_jsr wrent, dest=1
1581 nop
1582 mtpr r16, pt_entmm
1583
1584 mfpr r31, pt0 // Pad for mt->mf paltemp rule
1585 hw_rei
1586
1587
1588//orig pvc_jsr wrent, dest=1
1589 nop
1590 mtpr r16, pt_entif
1591
1592 mfpr r31, pt0 // Pad for mt->mf paltemp rule
1593 hw_rei
1594
1595
1596//orig pvc_jsr wrent, dest=1
1597 nop
1598 mtpr r16, pt_entuna
1599
1600 mfpr r31, pt0 // Pad for mt->mf paltemp rule
1601 hw_rei
1602
1603
1604//orig pvc_jsr wrent, dest=1
1605 nop
1606 mtpr r16, pt_entsys
1607
1608 mfpr r31, pt0 // Pad for mt->mf paltemp rule
1609 hw_rei
1610
1611 ALIGN_BLOCK
1612//+
1613// tbi_tbl
1614// Table to do tbi instructions
1615// 4 instructions per entry
1616//-
1617tbi_tbl:
1618 // -2 tbia
1619//orig pvc_jsr tbi, dest=1
1620 mtpr r31, ev5__dtb_ia // Flush DTB
1621 mtpr r31, ev5__itb_ia // Flush ITB
1622
1623#if icflush_on_tbix != 0
1624
1625
1626 br r31, pal_ic_flush // Flush Icache
1627#else
1628
1629 hw_rei_stall
1630#endif
1631
1632 nop // Pad table
1633
1634 // -1 tbiap
1635//orig pvc_jsr tbi, dest=1
1636 mtpr r31, ev5__dtb_iap // Flush DTB
1637 mtpr r31, ev5__itb_iap // Flush ITB
1638
1639#if icflush_on_tbix != 0
1640
1641
1642 br r31, pal_ic_flush // Flush Icache
1643#else
1644
1645 hw_rei_stall
1646#endif
1647
1648 nop // Pad table
1649
1650
1651 // 0 unused
1652//orig pvc_jsr tbi, dest=1
1653 hw_rei // Pad table
1654 nop
1655 nop
1656 nop
1657
1658
1659 // 1 tbisi
1660//orig pvc_jsr tbi, dest=1
1661#if icflush_on_tbix != 0
1662
1663
1664
1665 nop
1666 br r31, pal_ic_flush_and_tbisi // Flush Icache
1667 nop
1668 nop // Pad table
1669#else
1670
1671 nop
1672 nop
1673 mtpr r17, ev5__itb_is // Flush ITB
1674 hw_rei_stall
1675#endif
1676
1677
1678
1679 // 2 tbisd
1680//orig pvc_jsr tbi, dest=1
1681 mtpr r17, ev5__dtb_is // Flush DTB.
1682 nop
1683
1684 nop
1685 hw_rei_stall
1686
1687
1688 // 3 tbis
1689//orig pvc_jsr tbi, dest=1
1690 mtpr r17, ev5__dtb_is // Flush DTB
1691#if icflush_on_tbix != 0
1692
1693
1694 br r31, pal_ic_flush_and_tbisi // Flush Icache and ITB
1695#else
1696 br r31, tbi_finish
1697 ALIGN_BRANCH
1698tbi_finish:
1699 mtpr r17, ev5__itb_is // Flush ITB
1700 hw_rei_stall
1701#endif
1702
1703
1704
1705 ALIGN_BLOCK
1706//+
1707// bpt_bchk_common:
1708// Finish up the bpt/bchk instructions
1709//-
1710bpt_bchk_common:
1711 stq r18, osfsf_a2(sp) // a2
1712 mfpr r13, pt_entif // get entry point
1713
1714 stq r12, osfsf_ps(sp) // save old ps
1715 stq r14, osfsf_pc(sp) // save pc
1716
1717 stq r29, osfsf_gp(sp) // save gp
1718 mtpr r13, exc_addr // load exc_addr with entIF
1719 // 1 cycle to hw_rei
1720
1721 mfpr r29, pt_kgp // get the kgp
1722
1723
1724 hw_rei_spe // done
1725
1726
1727 ALIGN_BLOCK
1728//+
1729// rti_to_user
1730// Finish up the rti instruction
1731//-
1732rti_to_user:
1733 mtpr r11, ev5__dtb_cm // set Mbox current mode - no virt ref for 2 cycles
1734 mtpr r11, ev5__ps // set Ibox current mode - 2 bubble to hw_rei
1735
1736 mtpr r31, ev5__ipl // set the ipl. No hw_rei for 2 cycles
        mtpr r25, pt_ksp                // save off in case RTI to user
1738
1739 mfpr r30, pt_usp
1740 hw_rei_spe // and back
1741
1742
1743 ALIGN_BLOCK
1744//+
1745// rti_to_kern
1746// Finish up the rti instruction
1747//-
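// pt_intmask holds a small byte table, indexed by OSF IPL, giving the EV5 IPL
// to program into the Ibox.  The extbl below does, in one instruction, the
// equivalent of this C-style sketch (illustrative only):
//
//   ev5_ipl = (pt_intmask >> (osf_ipl * 8)) & 0xff;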
1748rti_to_kern:
1749 and r12, osfps_m_ipl, r11 // clean ps
1750 mfpr r12, pt_intmask // get int mask
1751
1752 extbl r12, r11, r12 // get mask for this ipl
        mtpr r25, pt_ksp                // save off in case RTI to user
1754
1755 mtpr r12, ev5__ipl // set the new ipl.
1756 or r25, r31, sp // sp
1757
1758// pvc_violate 217 // possible hidden mt->mf ipl not a problem in callpals
1759 hw_rei
1760
1761 ALIGN_BLOCK
1762//+
1763// swpctx_cont
1764// Finish up the swpctx instruction
1765//-
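// Visible effects of this continuation, sketched in C-style pseudocode (PCB
// field names come from the osfpcb_* symbols; the entry register contents are
// set up by the main swpctx flow, which is not in this part of the file):
//
//   icsr     = (icsr & ~mask) | new_fen | new_pme;  // FEN (and PMP on pass 2)
//   itb_asn  = dtb_asn = new_asn;
//   pt_ptbr  = pcb->mmptr << page_offset_size_bits;
//   cc       = new cycle-counter offset for this context;
//   pt_usp   = pcb->usp;   ksp/sp = pcb->ksp;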
1766
1767swpctx_cont:
1768#if ev5_p1 != 0
1769
1770
1771 bic r25, r24, r25 // clean icsr<FPE>
1772 get_impure r8 // get impure pointer
1773
1774 sll r12, icsr_v_fpe, r12 // shift new fen to pos
1775 fix_impure_ipr r8 // adjust impure pointer
1776
1777 restore_reg1 pmctr_ctl, r8, r8, ipr=1 // "ldqp" - get pmctr_ctl bits
1778 srl r23, 32, r24 // move asn to low asn pos
1779
1780 ldqp r14, osfpcb_q_mmptr(r16)// get new mmptr
1781 srl r22, osfpcb_v_pme, r22 // get pme down to bit 0
1782
1783 or r25, r12, r25 // icsr with new fen
1784 sll r24, itb_asn_v_asn, r12
1785
1786#else
1787
1788 bic r25, r24, r25 // clean icsr<FPE,PMP>
1789 sll r12, icsr_v_fpe, r12 // shift new fen to pos
1790
1791 ldqp r14, osfpcb_q_mmptr(r16)// get new mmptr
1792 srl r22, osfpcb_v_pme, r22 // get pme down to bit 0
1793
1794 or r25, r12, r25 // icsr with new fen
1795 srl r23, 32, r24 // move asn to low asn pos
1796
1797 and r22, 1, r22
1798 sll r24, itb_asn_v_asn, r12
1799
1800 sll r22, icsr_v_pmp, r22
1801 nop
1802
1803 or r25, r22, r25 // icsr with new pme
1804#endif
1805
1806 sll r24, dtb_asn_v_asn, r24
1807
1808 subl r23, r13, r13 // gen new cc offset
1809 mtpr r12, itb_asn // no hw_rei_stall in 0,1,2,3,4
1810
1811 mtpr r24, dtb_asn // Load up new ASN
1812 mtpr r25, icsr // write the icsr
1813
1814 sll r14, page_offset_size_bits, r14 // Move PTBR into internal position.
1815 ldqp r25, osfpcb_q_usp(r16) // get new usp
1816
1817 insll r13, 4, r13 // >> 32
1818// pvc_violate 379 // ldqp can't trap except replay. only problem if mf same ipr in same shadow
1819 mtpr r14, pt_ptbr // load the new ptbr
1820
1821 mtpr r13, cc // set new offset
1822 ldqp r30, osfpcb_q_ksp(r16) // get new ksp
1823
1824// pvc_violate 379 // ldqp can't trap except replay. only problem if mf same ipr in same shadow
1825 mtpr r25, pt_usp // save usp
1826
1827#if ev5_p1 != 0
1828
1829
1830 blbc r8, no_pm_change // if monitoring all processes -- no need to change pm
1831
1832 // otherwise, monitoring select processes - update pm
1833 lda r25, 0x3F(r31)
1834 cmovlbc r22, r31, r8 // if pme set, disable counters, otherwise use saved encodings
1835
1836 sll r25, pmctr_v_ctl2, r25 // create ctl field bit mask
1837 mfpr r22, ev5__pmctr
1838
1839 and r8, r25, r8 // mask new ctl value
1840 bic r22, r25, r22 // clear ctl field in pmctr
1841
1842 or r8, r22, r8
1843 mtpr r8, ev5__pmctr
1844
1845no_pm_change:
1846#endif
1847
1848
1849#if osf_chm_fix != 0
1850
1851
1852 p4_fixup_hw_rei_stall // removes this section for Pass 4 by placing a hw_rei_stall here
1853
1854#if build_fixed_image != 0
1855
1856
1857 hw_rei_stall
1858#else
1859
1860 mfpr r9, pt_pcbb // get FEN
1861#endif
1862
1863 ldqp r9, osfpcb_q_fen(r9)
1864 blbc r9, no_pm_change_10_ // skip if FEN disabled
1865
1866 mb // ensure no outstanding fills
1867 lda r12, 1<<dc_mode_v_dc_ena(r31)
1868 mtpr r12, dc_mode // turn dcache on so we can flush it
1869 nop // force correct slotting
1870 mfpr r31, pt0 // no mbox instructions in 1,2,3,4
1871 mfpr r31, pt0 // no mbox instructions in 1,2,3,4
1872 mfpr r31, pt0 // no mbox instructions in 1,2,3,4
1873 mfpr r31, pt0 // no mbox instructions in 1,2,3,4
1874
1875 lda r8, 0(r31) // flood the dcache with junk data
1876no_pm_change_5_: ldqp r31, 0(r8)
1877 lda r8, 0x20(r8) // touch each cache block
1878 srl r8, 13, r9
1879 blbc r9, no_pm_change_5_
1880
1881 mb // ensure no outstanding fills
1882 mtpr r31, dc_mode // turn the dcache back off
1883 nop // force correct slotting
1884 mfpr r31, pt0 // no hw_rei_stall in 0,1
1885#endif
1886
1887
1888no_pm_change_10_: hw_rei_stall // back we go
1889
1890 ALIGN_BLOCK
1891//+
1892// swppal_cont - finish up the swppal call_pal
1893//-
1894
1895swppal_cont:
1896 mfpr r2, pt_misc // get misc bits
1897 sll r0, pt_misc_v_switch, r0 // get the "I've switched" bit
1898 or r2, r0, r2 // set the bit
1899 mtpr r31, ev5__alt_mode // ensure alt_mode set to 0 (kernel)
1900 mtpr r2, pt_misc // update the chip
1901
1902 or r3, r31, r4
1903 mfpr r3, pt_impure // pass pointer to the impure area in r3
1904//orig fix_impure_ipr r3 // adjust impure pointer for ipr read
1905//orig restore_reg1 bc_ctl, r1, r3, ipr=1 // pass cns_bc_ctl in r1
1906//orig restore_reg1 bc_config, r2, r3, ipr=1 // pass cns_bc_config in r2
1907//orig unfix_impure_ipr r3 // restore impure pointer
1908 lda r3, CNS_Q_IPR(r3)
1909 RESTORE_SHADOW(r1,CNS_Q_BC_CTL,r3);
1910 RESTORE_SHADOW(r1,CNS_Q_BC_CFG,r3);
1911 lda r3, -CNS_Q_IPR(r3)
1912
1913 or r31, r31, r0 // set status to success
1914// pvc_violate 1007
1915 jmp r31, (r4) // and call our friend, it's her problem now
1916
1917
1918swppal_fail:
1919 addq r0, 1, r0 // set unknown pal or not loaded
1920 hw_rei // and return
1921
1922
1923// .sbttl "Memory management"
1924
1925 ALIGN_BLOCK
1926//+
1927//foe_ipte_handler
1928// IFOE detected on level 3 pte, sort out FOE vs ACV
1929//
1930// on entry:
1931// with
1932// R8 = pte
1933// R10 = pc
1934//
1935// Function
1936// Determine TNV vs ACV vs FOE. Build stack and dispatch
1937// Will not be here if TNV.
1938//-
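// (As set up by the stores below, the OS entMM handler is entered with
//  a0 = faulting VA (here the PC), a1 = MMCSR code (ACV or FOE), and
//  a2 = -1 to flag an istream reference.)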
1939
1940foe_ipte_handler:
1941 sll r11, 63-osfps_v_mode, r25 // Shift mode up to MS bit
1942 mtpr r31, ev5__ps // Set Ibox current mode to kernel
1943
1944 bis r11, r31, r12 // Save PS for stack write
1945 bge r25, foe_ipte_handler_10_ // no stack swap needed if cm=kern
1946
1947
1948 mtpr r31, ev5__dtb_cm // Set Mbox current mode to kernel -
1949 // no virt ref for next 2 cycles
1950 mtpr r30, pt_usp // save user stack
1951
1952 bis r31, r31, r11 // Set new PS
1953 mfpr r30, pt_ksp
1954
1955 srl r8, osfpte_v_ure-osfpte_v_kre, r8 // move pte user bits to kern
1956 nop
1957
1958foe_ipte_handler_10_: srl r8, osfpte_v_kre, r25 // get kre to <0>
1959 lda sp, 0-osfsf_c_size(sp)// allocate stack space
1960
1961 or r10, r31, r14 // Save pc/va in case TBmiss or fault on stack
1962 mfpr r13, pt_entmm // get entry point
1963
1964 stq r16, osfsf_a0(sp) // a0
1965 or r14, r31, r16 // pass pc/va as a0
1966
1967 stq r17, osfsf_a1(sp) // a1
1968 nop
1969
1970 stq r18, osfsf_a2(sp) // a2
1971 lda r17, mmcsr_c_acv(r31) // assume ACV
1972
1973 stq r16, osfsf_pc(sp) // save pc
1974 cmovlbs r25, mmcsr_c_foe, r17 // otherwise FOE
1975
1976 stq r12, osfsf_ps(sp) // save ps
1977 subq r31, 1, r18 // pass flag of istream as a2
1978
1979 stq r29, osfsf_gp(sp)
1980 mtpr r13, exc_addr // set vector address
1981
1982 mfpr r29, pt_kgp // load kgp
1983 hw_rei_spe // out to exec
1984
1985 ALIGN_BLOCK
1986//+
1987//invalid_ipte_handler
1988// TNV detected on level 3 pte, sort out TNV vs ACV
1989//
1990// on entry:
1991// with
1992// R8 = pte
1993// R10 = pc
1994//
1995// Function
1996// Determine TNV vs ACV. Build stack and dispatch.
1997//-
1998
1999invalid_ipte_handler:
2000 sll r11, 63-osfps_v_mode, r25 // Shift mode up to MS bit
2001 mtpr r31, ev5__ps // Set Ibox current mode to kernel
2002
2003 bis r11, r31, r12 // Save PS for stack write
2004 bge r25, invalid_ipte_handler_10_ // no stack swap needed if cm=kern
2005
2006
2007 mtpr r31, ev5__dtb_cm // Set Mbox current mode to kernel -
2008 // no virt ref for next 2 cycles
2009 mtpr r30, pt_usp // save user stack
2010
2011 bis r31, r31, r11 // Set new PS
2012 mfpr r30, pt_ksp
2013
2014 srl r8, osfpte_v_ure-osfpte_v_kre, r8 // move pte user bits to kern
2015 nop
2016
2017invalid_ipte_handler_10_: srl r8, osfpte_v_kre, r25 // get kre to <0>
2018 lda sp, 0-osfsf_c_size(sp)// allocate stack space
2019
2020 or r10, r31, r14 // Save pc/va in case TBmiss on stack
2021 mfpr r13, pt_entmm // get entry point
2022
2023 stq r16, osfsf_a0(sp) // a0
2024 or r14, r31, r16 // pass pc/va as a0
2025
2026 stq r17, osfsf_a1(sp) // a1
2027 nop
2028
2029 stq r18, osfsf_a2(sp) // a2
2030 and r25, 1, r17 // Isolate kre
2031
2032 stq r16, osfsf_pc(sp) // save pc
2033 xor r17, 1, r17 // map to acv/tnv as a1
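	// (kre=1 -> a1=0, kre=0 -> a1=1; this relies on the usual OSF
	//  mmcsr encodings of tnv=0 and acv=1)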
2034
2035 stq r12, osfsf_ps(sp) // save ps
2036 subq r31, 1, r18 // pass flag of istream as a2
2037
2038 stq r29, osfsf_gp(sp)
2039 mtpr r13, exc_addr // set vector address
2040
2041 mfpr r29, pt_kgp // load kgp
2042 hw_rei_spe // out to exec
2043
2044
2045
2046
2047 ALIGN_BLOCK
2048//+
2049//invalid_dpte_handler
2050// INVALID detected on level 3 pte, sort out TNV vs ACV
2051//
2052// on entry:
2053// with
2054// R10 = va
2055// R8 = pte
2056// R9 = mm_stat
2057// PT6 = pc
2058//
2059// Function
2060// Determine TNV vs ACV. Build stack and dispatch
2061//-
2062
2063
2064invalid_dpte_handler:
2065 mfpr r12, pt6
2066 blbs r12, tnv_in_pal // Special handler if original faulting reference was in PALmode
2067
2068 bis r12, r31, r14 // save PC in case of tbmiss or fault
2069 srl r9, mm_stat_v_opcode, r25 // shift opc to <0>
2070
2071 mtpr r11, pt0 // Save PS for stack write
2072 and r25, mm_stat_m_opcode, r25 // isolate opcode
2073
2074 cmpeq r25, evx_opc_sync, r25 // is it FETCH/FETCH_M?
2075 blbs r25, nmiss_fetch_ldr31_err // yes
2076
2077 //dismiss exception if load to r31/f31
2078 blbs r9, invalid_dpte_no_dismiss // mm_stat<0> set on store or fetchm
2079
2080 // not a store or fetch, must be a load
2081 srl r9, mm_stat_v_ra, r25 // Shift rnum to low bits
2082
2083 and r25, 0x1F, r25 // isolate rnum
2084 nop
2085
2086 cmpeq r25, 0x1F, r25 // Is the rnum r31 or f31?
2087 bne r25, nmiss_fetch_ldr31_err // Yes, dismiss the fault
2088
2089invalid_dpte_no_dismiss:
2090 sll r11, 63-osfps_v_mode, r25 // Shift mode up to MS bit
2091 mtpr r31, ev5__ps // Set Ibox current mode to kernel
2092
2093 mtpr r31, ev5__dtb_cm // Set Mbox current mode to kernel -
2094 // no virt ref for next 2 cycles
2095 bge r25, invalid_dpte_no_dismiss_10_ // no stack swap needed if cm=kern
2096
2097 srl r8, osfpte_v_ure-osfpte_v_kre, r8 // move pte user bits to kern
2098 mtpr r30, pt_usp // save user stack
2099
2100 bis r31, r31, r11 // Set new PS
2101 mfpr r30, pt_ksp
2102
2103invalid_dpte_no_dismiss_10_: srl r8, osfpte_v_kre, r12 // get kre to <0>
2104 lda sp, 0-osfsf_c_size(sp)// allocate stack space
2105
2106 or r10, r31, r25 // Save va in case TBmiss on stack
2107 and r9, 1, r13 // save r/w flag
2108
2109 stq r16, osfsf_a0(sp) // a0
2110 or r25, r31, r16 // pass va as a0
2111
2112 stq r17, osfsf_a1(sp) // a1
2113 or r31, mmcsr_c_acv, r17 // assume acv
2114
2115 srl r12, osfpte_v_kwe-osfpte_v_kre, r25 // get write enable to <0>
2116 stq r29, osfsf_gp(sp)
2117
2118 stq r18, osfsf_a2(sp) // a2
2119 cmovlbs r13, r25, r12 // if write access move acv based on write enable
2120
2121 or r13, r31, r18 // pass flag of dstream access and read vs write
2122 mfpr r25, pt0 // get ps
2123
2124 stq r14, osfsf_pc(sp) // save pc
2125 mfpr r13, pt_entmm // get entry point
2126
2127 stq r25, osfsf_ps(sp) // save ps
2128 mtpr r13, exc_addr // set vector address
2129
2130 mfpr r29, pt_kgp // load kgp
2131 cmovlbs r12, mmcsr_c_tnv, r17 // make p2 be tnv if access ok else acv
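	// (a1 selection recap: r12 starts as the kernel-read-enable bit; if
	//  mm_stat flagged a write, the kernel-write-enable bit is substituted.
	//  If that enable is set the fault is reported as TNV, else it stays ACV.)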
2132
2133 hw_rei_spe // out to exec
2134
2135//+
2136//
2137// We come here if we are erroring on a dtb_miss and the instruction is a
2138// fetch, fetch_m, or a load to r31/f31.
2139// The PC is incremented and we return to the program,
2140// essentially ignoring the instruction and the error.
2141//
2142//-
2143 ALIGN_BLOCK
2144nmiss_fetch_ldr31_err:
2145 mfpr r12, pt6
2146 addq r12, 4, r12 // bump pc to pc+4
2147
2148 mtpr r12, exc_addr // and set entry point
2149 mfpr r31, pt0 // pad exc_addr write
2150
2151 hw_rei //
2152
2153 ALIGN_BLOCK
2154//+
2155// double_pte_inv
2156// We had a single tbmiss which turned into a double tbmiss which found
2157// an invalid PTE. Return to single miss with a fake pte, and the invalid
2158// single miss flow will report the error.
2159//
2160// on entry:
2161// r21 PTE
2162// r22 available
2163// VA IPR locked with original fault VA
2164// pt4 saved r21
2165// pt5 saved r22
2166// pt6 original exc_addr
2167//
2168// on return to tbmiss flow:
2169// r8 fake PTE
2170//
2171//
2172//-
2173double_pte_inv:
2174 srl r21, osfpte_v_kre, r21 // get the kre bit to <0>
2175 mfpr r22, exc_addr // get the pc
2176
2177 lda r22, 4(r22) // inc the pc
2178 lda r8, osfpte_m_prot(r31) // make a fake pte with xre and xwe set
2179
2180 cmovlbc r21, r31, r8 // set to all 0 for acv if pte<kre> is 0
2181 mtpr r22, exc_addr // set for rei
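	// (The fake-PTE trick: osfpte_m_prot sets the read/write-enable bits, so
	//  the retried single-miss flow sees an access-ok but invalid PTE and
	//  reports TNV; if the original PTE had kre clear we hand back all zeros
	//  instead, and the same flow reports ACV.)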
2182
2183 mfpr r21, pt4 // restore regs
2184 mfpr r22, pt5 // restore regs
2185
2186 hw_rei // back to tb miss
2187
2188 ALIGN_BLOCK
2189//+
2190//tnv_in_pal
2191// The only places in PAL that load or store are the
2192// stack builders, rti, and retsys. A TNV in any of these means we
2193// need to take a ksp-not-valid halt.
2194//
2195//-
2196tnv_in_pal:
2197
2198
2199 br r31, ksp_inval_halt
2200
2201
2202// .sbttl "Icache flush routines"
2203
2204 ALIGN_BLOCK
2205//+
2206// Common Icache flush routine.
2207//
2208//
2209//-
2210pal_ic_flush:
2211 nop
2212 mtpr r31, ev5__ic_flush_ctl // Icache flush - E1
2213 nop
2214 nop
2215
2216// Now, do 44 NOPs: 3 RFB prefetches (24) + IC buffer, IB, slot, issue (20)
2217 nop
2218 nop
2219 nop
2220 nop
2221
2222 nop
2223 nop
2224 nop
2225 nop
2226
2227 nop
2228 nop // 10
2229
2230 nop
2231 nop
2232 nop
2233 nop
2234
2235 nop
2236 nop
2237 nop
2238 nop
2239
2240 nop
2241 nop // 20
2242
2243 nop
2244 nop
2245 nop
2246 nop
2247
2248 nop
2249 nop
2250 nop
2251 nop
2252
2253 nop
2254 nop // 30
2255 nop
2256 nop
2257 nop
2258 nop
2259
2260 nop
2261 nop
2262 nop
2263 nop
2264
2265 nop
2266 nop // 40
2267
2268 nop
2269 nop
2270
2271one_cycle_and_hw_rei:
2272 nop
2273 nop
2274
2275 hw_rei_stall
2276
2277#if icflush_on_tbix != 0
2278
2279
2280 ALIGN_BLOCK
2281
2282//+
2283// Common Icache flush and ITB invalidate single routine.
2284// ITBIS and hw_rei_stall must be in same octaword.
2285// r17 - has address to invalidate
2286//
2287//-
2288PAL_IC_FLUSH_AND_TBISI:
2289 nop
2290 mtpr r31, ev5__ic_flush_ctl // Icache flush - E1
2291 nop
2292 nop
2293
2294// Now, do 44 NOPs: 3 RFB prefetches (24) + IC buffer, IB, slot, issue (20)
2295 nop
2296 nop
2297 nop
2298 nop
2299
2300 nop
2301 nop
2302 nop
2303 nop
2304
2305 nop
2306 nop // 10
2307
2308 nop
2309 nop
2310 nop
2311 nop
2312
2313 nop
2314 nop
2315 nop
2316 nop
2317
2318 nop
2319 nop // 20
2320
2321 nop
2322 nop
2323 nop
2324 nop
2325
2326 nop
2327 nop
2328 nop
2329 nop
2330
2331 nop
2332 nop // 30
2333 nop
2334 nop
2335 nop
2336 nop
2337
2338 nop
2339 nop
2340 nop
2341 nop
2342
2343 nop
2344 nop // 40
2345
2346
2347 nop
2348 nop
2349
2350 nop
2351 nop
2352
2353	// A quadword is 64 bits, so an octaword is 128 bits -> 16 bytes -> 4 instructions.
2354	// 44 nops plus the 4 instructions before them make 48 instructions.
2355	// Since this routine started on a 32-byte (8-instruction) boundary,
2356	// the following 2 instructions will fall in the same octaword, as required.
2357// ALIGN_BRANCH
2358 mtpr r17, ev5__itb_is // Flush ITB
2359 hw_rei_stall
2360
2361#endif
2362
2363 ALIGN_BLOCK
2364//+
2365//osfpal_calpal_opcdec
2366// Here for all opcdec CALL_PALs
2367//
2368// Build stack frame
2369// a0 <- code
2370// a1 <- unpred
2371// a2 <- unpred
2372// vector via entIF
2373//
2374//-
2375
2376osfpal_calpal_opcdec:
2377 sll r11, 63-osfps_v_mode, r25 // Shift mode up to MS bit
2378 mtpr r31, ev5__ps // Set Ibox current mode to kernel
2379
2380 mfpr r14, exc_addr // get pc
2381 nop
2382
2383 bis r11, r31, r12 // Save PS for stack write
2384 bge r25, osfpal_calpal_opcdec_10_ // no stack swap needed if cm=kern
2385
2386
2387 mtpr r31, ev5__dtb_cm // Set Mbox current mode to kernel -
2388 // no virt ref for next 2 cycles
2389 mtpr r30, pt_usp // save user stack
2390
2391 bis r31, r31, r11 // Set new PS
2392 mfpr r30, pt_ksp
2393
2394osfpal_calpal_opcdec_10_:
2395 lda sp, 0-osfsf_c_size(sp)// allocate stack space
2396 nop
2397
2398 stq r16, osfsf_a0(sp) // save regs
2399 bis r31, osf_a0_opdec, r16 // set a0
2400
2401 stq r18, osfsf_a2(sp) // a2
2402 mfpr r13, pt_entif // get entry point
2403
2404 stq r12, osfsf_ps(sp) // save old ps
2405 stq r17, osfsf_a1(sp) // a1
2406
2407 stq r14, osfsf_pc(sp) // save pc
2408 nop
2409
2410 stq r29, osfsf_gp(sp) // save gp
2411 mtpr r13, exc_addr // load exc_addr with entIF
2412 // 1 cycle to hw_rei
2413
2414 mfpr r29, pt_kgp // get the kgp
2415
2416
2417 hw_rei_spe // done
2418
2419
2420
2421
2422
2423//+
2424//pal_update_pcb
2425// Update the PCB with the current SP, AST, and CC info
2426//
2427// r0 - return linkage
2428//-
2429 ALIGN_BLOCK
2430
2431pal_update_pcb:
2432 mfpr r12, pt_pcbb // get pcbb
2433 and r11, osfps_m_mode, r25 // get mode
2434 beq r25, pal_update_pcb_10_ // in kern? no need to update user sp
2435 mtpr r30, pt_usp // save user stack
2436 stqp r30, osfpcb_q_usp(r12) // store usp
2437 br r31, pal_update_pcb_20_ // join common
2438pal_update_pcb_10_: stqp r30, osfpcb_q_ksp(r12) // store ksp
2439pal_update_pcb_20_: rpcc r13 // get cyccounter
2440 srl r13, 32, r14 // move offset
2441 addl r13, r14, r14 // merge for new time
2442 stlp r14, osfpcb_l_cc(r12) // save time
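	// (rpcc returns the free-running count in <31:0> and the per-process
	//  offset in <63:32>; adding the halves gives the process's accumulated
	//  cycle count, which is what osfpcb_l_cc holds.)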
2443
2444//orig pvc_jsr updpcb, bsr=1, dest=1
2445 ret r31, (r0)
2446
2447
2448
2449#if remove_save_state == 0
2450
2451// .sbttl "PAL_SAVE_STATE"
2452//+
2453//
2454// Pal_save_state
2455//
2456// Function
2457// All chip state saved: all PTs, SRs, FRs, IPRs
2458//
2459//
2460// Regs on entry...
2461//
2462// R0 = halt code
2463// pt0 = r0
2464// R1 = pointer to impure
2465// pt4 = r1
2466// R3 = return addr
2467// pt5 = r3
2468//
2469// register usage:
2470// r0 = halt_code
2471// r1 = addr of impure area
2472// r3 = return_address
2473// r4 = scratch
2474//
2475//-
2476
2477
2478 ALIGN_BLOCK
2479 .globl pal_save_state
2480pal_save_state:
2481//
2482//
2483// start of implementation independent save routine
2484//
2485// The impure area is larger than the addressability of hw_ld and hw_st,
2486// so we need to play some games. The impure area
2487// is informally divided into a "machine independent" part and a
2488// "machine dependent" part. The state saved in the
2489// "machine independent" part is the gpr's, fpr's, hlt, flag, and mchkflag (use the (un)fix_impure_gpr macros).
2490// Everything else lives in the "machine dependent" part (use the (un)fix_impure_ipr macros).
2491// The impure pointer must be adjusted by a different offset for each part. The store/restore_reg
2492// macros automagically adjust the offset correctly.
2493//
2494
2495// The distributed code is commented out and followed by corresponding SRC code.
2496// Beware: SAVE_IPR and RESTORE_IPR blow away r0(v0)
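// (Rough layout assumed by the offsets below -- the exact values come from
//  the cns/impure definitions in ev5_impure.h:
//	impure base + CNS_Q_IPR : "machine dependent" area (PTs, shadow regs, IPRs)
//	impure base + 0x200     : center of the CPU segment, the base used for
//	                          the "machine independent" GPR/FPR/flag saves)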
2497
2498//orig fix_impure_gpr r1 // adjust impure area pointer for stores to "gpr" part of impure area
2499 lda r1, 0x200(r1) // Point to center of CPU segment
2500//orig store_reg1 flag, r31, r1, ipr=1 // clear dump area flag
2501 SAVE_GPR(r31,CNS_Q_FLAG,r1) // Clear the valid flag
2502//orig store_reg1 hlt, r0, r1, ipr=1
2503 SAVE_GPR(r0,CNS_Q_HALT,r1) // Save the halt code
2504
2505 mfpr r0, pt0 // get r0 back //orig
2506//orig store_reg1 0, r0, r1 // save r0
2507 SAVE_GPR(r0,CNS_Q_GPR+0x00,r1) // Save r0
2508
2509 mfpr r0, pt4 // get r1 back //orig
2510//orig store_reg1 1, r0, r1 // save r1
2511 SAVE_GPR(r0,CNS_Q_GPR+0x08,r1) // Save r1
2512
2513//orig store_reg 2 // save r2
2514 SAVE_GPR(r2,CNS_Q_GPR+0x10,r1) // Save r2
2515
2516 mfpr r0, pt5 // get r3 back //orig
2517//orig store_reg1 3, r0, r1 // save r3
2518 SAVE_GPR(r0,CNS_Q_GPR+0x18,r1) // Save r3
2519
2520 // reason code has been saved
2521 // r0 has been saved
2522 // r1 has been saved
2523 // r2 has been saved
2524 // r3 has been saved
2525 // pt0, pt4, pt5 have been lost
2526
2527 //
2528 // Get out of shadow mode
2529 //
2530
2531 mfpr r2, icsr // Get icsr //orig
2532//orig ldah r0, <1@<icsr_v_sde-16>>(r31) // Get a one in SHADOW_ENABLE bit location
2533 ldah r0, (1<<(icsr_v_sde-16))(r31)
2534 bic r2, r0, r0 // ICSR with SDE clear //orig
2535 mtpr r0, icsr // Turn off SDE //orig
2536
2537 mfpr r31, pt0 // SDE bubble cycle 1 //orig
2538 mfpr r31, pt0 // SDE bubble cycle 2 //orig
2539 mfpr r31, pt0 // SDE bubble cycle 3 //orig
2540 nop //orig
2541
2542
2543 // save integer regs R4-r31
2544//orig #define t 4
2545//orig .repeat 28
2546//orig store_reg \t
2547//orig #define t t + 1
2548//orig .endr
2549 SAVE_GPR(r4,CNS_Q_GPR+0x20,r1)
2550 SAVE_GPR(r5,CNS_Q_GPR+0x28,r1)
2551 SAVE_GPR(r6,CNS_Q_GPR+0x30,r1)
2552 SAVE_GPR(r7,CNS_Q_GPR+0x38,r1)
2553 SAVE_GPR(r8,CNS_Q_GPR+0x40,r1)
2554 SAVE_GPR(r9,CNS_Q_GPR+0x48,r1)
2555 SAVE_GPR(r10,CNS_Q_GPR+0x50,r1)
2556 SAVE_GPR(r11,CNS_Q_GPR+0x58,r1)
2557 SAVE_GPR(r12,CNS_Q_GPR+0x60,r1)
2558 SAVE_GPR(r13,CNS_Q_GPR+0x68,r1)
2559 SAVE_GPR(r14,CNS_Q_GPR+0x70,r1)
2560 SAVE_GPR(r15,CNS_Q_GPR+0x78,r1)
2561 SAVE_GPR(r16,CNS_Q_GPR+0x80,r1)
2562 SAVE_GPR(r17,CNS_Q_GPR+0x88,r1)
2563 SAVE_GPR(r18,CNS_Q_GPR+0x90,r1)
2564 SAVE_GPR(r19,CNS_Q_GPR+0x98,r1)
2565 SAVE_GPR(r20,CNS_Q_GPR+0xA0,r1)
2566 SAVE_GPR(r21,CNS_Q_GPR+0xA8,r1)
2567 SAVE_GPR(r22,CNS_Q_GPR+0xB0,r1)
2568 SAVE_GPR(r23,CNS_Q_GPR+0xB8,r1)
2569 SAVE_GPR(r24,CNS_Q_GPR+0xC0,r1)
2570 SAVE_GPR(r25,CNS_Q_GPR+0xC8,r1)
2571 SAVE_GPR(r26,CNS_Q_GPR+0xD0,r1)
2572 SAVE_GPR(r27,CNS_Q_GPR+0xD8,r1)
2573 SAVE_GPR(r28,CNS_Q_GPR+0xE0,r1)
2574 SAVE_GPR(r29,CNS_Q_GPR+0xE8,r1)
2575 SAVE_GPR(r30,CNS_Q_GPR+0xF0,r1)
2576 SAVE_GPR(r31,CNS_Q_GPR+0xF8,r1)
2577
2578 // save all paltemp regs except pt0
2579
2580//orig unfix_impure_gpr r1 // adjust impure area pointer for gpr stores
2581//orig fix_impure_ipr r1 // adjust impure area pointer for pt stores
2582//orig #define t 1
2583//orig .repeat 23
2584//orig store_reg \t , pal=1
2585//orig #define t t + 1
2586//orig .endr
2587
2588 lda r1, -0x200(r1) // Restore the impure base address.
2589 lda r1, CNS_Q_IPR(r1) // Point to the base of IPR area.
2590 SAVE_IPR(pt0,CNS_Q_PT+0x00,r1) // the osf code didn't save/restore palTemp 0 ?? pboyle
2591 SAVE_IPR(pt1,CNS_Q_PT+0x08,r1)
2592 SAVE_IPR(pt2,CNS_Q_PT+0x10,r1)
2593 SAVE_IPR(pt3,CNS_Q_PT+0x18,r1)
2594 SAVE_IPR(pt4,CNS_Q_PT+0x20,r1)
2595 SAVE_IPR(pt5,CNS_Q_PT+0x28,r1)
2596 SAVE_IPR(pt6,CNS_Q_PT+0x30,r1)
2597 SAVE_IPR(pt7,CNS_Q_PT+0x38,r1)
2598 SAVE_IPR(pt8,CNS_Q_PT+0x40,r1)
2599 SAVE_IPR(pt9,CNS_Q_PT+0x48,r1)
2600 SAVE_IPR(pt10,CNS_Q_PT+0x50,r1)
2601 SAVE_IPR(pt11,CNS_Q_PT+0x58,r1)
2602 SAVE_IPR(pt12,CNS_Q_PT+0x60,r1)
2603 SAVE_IPR(pt13,CNS_Q_PT+0x68,r1)
2604 SAVE_IPR(pt14,CNS_Q_PT+0x70,r1)
2605 SAVE_IPR(pt15,CNS_Q_PT+0x78,r1)
2606 SAVE_IPR(pt16,CNS_Q_PT+0x80,r1)
2607 SAVE_IPR(pt17,CNS_Q_PT+0x88,r1)
2608 SAVE_IPR(pt18,CNS_Q_PT+0x90,r1)
2609 SAVE_IPR(pt19,CNS_Q_PT+0x98,r1)
2610 SAVE_IPR(pt20,CNS_Q_PT+0xA0,r1)
2611 SAVE_IPR(pt21,CNS_Q_PT+0xA8,r1)
2612 SAVE_IPR(pt22,CNS_Q_PT+0xB0,r1)
2613 SAVE_IPR(pt23,CNS_Q_PT+0xB8,r1)
2614
2615 // Restore shadow mode
2616 mfpr r31, pt0 // pad write to icsr out of shadow of store (trap does not abort write) //orig
2617 mfpr r31, pt0 //orig
2618 mtpr r2, icsr // Restore original ICSR //orig
2619
2620 mfpr r31, pt0 // SDE bubble cycle 1 //orig
2621 mfpr r31, pt0 // SDE bubble cycle 2 //orig
2622 mfpr r31, pt0 // SDE bubble cycle 3 //orig
2623 nop //orig
2624
2625 // save all integer shadow regs
2626
2627//orig #define t 8
2628//orig .repeat 7
2629//orig store_reg \t, shadow=1
2630//orig #define t t + 1
2631//orig .endr
2632//orig store_reg 25, shadow=1
2633
2634 SAVE_SHADOW( r8,CNS_Q_SHADOW+0x00,r1) // also called p0...p7 in the Hudson code
2635 SAVE_SHADOW( r9,CNS_Q_SHADOW+0x08,r1)
2636 SAVE_SHADOW(r10,CNS_Q_SHADOW+0x10,r1)
2637 SAVE_SHADOW(r11,CNS_Q_SHADOW+0x18,r1)
2638 SAVE_SHADOW(r12,CNS_Q_SHADOW+0x20,r1)
2639 SAVE_SHADOW(r13,CNS_Q_SHADOW+0x28,r1)
2640 SAVE_SHADOW(r14,CNS_Q_SHADOW+0x30,r1)
2641 SAVE_SHADOW(r25,CNS_Q_SHADOW+0x38,r1)
2642
2643//orig store_reg exc_addr, ipr=1 // save ipr
2644//orig store_reg pal_base, ipr=1 // save ipr
2645//orig store_reg mm_stat, ipr=1 // save ipr
2646//orig store_reg va, ipr=1 // save ipr
2647//orig store_reg icsr, ipr=1 // save ipr
2648//orig store_reg ipl, ipr=1 // save ipr
2649//orig store_reg ps, ipr=1 // save ipr
2650//orig store_reg itb_asn, ipr=1 // save ipr
2651//orig store_reg aster, ipr=1 // save ipr
2652//orig store_reg astrr, ipr=1 // save ipr
2653//orig store_reg sirr, ipr=1 // save ipr
2654//orig store_reg isr, ipr=1 // save ipr
2655//orig store_reg ivptbr, ipr=1 // save ipr
2656//orig store_reg mcsr, ipr=1 // save ipr
2657//orig store_reg dc_mode, ipr=1 // save ipr
2658
2659 SAVE_IPR(excAddr,CNS_Q_EXC_ADDR,r1)
2660 SAVE_IPR(palBase,CNS_Q_PAL_BASE,r1)
2661 SAVE_IPR(mmStat,CNS_Q_MM_STAT,r1)
2662 SAVE_IPR(va,CNS_Q_VA,r1)
2663 SAVE_IPR(icsr,CNS_Q_ICSR,r1)
2664 SAVE_IPR(ipl,CNS_Q_IPL,r1)
2665 SAVE_IPR(ips,CNS_Q_IPS,r1)
2666 SAVE_IPR(itbAsn,CNS_Q_ITB_ASN,r1)
2667 SAVE_IPR(aster,CNS_Q_ASTER,r1)
2668 SAVE_IPR(astrr,CNS_Q_ASTRR,r1)
2669 SAVE_IPR(sirr,CNS_Q_SIRR,r1)
2670 SAVE_IPR(isr,CNS_Q_ISR,r1)
2671 SAVE_IPR(iVptBr,CNS_Q_IVPTBR,r1)
2672 SAVE_IPR(mcsr,CNS_Q_MCSR,r1)
2673 SAVE_IPR(dcMode,CNS_Q_DC_MODE,r1)
2674
2675//orig pvc_violate 379 // mf maf_mode after a store ok (pvc doesn't distinguish ld from st)
2676//orig store_reg maf_mode, ipr=1 // save ipr -- no mbox instructions for
2677//orig // PVC violation applies only to
2678pvc$osf35$379: // loads. HW_ST ok here, so ignore
2679 SAVE_IPR(mafMode,CNS_Q_MAF_MODE,r1) // MBOX INST->MF MAF_MODE IN 0,1,2
2680
2681
2682 //the following iprs are informational only -- will not be restored
2683
2684//orig store_reg icperr_stat, ipr=1
2685//orig store_reg pmctr, ipr=1
2686//orig store_reg intid, ipr=1
2687//orig store_reg exc_sum, ipr=1
2688//orig store_reg exc_mask, ipr=1
2689//orig ldah r14, 0xfff0(r31)
2690//orig zap r14, 0xE0, r14 // Get Cbox IPR base
2691//orig nop // pad mf dcperr_stat out of shadow of last store
2692//orig nop
2693//orig nop
2694//orig store_reg dcperr_stat, ipr=1
2695
2696 SAVE_IPR(icPerr,CNS_Q_ICPERR_STAT,r1)
2697 SAVE_IPR(PmCtr,CNS_Q_PM_CTR,r1)
2698 SAVE_IPR(intId,CNS_Q_INT_ID,r1)
2699 SAVE_IPR(excSum,CNS_Q_EXC_SUM,r1)
2700 SAVE_IPR(excMask,CNS_Q_EXC_MASK,r1)
2701 ldah r14, 0xFFF0(zero)
2702 zap r14, 0xE0, r14 // Get base address of CBOX IPRs
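	// (ldah 0xFFF0 sign-extends to 0xFFFF.FFFF.FFF0.0000; zapping bytes <7:5>
	//  leaves 0x0000.00FF.FFF0.0000, the base of the externally visible
	//  Cbox IPRs read below.)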
2703 NOP // Pad mfpr dcPerr out of shadow of
2704 NOP // last store
2705 NOP
2706 SAVE_IPR(dcPerr,CNS_Q_DCPERR_STAT,r1)
2707
2708 // read cbox ipr state
2709
2710//orig mb
2711//orig ldqp r2, ev5__sc_ctl(r14)
2712//orig ldqp r13, ld_lock(r14)
2713//orig ldqp r4, ev5__sc_addr(r14)
2714//orig ldqp r5, ev5__ei_addr(r14)
2715//orig ldqp r6, ev5__bc_tag_addr(r14)
2716//orig ldqp r7, ev5__fill_syn(r14)
2717//orig bis r5, r4, r31
2718//orig bis r7, r6, r31 // make sure previous loads finish before reading stat registers which unlock them
2719//orig ldqp r8, ev5__sc_stat(r14) // unlocks sc_stat,sc_addr
2720//orig ldqp r9, ev5__ei_stat(r14) // may unlock ei_*, bc_tag_addr, fill_syn
2721//orig ldqp r31, ev5__ei_stat(r14) // ensures it is really unlocked
2722//orig mb
2723
2724#ifndef SIMOS
2725 mb
2726 ldq_p r2, scCtl(r14)
2727 ldq_p r13, ldLock(r14)
2728 ldq_p r4, scAddr(r14)
2729 ldq_p r5, eiAddr(r14)
2730 ldq_p r6, bcTagAddr(r14)
2731 ldq_p r7, fillSyn(r14)
2732 bis r5, r4, zero // Make sure all loads complete before
2733 bis r7, r6, zero // reading registers that unlock them.
2734 ldq_p r8, scStat(r14) // Unlocks scAddr.
2735 ldq_p r9, eiStat(r14) // Unlocks eiAddr, bcTagAddr, fillSyn.
2736 ldq_p zero, eiStat(r14) // Make sure it is really unlocked.
2737 mb
2738#endif
2739//orig // save cbox ipr state
2740//orig store_reg1 sc_ctl, r2, r1, ipr=1
2741//orig store_reg1 ld_lock, r13, r1, ipr=1
2742//orig store_reg1 sc_addr, r4, r1, ipr=1
2743//orig store_reg1 ei_addr, r5, r1, ipr=1
2744//orig store_reg1 bc_tag_addr, r6, r1, ipr=1
2745//orig store_reg1 fill_syn, r7, r1, ipr=1
2746//orig store_reg1 sc_stat, r8, r1, ipr=1
2747//orig store_reg1 ei_stat, r9, r1, ipr=1
2748//orig //bc_config? sl_rcv?
2749
2750 SAVE_SHADOW(r2,CNS_Q_SC_CTL,r1);
2751 SAVE_SHADOW(r13,CNS_Q_LD_LOCK,r1);
2752 SAVE_SHADOW(r4,CNS_Q_SC_ADDR,r1);
2753 SAVE_SHADOW(r5,CNS_Q_EI_ADDR,r1);
2754 SAVE_SHADOW(r6,CNS_Q_BC_TAG_ADDR,r1);
2755 SAVE_SHADOW(r7,CNS_Q_FILL_SYN,r1);
2756 SAVE_SHADOW(r8,CNS_Q_SC_STAT,r1);
2757 SAVE_SHADOW(r9,CNS_Q_EI_STAT,r1);
2758
2759// restore impure base //orig
2760//orig unfix_impure_ipr r1
2761 lda r1, -CNS_Q_IPR(r1)
2762
2763// save all floating regs //orig
2764 mfpr r0, icsr // get icsr //orig
2765 or r31, 1, r2 // get a one //orig
2766//orig sll r2, #icsr_v_fpe, r2 // shift for fpu spot //orig
2767 sll r2, icsr_v_fpe, r2 // Shift it into ICSR<FPE> position
2768 or r2, r0, r0 // set FEN on //orig
2769 mtpr r0, icsr // write to icsr, enabling FEN //orig
2770
2771// map the save area virtually
2772// orig mtpr r31, dtb_ia // clear the dtb
2773// orig srl r1, page_offset_size_bits, r0 // Clean off low bits of VA
2774// orig sll r0, 32, r0 // shift to PFN field
2775// orig lda r2, 0xff(r31) // all read enable and write enable bits set
2776// orig sll r2, 8, r2 // move to PTE location
2777// orig addq r0, r2, r0 // combine with PFN
2778// orig mtpr r0, dtb_pte // Load PTE and set TB valid bit
2779// orig mtpr r1, dtb_tag // write TB tag
2780
2781 mtpr r31, dtbIa // Clear all DTB entries
2782 srl r1, va_s_off, r0 // Clean off byte-within-page offset
2783 sll r0, pte_v_pfn, r0 // Shift to form PFN
2784 lda r0, pte_m_prot(r0) // Set all read/write enable bits
2785 mtpr r0, dtbPte // Load the PTE and set valid
2786 mtpr r1, dtbTag // Write the PTE and tag into the DTB
2787
2788
2789//orig // map the next page too - in case the impure area crosses a page boundary
2790//orig lda r4, 1@page_offset_size_bits(r1) // generate address for next page
2791//orig srl r4, page_offset_size_bits, r0 // Clean off low bits of VA
2792//orig sll r0, 32, r0 // shift to PFN field
2793//orig lda r2, 0xff(r31) // all read enable and write enable bits set
2794//orig sll r2, 8, r2 // move to PTE location
2795//orig addq r0, r2, r0 // combine with PFN
2796//orig mtpr r0, dtb_pte // Load PTE and set TB valid bit
2797//orig mtpr r4, dtb_tag // write TB tag
2798
2799 lda r4, (1<<va_s_off)(r1) // Generate address for next page
2800 srl r4, va_s_off, r0 // Clean off byte-within-page offset
2801 sll r0, pte_v_pfn, r0 // Shift to form PFN
2802 lda r0, pte_m_prot(r0) // Set all read/write enable bits
2803 mtpr r0, dtbPte // Load the PTE and set valid
2804 mtpr r4, dtbTag // Write the PTE and tag into the DTB
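	// (These two PTEs effectively identity-map the impure area -- the PFN is
	//  just the VA shifted down -- with all read/write enables set, so the
	//  virtual loads/stores that follow hit in the DTB rather than missing
	//  while we are in PAL.)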
2805
2806 sll r31, 0, r31 // stall cycle 1 // orig
2807 sll r31, 0, r31 // stall cycle 2 // orig
2808 sll r31, 0, r31 // stall cycle 3 // orig
2809 nop // orig
2810
2811//orig // add offset for saving fpr regs
2812//orig fix_impure_gpr r1
2813
2814 lda r1, 0x200(r1) // Point to center of CPU segment
2815
2816// now save the regs - F0-F31
2817
2818//orig #define t 0
2819//orig .repeat 32
2820//orig store_reg \t , fpu=1
2821//orig #define t t + 1
2822//orig .endr
2823
2824 mf_fpcr f0 // original
2825
2826 SAVE_FPR(f0,CNS_Q_FPR+0x00,r1)
2827 SAVE_FPR(f1,CNS_Q_FPR+0x08,r1)
2828 SAVE_FPR(f2,CNS_Q_FPR+0x10,r1)
2829 SAVE_FPR(f3,CNS_Q_FPR+0x18,r1)
2830 SAVE_FPR(f4,CNS_Q_FPR+0x20,r1)
2831 SAVE_FPR(f5,CNS_Q_FPR+0x28,r1)
2832 SAVE_FPR(f6,CNS_Q_FPR+0x30,r1)
2833 SAVE_FPR(f7,CNS_Q_FPR+0x38,r1)
2834 SAVE_FPR(f8,CNS_Q_FPR+0x40,r1)
2835 SAVE_FPR(f9,CNS_Q_FPR+0x48,r1)
2836 SAVE_FPR(f10,CNS_Q_FPR+0x50,r1)
2837 SAVE_FPR(f11,CNS_Q_FPR+0x58,r1)
2838 SAVE_FPR(f12,CNS_Q_FPR+0x60,r1)
2839 SAVE_FPR(f13,CNS_Q_FPR+0x68,r1)
2840 SAVE_FPR(f14,CNS_Q_FPR+0x70,r1)
2841 SAVE_FPR(f15,CNS_Q_FPR+0x78,r1)
2842 SAVE_FPR(f16,CNS_Q_FPR+0x80,r1)
2843 SAVE_FPR(f17,CNS_Q_FPR+0x88,r1)
2844 SAVE_FPR(f18,CNS_Q_FPR+0x90,r1)
2845 SAVE_FPR(f19,CNS_Q_FPR+0x98,r1)
2846 SAVE_FPR(f20,CNS_Q_FPR+0xA0,r1)
2847 SAVE_FPR(f21,CNS_Q_FPR+0xA8,r1)
2848 SAVE_FPR(f22,CNS_Q_FPR+0xB0,r1)
2849 SAVE_FPR(f23,CNS_Q_FPR+0xB8,r1)
2850 SAVE_FPR(f24,CNS_Q_FPR+0xC0,r1)
2851 SAVE_FPR(f25,CNS_Q_FPR+0xC8,r1)
2852 SAVE_FPR(f26,CNS_Q_FPR+0xD0,r1)
2853 SAVE_FPR(f27,CNS_Q_FPR+0xD8,r1)
2854 SAVE_FPR(f28,CNS_Q_FPR+0xE0,r1)
2855 SAVE_FPR(f29,CNS_Q_FPR+0xE8,r1)
2856 SAVE_FPR(f30,CNS_Q_FPR+0xF0,r1)
2857 SAVE_FPR(f31,CNS_Q_FPR+0xF8,r1)
2858
2859//orig //switch impure offset from gpr to ipr---
2860//orig unfix_impure_gpr r1
2861//orig fix_impure_ipr r1
2862//orig store_reg1 fpcsr, f0, r1, fpcsr=1
2863
2864	SAVE_FPR(f0,CNS_Q_FPCSR,r1) // fpcsr loaded above into f0 -- can it reach? // pb
2865 lda r1, -0x200(r1) // Restore the impure base address
2866
2867//orig // and back to gpr ---
2868//orig unfix_impure_ipr r1
2869//orig fix_impure_gpr r1
2870
2871//orig lda r0, cns_mchksize(r31) // get size of mchk area
2872//orig store_reg1 mchkflag, r0, r1, ipr=1
2873//orig mb
2874
2875 lda r1, CNS_Q_IPR(r1) // Point to base of IPR area again
2876	// save this using the IPR base (it is closer), not the GPR base as they used...pb
2877 lda r0, MACHINE_CHECK_SIZE(r31) // get size of mchk area
2878 SAVE_SHADOW(r0,CNS_Q_MCHK,r1);
2879 mb
2880
2881//orig or r31, 1, r0 // get a one
2882//orig store_reg1 flag, r0, r1, ipr=1 // set dump area flag
2883//orig mb
2884
2885 lda r1, -CNS_Q_IPR(r1) // back to the base
2886 lda r1, 0x200(r1) // Point to center of CPU segment
2887 or r31, 1, r0 // get a one
2888 SAVE_GPR(r0,CNS_Q_FLAG,r1) // // set dump area valid flag
2889 mb
2890
2891//orig // restore impure area base
2892//orig unfix_impure_gpr r1
2893 lda r1, -0x200(r1) // Point to center of CPU segment
2894
2895 mtpr r31, dtb_ia // clear the dtb //orig
2896 mtpr r31, itb_ia // clear the itb //orig
2897
2898//orig pvc_jsr savsta, bsr=1, dest=1
2899 ret r31, (r3) // and back we go
2900#endif
2901
2902
2903#if remove_restore_state == 0
2904
2905
2906// .sbttl "PAL_RESTORE_STATE"
2907//+
2908//
2909// Pal_restore_state
2910//
2911//
2912// register usage:
2913// r1 = addr of impure area
2914// r3 = return_address
2915// all other regs are scratchable, as they are about to
2916// be reloaded from ram.
2917//
2918// Function:
2919// All chip state restored, all SRs, FRs, PTs, IPRs
2920// *** except R1, R3, PT0, PT4, PT5 ***
2921//
2922//-
2923 ALIGN_BLOCK
2924pal_restore_state:
2925
2926//need to restore sc_ctl,bc_ctl,bc_config??? if so, need to figure out a safe way to do so.
2927
2928//orig // map the console io area virtually
2929//orig mtpr r31, dtb_ia // clear the dtb
2930//orig srl r1, page_offset_size_bits, r0 // Clean off low bits of VA
2931//orig sll r0, 32, r0 // shift to PFN field
2932//orig lda r2, 0xff(r31) // all read enable and write enable bits set
2933//orig sll r2, 8, r2 // move to PTE location
2934//orig addq r0, r2, r0 // combine with PFN
2935//orig
2936//orig mtpr r0, dtb_pte // Load PTE and set TB valid bit
2937//orig mtpr r1, dtb_tag // write TB tag
2938//orig
2939
2940 mtpr r31, dtbIa // Clear all DTB entries
2941 srl r1, va_s_off, r0 // Clean off byte-within-page offset
2942 sll r0, pte_v_pfn, r0 // Shift to form PFN
2943 lda r0, pte_m_prot(r0) // Set all read/write enable bits
2944 mtpr r0, dtbPte // Load the PTE and set valid
2945 mtpr r1, dtbTag // Write the PTE and tag into the DTB
2946
2947
2948//orig // map the next page too, in case impure area crosses page boundary
2949//orig lda r4, 1@page_offset_size_bits(r1) // generate address for next page
2950//orig srl r4, page_offset_size_bits, r0 // Clean off low bits of VA
2951//orig sll r0, 32, r0 // shift to PFN field
2952//orig lda r2, 0xff(r31) // all read enable and write enable bits set
2953//orig sll r2, 8, r2 // move to PTE location
2954//orig addq r0, r2, r0 // combine with PFN
2955//orig
2956//orig mtpr r0, dtb_pte // Load PTE and set TB valid bit
2957//orig mtpr r4, dtb_tag // write TB tag - no virtual mbox instruction for 3 cycles
2958
2959 lda r4, (1<<VA_S_OFF)(r1) // Generate address for next page
2960 srl r4, va_s_off, r0 // Clean off byte-within-page offset
2961 sll r0, pte_v_pfn, r0 // Shift to form PFN
2962 lda r0, pte_m_prot(r0) // Set all read/write enable bits
2963 mtpr r0, dtbPte // Load the PTE and set valid
2964 mtpr r4, dtbTag // Write the PTE and tag into the DTB
2965
2966//orig // save all floating regs
2967//orig mfpr r0, icsr // get icsr
2968//orig// assume ICSR_V_SDE gt <ICSR_V_FPE> // assertion checker
2969//orig or r31, <<1@<ICSR_V_SDE-ICSR_V_FPE>> ! 1>, r2 // set SDE and FPE
2970//orig sll r2, #icsr_v_fpe, r2 // shift for fpu spot
2971//orig or r2, r0, r0 // set FEN on
2972//orig mtpr r0, icsr // write to icsr, enabling FEN and SDE. 3 bubbles to floating instr.
2973
2974 mfpr r0, icsr // Get current ICSR
2975 bis zero, 1, r2 // Get a '1'
2976 or r2, (1<<(icsr_v_sde-icsr_v_fpe)), r2
2977 sll r2, icsr_v_fpe, r2 // Shift bits into position
2978	bis r2, r0, r0 // OR SDE and FPE into the ICSR value read above
2979 mtpr r0, icsr // Update the chip
2980
2981 mfpr r31, pt0 // FPE bubble cycle 1 //orig
2982 mfpr r31, pt0 // FPE bubble cycle 2 //orig
2983 mfpr r31, pt0 // FPE bubble cycle 3 //orig
2984
2985//orig fix_impure_ipr r1
2986//orig restore_reg1 fpcsr, f0, r1, fpcsr=1
2987//orig mt_fpcr f0
2988//orig
2989//orig unfix_impure_ipr r1
2990//orig fix_impure_gpr r1 // adjust impure pointer offset for gpr access
2991//orig
2992//orig // restore all floating regs
2993//orig#define t 0
2994//orig .repeat 32
2995//orig restore_reg \t , fpu=1
2996//orig#define t t + 1
2997//orig .endr
2998
2999 lda r1, 200(r1) // Point to base of IPR area again
3000 RESTORE_FPR(f0,CNS_Q_FPCSR,r1) // can it reach?? pb
3001 mt_fpcr f0 // original
3002
3003 lda r1, 0x200(r1) // point to center of CPU segment
3004 RESTORE_FPR(f0,CNS_Q_FPR+0x00,r1)
3005 RESTORE_FPR(f1,CNS_Q_FPR+0x08,r1)
3006 RESTORE_FPR(f2,CNS_Q_FPR+0x10,r1)
3007 RESTORE_FPR(f3,CNS_Q_FPR+0x18,r1)
3008 RESTORE_FPR(f4,CNS_Q_FPR+0x20,r1)
3009 RESTORE_FPR(f5,CNS_Q_FPR+0x28,r1)
3010 RESTORE_FPR(f6,CNS_Q_FPR+0x30,r1)
3011 RESTORE_FPR(f7,CNS_Q_FPR+0x38,r1)
3012 RESTORE_FPR(f8,CNS_Q_FPR+0x40,r1)
3013 RESTORE_FPR(f9,CNS_Q_FPR+0x48,r1)
3014 RESTORE_FPR(f10,CNS_Q_FPR+0x50,r1)
3015 RESTORE_FPR(f11,CNS_Q_FPR+0x58,r1)
3016 RESTORE_FPR(f12,CNS_Q_FPR+0x60,r1)
3017 RESTORE_FPR(f13,CNS_Q_FPR+0x68,r1)
3018 RESTORE_FPR(f14,CNS_Q_FPR+0x70,r1)
3019 RESTORE_FPR(f15,CNS_Q_FPR+0x78,r1)
3020 RESTORE_FPR(f16,CNS_Q_FPR+0x80,r1)
3021 RESTORE_FPR(f17,CNS_Q_FPR+0x88,r1)
3022 RESTORE_FPR(f18,CNS_Q_FPR+0x90,r1)
3023 RESTORE_FPR(f19,CNS_Q_FPR+0x98,r1)
3024 RESTORE_FPR(f20,CNS_Q_FPR+0xA0,r1)
3025 RESTORE_FPR(f21,CNS_Q_FPR+0xA8,r1)
3026 RESTORE_FPR(f22,CNS_Q_FPR+0xB0,r1)
3027 RESTORE_FPR(f23,CNS_Q_FPR+0xB8,r1)
3028 RESTORE_FPR(f24,CNS_Q_FPR+0xC0,r1)
3029 RESTORE_FPR(f25,CNS_Q_FPR+0xC8,r1)
3030 RESTORE_FPR(f26,CNS_Q_FPR+0xD0,r1)
3031 RESTORE_FPR(f27,CNS_Q_FPR+0xD8,r1)
3032 RESTORE_FPR(f28,CNS_Q_FPR+0xE0,r1)
3033 RESTORE_FPR(f29,CNS_Q_FPR+0xE8,r1)
3034 RESTORE_FPR(f30,CNS_Q_FPR+0xF0,r1)
3035 RESTORE_FPR(f31,CNS_Q_FPR+0xF8,r1)
3036
3037//orig // switch impure pointer from gpr to ipr area --
3038//orig unfix_impure_gpr r1
3039//orig fix_impure_ipr r1
3040//orig
3041//orig // restore all pal regs
3042//orig#define t 1
3043//orig .repeat 23
3044//orig restore_reg \t , pal=1
3045//orig#define t t + 1
3046//orig .endr
3047
3048 lda r1, -0x200(r1) // Restore base address of impure area.
3049 lda r1, CNS_Q_IPR(r1) // Point to base of IPR area.
3050 RESTORE_IPR(pt0,CNS_Q_PT+0x00,r1) // the osf code didn't save/restore palTemp 0 ?? pboyle
3051 RESTORE_IPR(pt1,CNS_Q_PT+0x08,r1)
3052 RESTORE_IPR(pt2,CNS_Q_PT+0x10,r1)
3053 RESTORE_IPR(pt3,CNS_Q_PT+0x18,r1)
3054 RESTORE_IPR(pt4,CNS_Q_PT+0x20,r1)
3055 RESTORE_IPR(pt5,CNS_Q_PT+0x28,r1)
3056 RESTORE_IPR(pt6,CNS_Q_PT+0x30,r1)
3057 RESTORE_IPR(pt7,CNS_Q_PT+0x38,r1)
3058 RESTORE_IPR(pt8,CNS_Q_PT+0x40,r1)
3059 RESTORE_IPR(pt9,CNS_Q_PT+0x48,r1)
3060 RESTORE_IPR(pt10,CNS_Q_PT+0x50,r1)
3061 RESTORE_IPR(pt11,CNS_Q_PT+0x58,r1)
3062 RESTORE_IPR(pt12,CNS_Q_PT+0x60,r1)
3063 RESTORE_IPR(pt13,CNS_Q_PT+0x68,r1)
3064 RESTORE_IPR(pt14,CNS_Q_PT+0x70,r1)
3065 RESTORE_IPR(pt15,CNS_Q_PT+0x78,r1)
3066 RESTORE_IPR(pt16,CNS_Q_PT+0x80,r1)
3067 RESTORE_IPR(pt17,CNS_Q_PT+0x88,r1)
3068 RESTORE_IPR(pt18,CNS_Q_PT+0x90,r1)
3069 RESTORE_IPR(pt19,CNS_Q_PT+0x98,r1)
3070 RESTORE_IPR(pt20,CNS_Q_PT+0xA0,r1)
3071 RESTORE_IPR(pt21,CNS_Q_PT+0xA8,r1)
3072 RESTORE_IPR(pt22,CNS_Q_PT+0xB0,r1)
3073 RESTORE_IPR(pt23,CNS_Q_PT+0xB8,r1)
3074
3075
3076//orig restore_reg exc_addr, ipr=1 // restore ipr
3077//orig restore_reg pal_base, ipr=1 // restore ipr
3078//orig restore_reg ipl, ipr=1 // restore ipr
3079//orig restore_reg ps, ipr=1 // restore ipr
3080//orig mtpr r0, dtb_cm // set current mode in mbox too
3081//orig restore_reg itb_asn, ipr=1
3082//orig srl r0, itb_asn_v_asn, r0
3083//orig sll r0, dtb_asn_v_asn, r0
3084//orig mtpr r0, dtb_asn // set ASN in Mbox too
3085//orig restore_reg ivptbr, ipr=1
3086//orig mtpr r0, mvptbr // use ivptbr value to restore mvptbr
3087//orig restore_reg mcsr, ipr=1
3088//orig restore_reg aster, ipr=1
3089//orig restore_reg astrr, ipr=1
3090//orig restore_reg sirr, ipr=1
3091//orig restore_reg maf_mode, ipr=1 // no mbox instruction for 3 cycles
3092//orig mfpr r31, pt0 // (may issue with mt maf_mode)
3093//orig mfpr r31, pt0 // bubble cycle 1
3094//orig mfpr r31, pt0 // bubble cycle 2
3095//orig mfpr r31, pt0 // bubble cycle 3
3096//orig mfpr r31, pt0 // (may issue with following ld)
3097
3098 // r0 gets the value of RESTORE_IPR in the macro and this code uses this side effect (gag)
3099 RESTORE_IPR(excAddr,CNS_Q_EXC_ADDR,r1)
3100 RESTORE_IPR(palBase,CNS_Q_PAL_BASE,r1)
3101 RESTORE_IPR(ipl,CNS_Q_IPL,r1)
3102 RESTORE_IPR(ips,CNS_Q_IPS,r1)
3103 mtpr r0, dtbCm // Set Mbox current mode too.
3104 RESTORE_IPR(itbAsn,CNS_Q_ITB_ASN,r1)
3105	srl r0, 4, r0 // extract ASN from its ITB_ASN position (bit 4)
3106	sll r0, 57, r0 // and move it to its DTB_ASN position (bit 57)
3107 mtpr r0, dtbAsn // Set Mbox ASN too
3108 RESTORE_IPR(iVptBr,CNS_Q_IVPTBR,r1)
3109 mtpr r0, mVptBr // Set Mbox VptBr too
3110 RESTORE_IPR(mcsr,CNS_Q_MCSR,r1)
3111 RESTORE_IPR(aster,CNS_Q_ASTER,r1)
3112 RESTORE_IPR(astrr,CNS_Q_ASTRR,r1)
3113 RESTORE_IPR(sirr,CNS_Q_SIRR,r1)
3114 RESTORE_IPR(mafMode,CNS_Q_MAF_MODE,r1)
3115 STALL
3116 STALL
3117 STALL
3118 STALL
3119 STALL
3120
3121
3122 // restore all integer shadow regs
3123//orig#define t 8
3124//orig .repeat 7
3125//orig restore_reg \t, shadow=1
3126//orig#define t t + 1
3127//orig .endr
3128//orig restore_reg 25, shadow=1
3129//orig restore_reg dc_mode, ipr=1 // no mbox instructions for 4 cycles
3130
3131 RESTORE_SHADOW( r8,CNS_Q_SHADOW+0x00,r1) // also called p0...p7 in the Hudson code
3132 RESTORE_SHADOW( r9,CNS_Q_SHADOW+0x08,r1)
3133 RESTORE_SHADOW(r10,CNS_Q_SHADOW+0x10,r1)
3134 RESTORE_SHADOW(r11,CNS_Q_SHADOW+0x18,r1)
3135 RESTORE_SHADOW(r12,CNS_Q_SHADOW+0x20,r1)
3136 RESTORE_SHADOW(r13,CNS_Q_SHADOW+0x28,r1)
3137 RESTORE_SHADOW(r14,CNS_Q_SHADOW+0x30,r1)
3138 RESTORE_SHADOW(r25,CNS_Q_SHADOW+0x38,r1)
3139 RESTORE_IPR(dcMode,CNS_Q_DC_MODE,r1)
3140
3141 //
3142 // Get out of shadow mode
3143 //
3144
3145 mfpr r31, pt0 // pad last load to icsr write (in case of replay, icsr will be written anyway) //orig
3146 mfpr r31, pt0 // "" //orig
3147 mfpr r0, icsr // Get icsr //orig
3148//orig ldah r2, <1@<icsr_v_sde-16>>(r31) // Get a one in SHADOW_ENABLE bit location
3149 ldah r2, (1<<(ICSR_V_SDE-16))(r31) // Get a one in SHADOW_ENABLE bit location //orig
3150 bic r0, r2, r2 // ICSR with SDE clear //orig
3151 mtpr r2, icsr // Turn off SDE - no palshadow rd/wr for 3 bubble cycles //orig
3152
3153 mfpr r31, pt0 // SDE bubble cycle 1 //orig
3154 mfpr r31, pt0 // SDE bubble cycle 2 //orig
3155 mfpr r31, pt0 // SDE bubble cycle 3 //orig
3156 nop //orig
3157
3158//orig // switch impure pointer from ipr to gpr area --
3159//orig unfix_impure_ipr r1
3160//orig fix_impure_gpr r1
3161//orig // restore all integer regs
3162//orig#define t 4
3163//orig .repeat 28
3164//orig restore_reg \t
3165//orig#define t t + 1
3166//orig .endr
3167
3168// Restore GPRs (r0, r2 are restored later, r1 and r3 are trashed) ...
3169
3170 lda r1, -CNS_Q_IPR(r1) // Restore base address of impure area
3171 lda r1, 0x200(r1) // Point to center of CPU segment
3172
3173 RESTORE_GPR(r4,CNS_Q_GPR+0x20,r1)
3174 RESTORE_GPR(r5,CNS_Q_GPR+0x28,r1)
3175 RESTORE_GPR(r6,CNS_Q_GPR+0x30,r1)
3176 RESTORE_GPR(r7,CNS_Q_GPR+0x38,r1)
3177 RESTORE_GPR(r8,CNS_Q_GPR+0x40,r1)
3178 RESTORE_GPR(r9,CNS_Q_GPR+0x48,r1)
3179 RESTORE_GPR(r10,CNS_Q_GPR+0x50,r1)
3180 RESTORE_GPR(r11,CNS_Q_GPR+0x58,r1)
3181 RESTORE_GPR(r12,CNS_Q_GPR+0x60,r1)
3182 RESTORE_GPR(r13,CNS_Q_GPR+0x68,r1)
3183 RESTORE_GPR(r14,CNS_Q_GPR+0x70,r1)
3184 RESTORE_GPR(r15,CNS_Q_GPR+0x78,r1)
3185 RESTORE_GPR(r16,CNS_Q_GPR+0x80,r1)
3186 RESTORE_GPR(r17,CNS_Q_GPR+0x88,r1)
3187 RESTORE_GPR(r18,CNS_Q_GPR+0x90,r1)
3188 RESTORE_GPR(r19,CNS_Q_GPR+0x98,r1)
3189 RESTORE_GPR(r20,CNS_Q_GPR+0xA0,r1)
3190 RESTORE_GPR(r21,CNS_Q_GPR+0xA8,r1)
3191 RESTORE_GPR(r22,CNS_Q_GPR+0xB0,r1)
3192 RESTORE_GPR(r23,CNS_Q_GPR+0xB8,r1)
3193 RESTORE_GPR(r24,CNS_Q_GPR+0xC0,r1)
3194 RESTORE_GPR(r25,CNS_Q_GPR+0xC8,r1)
3195 RESTORE_GPR(r26,CNS_Q_GPR+0xD0,r1)
3196 RESTORE_GPR(r27,CNS_Q_GPR+0xD8,r1)
3197 RESTORE_GPR(r28,CNS_Q_GPR+0xE0,r1)
3198 RESTORE_GPR(r29,CNS_Q_GPR+0xE8,r1)
3199 RESTORE_GPR(r30,CNS_Q_GPR+0xF0,r1)
3200 RESTORE_GPR(r31,CNS_Q_GPR+0xF8,r1)
3201
3202//orig // switch impure pointer from gpr to ipr area --
3203//orig unfix_impure_gpr r1
3204//orig fix_impure_ipr r1
3205//orig restore_reg icsr, ipr=1 // restore original icsr- 4 bubbles to hw_rei
3206
3207 lda t0, -0x200(t0) // Restore base address of impure area.
3208 lda t0, CNS_Q_IPR(t0) // Point to base of IPR area again.
3209 RESTORE_IPR(icsr,CNS_Q_ICSR,r1)
3210
3211//orig // and back again --
3212//orig unfix_impure_ipr r1
3213//orig fix_impure_gpr r1
3214//orig store_reg1 flag, r31, r1, ipr=1 // clear dump area valid flag
3215//orig mb
3216
3217 lda t0, -CNS_Q_IPR(t0) // Back to base of impure area again,
3218 lda t0, 0x200(t0) // and back to center of CPU segment
3219 SAVE_GPR(r31,CNS_Q_FLAG,r1) // Clear the dump area valid flag
3220 mb
3221
3222//orig // and back we go
3223//orig// restore_reg 3
3224//orig restore_reg 2
3225//orig// restore_reg 1
3226//orig restore_reg 0
3227//orig // restore impure area base
3228//orig unfix_impure_gpr r1
3229
3230 RESTORE_GPR(r2,CNS_Q_GPR+0x10,r1)
3231 RESTORE_GPR(r0,CNS_Q_GPR+0x00,r1)
3232 lda r1, -0x200(r1) // Restore impure base address
3233
3234 mfpr r31, pt0 // stall for ldqp above //orig
3235
3236 mtpr r31, dtb_ia // clear the tb //orig
3237 mtpr r31, itb_ia // clear the itb //orig
3238
3239//orig pvc_jsr rststa, bsr=1, dest=1
3240 ret r31, (r3) // back we go //orig
3241#endif
3242
3243
3244//+
3245// pal_pal_bug_check -- code has found a bugcheck situation.
3246// Set things up and join common machine check flow.
3247//
3248// Input:
3249// r14 - exc_addr
3250//
3251// On exit:
3252// pt0 - saved r0
3253// pt1 - saved r1
3254// pt4 - saved r4
3255// pt5 - saved r5
3256// pt6 - saved r6
3257// pt10 - saved exc_addr
3258// pt_misc<47:32> - mchk code
3259// pt_misc<31:16> - scb vector
3260// r14 - base of Cbox IPRs in IO space
3261// MCES<mchk> is set
3262//-
3263
3264 ALIGN_BLOCK
3265 .globl pal_pal_bug_check_from_int
3266pal_pal_bug_check_from_int:
3267 DEBUGSTORE(0x79)
3268//simos DEBUG_EXC_ADDR()
3269 DEBUGSTORE(0x20)
3270//simos bsr r25, put_hex
3271 lda r25, mchk_c_bugcheck(r31)
3272 addq r25, 1, r25 // set flag indicating we came from interrupt and stack is already pushed
3273 br r31, pal_pal_mchk
3274 nop
3275
3276pal_pal_bug_check:
3277 lda r25, mchk_c_bugcheck(r31)
3278
3279pal_pal_mchk:
3280 sll r25, 32, r25 // Move mchk code to position
3281
3282 mtpr r14, pt10 // Stash exc_addr
3283 mtpr r14, exc_addr
3284
3285 mfpr r12, pt_misc // Get MCES and scratch
3286 zap r12, 0x3c, r12
3287
3288 or r12, r25, r12 // Combine mchk code
3289 lda r25, scb_v_procmchk(r31) // Get SCB vector
3290
3291 sll r25, 16, r25 // Move SCBv to position
3292 or r12, r25, r25 // Combine SCBv
3293
3294 mtpr r0, pt0 // Stash for scratch
3295 bis r25, mces_m_mchk, r25 // Set MCES<MCHK> bit
3296
3297 mtpr r25, pt_misc // Save mchk code!scbv!whami!mces
3298 ldah r14, 0xfff0(r31)
3299
3300 mtpr r1, pt1 // Stash for scratch
3301 zap r14, 0xE0, r14 // Get Cbox IPR base
3302
3303 mtpr r4, pt4
3304 mtpr r5, pt5
3305
3306 mtpr r6, pt6
3307 blbs r12, sys_double_machine_check // MCHK halt if double machine check
3308
3309 br r31, sys_mchk_collect_iprs // Join common machine check flow
3310
3311// align_to_call_pal_section // Align to address of first call_pal entry point - 2000
3312
3313// .sbttl "HALT - PALcode for HALT instruction"
3314
3315//+
3316//
3317// Entry:
3318// Vectored into via hardware PALcode instruction dispatch.
3319//
3320// Function:
3321// GO to console code
3322//
3323//-
3324
3325 .text 1
3326// . = 0x2000
3327 CALL_PAL_PRIV(PAL_HALT_ENTRY)
3328call_pal_halt:
3329#if rax_mode == 0
3330 mfpr r31, pt0 // Pad exc_addr read
3331 mfpr r31, pt0
3332
3333 mfpr r12, exc_addr // get PC
3334 subq r12, 4, r12 // Point to the HALT
3335
3336 mtpr r12, exc_addr
3337 mtpr r0, pt0
3338
3339//orig pvc_jsr updpcb, bsr=1
3340 bsr r0, pal_update_pcb // update the pcb
3341 lda r0, hlt_c_sw_halt(r31) // set halt code to sw halt
3342 br r31, sys_enter_console // enter the console
3343
3344#else // RAX mode
3345 mb
3346 mb
3347 mtpr r9, ev5__dtb_asn // no Dstream virtual ref for next 3 cycles.
3348 mtpr r9, ev5__itb_asn // E1. Update ITB ASN. No hw_rei for 5 cycles.
3349 mtpr r8, exc_addr // no HW_REI for 1 cycle.
3350 blbc r9, not_begin_case
3351 mtpr r31, ev5__dtb_ia // clear DTB. No Dstream virtual ref for 2 cycles.
3352 mtpr r31, ev5__itb_ia // clear ITB.
3353
3354not_begin_case:
3355 nop
3356 nop
3357
3358 nop
3359 nop // pad mt itb_asn ->hw_rei_stall
3360
3361 hw_rei_stall
3362#endif
3363
3364// .sbttl "CFLUSH- PALcode for CFLUSH instruction"
3365
3366//+
3367//
3368// Entry:
3369// Vectored into via hardware PALcode instruction dispatch.
3370//
3371// R16 - contains the PFN of the page to be flushed
3372//
3373// Function:
3374// Flush all Dstream caches of 1 entire page
3375// The CFLUSH routine is in the system specific module.
3376//
3377//-
3378
3379 CALL_PAL_PRIV(PAL_CFLUSH_ENTRY)
3380Call_Pal_Cflush:
3381 br r31, sys_cflush
3382
3383// .sbttl "DRAINA - PALcode for DRAINA instruction"
3384//+
3385//
3386// Entry:
3387// Vectored into via hardware PALcode instruction dispatch.
3388// Implicit TRAPB performed by hardware.
3389//
3390// Function:
3391// Stall instruction issue until all prior instructions are guaranteed to
3392// complete without incurring aborts. For the EV5 implementation, this
3393// means waiting until all pending DREADS are returned.
3394//
3395//-
3396
3397 CALL_PAL_PRIV(PAL_DRAINA_ENTRY)
3398Call_Pal_Draina:
3399 ldah r14, 0x100(r31) // Init counter. Value?
3400 nop
3401
3402DRAINA_LOOP:
3403 subq r14, 1, r14 // Decrement counter
3404 mfpr r13, ev5__maf_mode // Fetch status bit
3405
3406 srl r13, maf_mode_v_dread_pending, r13
3407 ble r14, DRAINA_LOOP_TOO_LONG
3408
3409 nop
3410 blbs r13, DRAINA_LOOP // Wait until all DREADS clear
3411
3412 hw_rei
3413
3414DRAINA_LOOP_TOO_LONG:
3415 br r31, call_pal_halt
3416
3417// .sbttl "CALL_PAL OPCDECs"
3418
3419 CALL_PAL_PRIV(0x0003)
3420CallPal_OpcDec03:
3421 br r31, osfpal_calpal_opcdec
3422
3423 CALL_PAL_PRIV(0x0004)
3424CallPal_OpcDec04:
3425 br r31, osfpal_calpal_opcdec
3426
3427 CALL_PAL_PRIV(0x0005)
3428CallPal_OpcDec05:
3429 br r31, osfpal_calpal_opcdec
3430
3431 CALL_PAL_PRIV(0x0006)
3432CallPal_OpcDec06:
3433 br r31, osfpal_calpal_opcdec
3434
3435 CALL_PAL_PRIV(0x0007)
3436CallPal_OpcDec07:
3437 br r31, osfpal_calpal_opcdec
3438
3439 CALL_PAL_PRIV(0x0008)
3440CallPal_OpcDec08:
3441 br r31, osfpal_calpal_opcdec
3442
3443// .sbttl "CSERVE- PALcode for CSERVE instruction"
3444//+
3445//
3446// Entry:
3447// Vectored into via hardware PALcode instruction dispatch.
3448//
3449// Function:
3450// Various functions for private use of console software
3451//
3452// option selector in r0
3453// arguments in r16....
3454// The CSERVE routine is in the system specific module.
3455//
3456//-
3457
3458 CALL_PAL_PRIV(PAL_CSERVE_ENTRY)
3459Call_Pal_Cserve:
3460 br r31, sys_cserve
3461
3462// .sbttl "swppal - PALcode for swppal instruction"
3463
3464//+
3465//
3466// Entry:
3467// Vectored into via hardware PALcode instruction dispatch.
3469// R16 contains the new PAL identifier
3470// R17:R21 contain implementation-specific entry parameters
3471//
3472// R0 receives status:
3473// 0 success (PAL was switched)
3474// 1 unknown PAL variant
3475// 2 known PAL variant, but PAL not loaded
3476//
3477//
3478// Function:
3479// Swap control to another PAL.
3480//-
3481
3482 CALL_PAL_PRIV(PAL_SWPPAL_ENTRY)
3483Call_Pal_Swppal:
3484 cmpule r16, 255, r0 // see if a kibble was passed
3485 cmoveq r16, r16, r0 // if r16=0 then a valid address (ECO 59)
3486
3487	or r16, r31, r3 // set r3 in case this is an address
3488 blbc r0, swppal_cont // nope, try it as an address
3489
3490 cmpeq r16, 2, r0 // is it our friend OSF?
3491 blbc r0, swppal_fail // nope, don't know this fellow
3492
3493 br r2, CALL_PAL_SWPPAL_10_ // tis our buddy OSF
3494
3495// .global osfpal_hw_entry_reset
3496// .weak osfpal_hw_entry_reset
3497// .long <osfpal_hw_entry_reset-pal_start>
3498//orig halt // don't know how to get the address here - kludge ok, load pal at 0
3499 .long 0 // ?? hack upon hack...pb
3500
3501CALL_PAL_SWPPAL_10_: ldlp r3, 0(r2) // fetch target addr
3502// ble r3, swppal_fail ; if OSF not linked in say not loaded.
3503 mfpr r2, pal_base // fetch pal base
3504
3505 addq r2, r3, r3 // add pal base
3506 lda r2, 0x3FFF(r31) // get pal base checker mask
3507
3508 and r3, r2, r2 // any funky bits set?
3509 cmpeq r2, 0, r0 //
3510
3511 blbc r0, swppal_fail // return unknown if bad bit set.
3512 br r31, swppal_cont
3513
3514// .sbttl "CALL_PAL OPCDECs"
3515
3516 CALL_PAL_PRIV(0x000B)
3517CallPal_OpcDec0B:
3518 br r31, osfpal_calpal_opcdec
3519
3520 CALL_PAL_PRIV(0x000C)
3521CallPal_OpcDec0C:
3522 br r31, osfpal_calpal_opcdec
3523
3524// .sbttl "wripir- PALcode for wripir instruction"
3525//+
3526//
3527// Entry:
3528// Vectored into via hardware PALcode instruction dispatch.
3529// r16 = processor number to interrupt
3530//
3531// Function:
3532// IPIR <- R16
3533// Handled in system-specific code
3534//
3535// Exit:
3536// interprocessor interrupt is recorded on the target processor
3537// and is initiated when the proper enabling conditions are present.
3538//-
3539
3540 CALL_PAL_PRIV(PAL_WRIPIR_ENTRY)
3541Call_Pal_Wrpir:
3542 br r31, sys_wripir
3543
3544// .sbttl "CALL_PAL OPCDECs"
3545
3546 CALL_PAL_PRIV(0x000E)
3547CallPal_OpcDec0E:
3548 br r31, osfpal_calpal_opcdec
3549
3550 CALL_PAL_PRIV(0x000F)
3551CallPal_OpcDec0F:
3552 br r31, osfpal_calpal_opcdec
3553
3554// .sbttl "rdmces- PALcode for rdmces instruction"
3555
3556//+
3557//
3558// Entry:
3559// Vectored into via hardware PALcode instruction dispatch.
3560//
3561// Function:
3562// R0 <- ZEXT(MCES)
3563//-
3564
3565 CALL_PAL_PRIV(PAL_RDMCES_ENTRY)
3566Call_Pal_Rdmces:
3567 mfpr r0, pt_mces // Read from PALtemp
3568 and r0, mces_m_all, r0 // Clear other bits
3569
3570 hw_rei
3571
3572// .sbttl "wrmces- PALcode for wrmces instruction"
3573
3574//+
3575//
3576// Entry:
3577// Vectored into via hardware PALcode instruction dispatch.
3578//
3579// Function:
3580// If {R16<0> EQ 1} then MCES<0> <- 0 (MCHK)
3581// If {R16<1> EQ 1} then MCES<1> <- 0 (SCE)
3582// If {R16<2> EQ 1} then MCES<2> <- 0 (PCE)
3583// MCES<3> <- R16<3> (DPC)
3584// MCES<4> <- R16<4> (DSC)
3585//
3586//-
3587
3588 CALL_PAL_PRIV(PAL_WRMCES_ENTRY)
3589Call_Pal_Wrmces:
3590 and r16, ((1<<mces_v_mchk) | (1<<mces_v_sce) | (1<<mces_v_pce)), r13 // Isolate MCHK, SCE, PCE
3591 mfpr r14, pt_mces // Get current value
3592
3593 ornot r31, r13, r13 // Flip all the bits
3594 and r16, ((1<<mces_v_dpc) | (1<<mces_v_dsc)), r17
3595
3596 and r14, r13, r1 // Update MCHK, SCE, PCE
3597 bic r1, ((1<<mces_v_dpc) | (1<<mces_v_dsc)), r1 // Clear old DPC, DSC
3598
3599 or r1, r17, r1 // Update DPC and DSC
3600 mtpr r1, pt_mces // Write MCES back
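	// (Recap: r13 holds the MCHK/SCE/PCE bits the caller asked to clear, so
	//  ANDing with its complement gives write-1-to-clear behavior for those,
	//  while DPC and DSC are simply overwritten with the caller's values.)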
3601
3602#if rawhide_system == 0
3603 nop // Pad to fix PT write->read restriction
3604#else
3605 blbs r16, RAWHIDE_clear_mchk_lock // Clear logout from lock
3606#endif
3607
3608 nop
3609 hw_rei
3610
3611
3612
3613// .sbttl "CALL_PAL OPCDECs"
3614
3615 CALL_PAL_PRIV(0x0012)
3616CallPal_OpcDec12:
3617 br r31, osfpal_calpal_opcdec
3618
3619 CALL_PAL_PRIV(0x0013)
3620CallPal_OpcDec13:
3621 br r31, osfpal_calpal_opcdec
3622
3623 CALL_PAL_PRIV(0x0014)
3624CallPal_OpcDec14:
3625 br r31, osfpal_calpal_opcdec
3626
3627 CALL_PAL_PRIV(0x0015)
3628CallPal_OpcDec15:
3629 br r31, osfpal_calpal_opcdec
3630
3631 CALL_PAL_PRIV(0x0016)
3632CallPal_OpcDec16:
3633 br r31, osfpal_calpal_opcdec
3634
3635 CALL_PAL_PRIV(0x0017)
3636CallPal_OpcDec17:
3637 br r31, osfpal_calpal_opcdec
3638
3639 CALL_PAL_PRIV(0x0018)
3640CallPal_OpcDec18:
3641 br r31, osfpal_calpal_opcdec
3642
3643 CALL_PAL_PRIV(0x0019)
3644CallPal_OpcDec19:
3645 br r31, osfpal_calpal_opcdec
3646
3647 CALL_PAL_PRIV(0x001A)
3648CallPal_OpcDec1A:
3649 br r31, osfpal_calpal_opcdec
3650
3651 CALL_PAL_PRIV(0x001B)
3652CallPal_OpcDec1B:
3653 br r31, osfpal_calpal_opcdec
3654
3655 CALL_PAL_PRIV(0x001C)
3656CallPal_OpcDec1C:
3657 br r31, osfpal_calpal_opcdec
3658
3659 CALL_PAL_PRIV(0x001D)
3660CallPal_OpcDec1D:
3661 br r31, osfpal_calpal_opcdec
3662
3663 CALL_PAL_PRIV(0x001E)
3664CallPal_OpcDec1E:
3665 br r31, osfpal_calpal_opcdec
3666
3667 CALL_PAL_PRIV(0x001F)
3668CallPal_OpcDec1F:
3669 br r31, osfpal_calpal_opcdec
3670
3671 CALL_PAL_PRIV(0x0020)
3672CallPal_OpcDec20:
3673 br r31, osfpal_calpal_opcdec
3674
3675 CALL_PAL_PRIV(0x0021)
3676CallPal_OpcDec21:
3677 br r31, osfpal_calpal_opcdec
3678
3679 CALL_PAL_PRIV(0x0022)
3680CallPal_OpcDec22:
3681 br r31, osfpal_calpal_opcdec
3682
3683 CALL_PAL_PRIV(0x0023)
3684CallPal_OpcDec23:
3685 br r31, osfpal_calpal_opcdec
3686
3687 CALL_PAL_PRIV(0x0024)
3688CallPal_OpcDec24:
3689 br r31, osfpal_calpal_opcdec
3690
3691 CALL_PAL_PRIV(0x0025)
3692CallPal_OpcDec25:
3693 br r31, osfpal_calpal_opcdec
3694
3695 CALL_PAL_PRIV(0x0026)
3696CallPal_OpcDec26:
3697 br r31, osfpal_calpal_opcdec
3698
3699 CALL_PAL_PRIV(0x0027)
3700CallPal_OpcDec27:
3701 br r31, osfpal_calpal_opcdec
3702
3703 CALL_PAL_PRIV(0x0028)
3704CallPal_OpcDec28:
3705 br r31, osfpal_calpal_opcdec
3706
3707 CALL_PAL_PRIV(0x0029)
3708CallPal_OpcDec29:
3709 br r31, osfpal_calpal_opcdec
3710
3711 CALL_PAL_PRIV(0x002A)
3712CallPal_OpcDec2A:
3713 br r31, osfpal_calpal_opcdec
3714
3715// .sbttl "wrfen - PALcode for wrfen instruction"
3716
3717//+
3718//
3719// Entry:
3720// Vectored into via hardware PALcode instruction dispatch.
3721//
3722// Function:
3723// a0<0> -> ICSR<FPE>
3724// Store new FEN in PCB
3725// Final value of t0 (r1), t8..t10 (r22..r24) and a0 (r16) are UNPREDICTABLE
3726//
3727// Issue: What about pending FP loads when FEN goes from on->off????
3728//-
3729
3730 CALL_PAL_PRIV(PAL_WRFEN_ENTRY)
3731Call_Pal_Wrfen:
3732 or r31, 1, r13 // Get a one
3733 mfpr r1, ev5__icsr // Get current FPE
3734
3735 sll r13, icsr_v_fpe, r13 // shift 1 to icsr<fpe> spot, e0
3736 and r16, 1, r16 // clean new fen
3737
3738 sll r16, icsr_v_fpe, r12 // shift new fen to correct bit position
3739 bic r1, r13, r1 // zero icsr<fpe>
3740
3741 or r1, r12, r1 // Or new FEN into ICSR
3742 mfpr r12, pt_pcbb // Get PCBB - E1
3743
3744 mtpr r1, ev5__icsr // write new ICSR. 3 Bubble cycles to HW_REI
3745 stlp r16, osfpcb_q_fen(r12) // Store FEN in PCB.
3746
3747 mfpr r31, pt0 // Pad ICSR<FPE> write.
3748 mfpr r31, pt0
3749
3750 mfpr r31, pt0
3751// pvc_violate 225 // cuz PVC can't distinguish which bits changed
3752 hw_rei
3753
3754
3755 CALL_PAL_PRIV(0x002C)
3756CallPal_OpcDec2C:
3757 br r31, osfpal_calpal_opcdec
3758
3759// .sbttl "wrvptptr - PALcode for wrvptptr instruction"
3760//+
3761//
3762// Entry:
3763// Vectored into via hardware PALcode instruction dispatch.
3764//
3765// Function:
3766// vptptr <- a0 (r16)
3767//-
3768
3769 CALL_PAL_PRIV(PAL_WRVPTPTR_ENTRY)
3770Call_Pal_Wrvptptr:
3771 mtpr r16, ev5__mvptbr // Load Mbox copy
3772 mtpr r16, ev5__ivptbr // Load Ibox copy
3773 nop // Pad IPR write
3774 nop
3775 hw_rei
3776
3777 CALL_PAL_PRIV(0x002E)
3778CallPal_OpcDec2E:
3779 br r31, osfpal_calpal_opcdec
3780
3781 CALL_PAL_PRIV(0x002F)
3782CallPal_OpcDec2F:
3783 br r31, osfpal_calpal_opcdec
3784
3785// .sbttl "swpctx- PALcode for swpctx instruction"
3786
3787//+
3788//
3789// Entry:
3790// hardware dispatch via callPal instruction
3791// R16 -> new pcb
3792//
3793// Function:
3794// dynamic state moved to old pcb
3795// new state loaded from new pcb
3796// pcbb pointer set
3797// old pcbb returned in R0
3798//
3799// Note: need to add perf monitor stuff
3800//-
3801
3802 CALL_PAL_PRIV(PAL_SWPCTX_ENTRY)
3803Call_Pal_Swpctx:
3804 rpcc r13 // get cyccounter
3805 mfpr r0, pt_pcbb // get pcbb
3806
3807 ldqp r22, osfpcb_q_fen(r16) // get new fen/pme
3808	ldqp r23, osfpcb_l_cc(r16) // get new cc (low half) and asn (high half)
3809
3810 srl r13, 32, r25 // move offset
3811 mfpr r24, pt_usp // get usp
3812
3813 stqp r30, osfpcb_q_ksp(r0) // store old ksp
3814// pvc_violate 379 // stqp can't trap except replay. only problem if mf same ipr in same shadow.
3815 mtpr r16, pt_pcbb // set new pcbb
3816
3817 stqp r24, osfpcb_q_usp(r0) // store usp
3818 addl r13, r25, r25 // merge for new time
3819
3820 stlp r25, osfpcb_l_cc(r0) // save time
3821 ldah r24, (1<<(icsr_v_fpe-16))(r31) // build ICSR<FPE> mask
3822
3823 and r22, 1, r12 // isolate fen
3824 mfpr r25, icsr // get current icsr
3825
3826 ev5_pass2 lda r24, (1<<icsr_v_pmp)(r24) // add ICSR<PMP> bit to the mask (pass2)
3827 br r31, swpctx_cont
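
//+
// Illustrative sketch (not assembled): the half of swpctx shown above saves
// the outgoing context and installs the new PCBB; the ASN/PTBR/FEN/PME
// update continues at swpctx_cont. Helper names (pcb_store_*, read_pcc) and
// the pt_* variables are hypothetical stand-ins for the paltemps and IPRs.
//
//	uint64_t swpctx(uint64_t new_pcbb)
//	{
//	    uint64_t old_pcbb = pt_pcbb;
//	    uint64_t pcc      = read_pcc();                  /* rpcc: count + offset */
//	    pcb_store_q(old_pcbb, OSFPCB_Q_KSP, ksp);        /* save kernel sp */
//	    pcb_store_q(old_pcbb, OSFPCB_Q_USP, pt_usp);     /* save user sp */
//	    pcb_store_l(old_pcbb, OSFPCB_L_CC,
//	                (uint32_t)pcc + (uint32_t)(pcc >> 32)); /* accumulated cpu time */
//	    pt_pcbb = new_pcbb;
//	    return old_pcbb;                                 /* old PCBB comes back in v0 */
//	}
//-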
3828
3829// .sbttl "wrval - PALcode for wrval instruction"
3830//+
3831//
3832// Entry:
3833// Vectored into via hardware PALcode instruction dispatch.
3834//
3835// Function:
3836// sysvalue <- a0 (r16)
3837//-
3838
3839 CALL_PAL_PRIV(PAL_WRVAL_ENTRY)
3840Call_Pal_Wrval:
3841 nop
3842 mtpr r16, pt_sysval // Pad paltemp write
3843 nop
3844 nop
3845 hw_rei
3846
3847
3848// .sbttl "rdval - PALcode for rdval instruction"
3849
3850//+
3851//
3852// Entry:
3853// Vectored into via hardware PALcode instruction dispatch.
3854//
3855// Function:
3856// v0 (r0) <- sysvalue
3857//-
3858
3859 CALL_PAL_PRIV(PAL_RDVAL_ENTRY)
3860Call_Pal_Rdval:
3861 nop
3862 mfpr r0, pt_sysval
3863 nop
3864 hw_rei
3865
3866// .sbttl "tbi - PALcode for tbi instruction"
3867//+
3868//
3869// Entry:
3870// Vectored into via hardware PALcode instruction dispatch.
3871//
3872// Function:
3873// TB invalidate
3874// r16/a0 = TBI type
3875// r17/a1 = Va for TBISx instructions
3876//-
3877
3878 CALL_PAL_PRIV(PAL_TBI_ENTRY)
3879Call_Pal_Tbi:
3880 addq r16, 2, r16 // bias TBI type: -2..3 becomes 0..5
3881 br r23, CALL_PAL_tbi_10_ // get our address
3882
3883CALL_PAL_tbi_10_: cmpult r16, 6, r22 // see if in range
3884 lda r23, tbi_tbl-CALL_PAL_tbi_10_(r23) // set base to start of table
3885 sll r16, 4, r16 // * 16
3886 blbc r22, CALL_PAL_tbi_30_ // go rei, if not
3887
3888 addq r23, r16, r23 // addr of our code
3889//orig pvc_jsr tbi
3890 jmp r31, (r23) // and go do it
3891
3892CALL_PAL_tbi_30_:
3893 hw_rei
3894 nop
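
//+
// Illustrative sketch (not assembled): the dispatch above biases the TBI
// type and indexes a table of 16-byte code stubs (wrent below uses the same
// pattern for its six entry points). In rough C terms, with dispatch_to()
// standing in for the computed jmp:
//
//	void tbi(int64_t type, uint64_t va)
//	{
//	    uint64_t idx = (uint64_t)(type + 2);     /* -2..3 -> 0..5 */
//	    if (idx >= 6)
//	        return;                              /* out of range: just rei */
//	    dispatch_to(tbi_tbl + idx * 16, va);     /* each entry is 16 bytes */
//	}
//-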
3895
3896// .sbttl "wrent - PALcode for wrent instruction"
3897//+
3898//
3899// Entry:
3900// Vectored into via hardware PALcode instruction dispatch.
3901//
3902// Function:
3903// Update ent* in paltemps
3904// r16/a0 = Address of entry routine
3905// r17/a1 = Entry Number 0..5
3906//
3907// r22, r23 trashed
3908//-
3909
3910 CALL_PAL_PRIV(PAL_WRENT_ENTRY)
3911Call_Pal_Wrent:
3912 cmpult r17, 6, r22 // see if in range
3913 br r23, CALL_PAL_wrent_10_ // get our address
3914
3915CALL_PAL_wrent_10_: bic r16, 3, r16 // clean pc
3916 blbc r22, CALL_PAL_wrent_30_ // go rei, if not in range
3917
3918 lda r23, wrent_tbl-CALL_PAL_wrent_10_(r23) // set base to start of table
3919 sll r17, 4, r17 // *16
3920
3921 addq r17, r23, r23 // Get address in table
3922//orig pvc_jsr wrent
3923 jmp r31, (r23) // and go do it
3924
3925CALL_PAL_wrent_30_:
3926 hw_rei // out of range, just return
3927
3928// .sbttl "swpipl - PALcode for swpipl instruction"
3929//+
3930//
3931// Entry:
3932// Vectored into via hardware PALcode instruction dispatch.
3933//
3934// Function:
3935// v0 (r0) <- PS<IPL>
3936// PS<IPL> <- a0<2:0> (r16)
3937//
3938// t8 (r22) is scratch
3939//-
3940
3941 CALL_PAL_PRIV(PAL_SWPIPL_ENTRY)
3942Call_Pal_Swpipl:
3943 and r16, osfps_m_ipl, r16 // clean New ipl
3944 mfpr r22, pt_intmask // get int mask
3945
3946 extbl r22, r16, r22 // get mask for this ipl
3947 bis r11, r31, r0 // return old ipl
3948
3949 bis r16, r31, r11 // set new ps
3950 mtpr r22, ev5__ipl // set new mask
3951
3952 mfpr r31, pt0 // pad ipl write
3953 mfpr r31, pt0 // pad ipl write
3954
3955 hw_rei // back
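
//+
// Illustrative sketch (not assembled): swpipl returns the old PS/IPL and
// installs the new one, translating the software IPL into a hardware
// interrupt mask by selecting byte <new_ipl> of the pt_intmask paltemp
// (that is what the extbl does). Names below are hypothetical.
//
//	uint64_t swpipl(uint64_t a0)
//	{
//	    uint64_t old     = ps;                   /* PALshadow PS in r11 */
//	    uint64_t new_ipl = a0 & OSFPS_M_IPL;
//	    uint64_t hw_mask = (pt_intmask >> (new_ipl * 8)) & 0xff;
//	    ps = new_ipl;
//	    write_ipl(hw_mask);                      /* ev5 IPL register */
//	    return old;
//	}
//-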
3956
3957// .sbttl "rdps - PALcode for rdps instruction"
3958//+
3959//
3960// Entry:
3961// Vectored into via hardware PALcode instruction dispatch.
3962//
3963// Function:
3964// v0 (r0) <- ps
3965//-
3966
3967 CALL_PAL_PRIV(PAL_RDPS_ENTRY)
3968Call_Pal_Rdps:
3969 bis r11, r31, r0 // Fetch PALshadow PS
3970 nop // Must be 2 cycles long
3971 hw_rei
3972
3973// .sbttl "wrkgp - PALcode for wrkgp instruction"
3974//+
3975//
3976// Entry:
3977// Vectored into via hardware PALcode instruction dispatch.
3978//
3979// Function:
3980// kgp <- a0 (r16)
3981//-
3982
3983 CALL_PAL_PRIV(PAL_WRKGP_ENTRY)
3984Call_Pal_Wrkgp:
3985 nop
3986 mtpr r16, pt_kgp
3987 nop // Pad for pt write->read restriction
3988 nop
3989 hw_rei
3990
3991// .sbttl "wrusp - PALcode for wrusp instruction"
3992//+
3993//
3994// Entry:
3995// Vectored into via hardware PALcode instruction dispatch.
3996//
3997// Function:
3998// usp <- a0 (r16)
3999//-
4000
4001 CALL_PAL_PRIV(PAL_WRUSP_ENTRY)
4002Call_Pal_Wrusp:
4003 nop
4004 mtpr r16, pt_usp
4005 nop // Pad possible pt write->read restriction
4006 nop
4007 hw_rei
4008
4009// .sbttl "wrperfmon - PALcode for wrperfmon instruction"
4010//+
4011//
4012// Entry:
4013// Vectored into via hardware PALcode instruction dispatch.
4014//
4015//
4016// Function:
4017// Various control functions for the onchip performance counters
4018//
4019// option selector in r16
4020// option argument in r17
4021// returned status in r0
4022//
4023//
4024// r16 = 0 Disable performance monitoring for one or more cpu's
4025// r17 = 0 disable no counters
4026// r17 = bitmask disable counters specified in bit mask (1=disable)
4027//
4028// r16 = 1 Enable performance monitoring for one or more cpu's
4029// r17 = 0 enable no counters
4030// r17 = bitmask enable counters specified in bit mask (1=enable)
4031//
4032// r16 = 2 Mux select for one or more cpu's
4033// r17 = Mux selection (cpu specific)
4034// <24:19> bc_ctl<pm_mux_sel> field (see spec)
4035// <31>,<7:4>,<3:0> pmctr <sel0>,<sel1>,<sel2> fields (see spec)
4036//
4037// r16 = 3 Options
4038// r17 = (cpu specific)
4039// <0> = 0 log all processes
4040// <0> = 1 log only selected processes
4041// <30,9,8> mode select - ku,kp,kk
4042//
4043// r16 = 4 Interrupt frequency select
4044// r17 = (cpu specific) indicates interrupt frequencies desired for each
4045// counter, with "zero interrupts" being an option
4046// frequency info in r17 bits as defined by PMCTR_CTL<FRQx> below
4047//
4048// r16 = 5 Read Counters
4049// r17 = na
4050// r0 = value (same format as ev5 pmctr)
4051// <0> = 0 Read failed
4052// <0> = 1 Read succeeded
4053//
4054// r16 = 6 Write Counters
4055// r17 = value (same format as ev5 pmctr; all counters written simultaneously)
4056//
4057// r16 = 7 Enable performance monitoring for one or more cpu's and reset counter to 0
4058// r17 = 0 enable no counters
4059// r17 = bitmask enable & clear counters specified in bit mask (1=enable & clear)
4060//
4061//=============================================================================
4062//Assumptions:
4063//PMCTR_CTL:
4064//
4065// <15:14> CTL0 -- encoded frequency select and enable - CTR0
4066// <13:12> CTL1 -- " - CTR1
4067// <11:10> CTL2 -- " - CTR2
4068//
4069// <9:8> FRQ0 -- frequency select for CTR0 (no enable info)
4070// <7:6> FRQ1 -- frequency select for CTR1
4071// <5:4> FRQ2 -- frequency select for CTR2
4072//
4073// <0> all vs. select processes (0=all,1=select)
4074//
4075// where
4076// FRQx<1:0>
4077// 0 1 disable interrupt
4078// 1 0 frequency = 65536 (16384 for ctr2)
4079// 1 1 frequency = 256
4080// note: FRQx<1:0> = 00 will keep counters from ever being enabled.
4081//
4082//=============================================================================
4083//
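//+
// Illustrative sketch (not assembled): the PMCTR_CTL shadow layout described
// above, written out as hypothetical C constants for reference; only the
// FRQ2 position (shift 4) is used explicitly later in perfmon_freq.
//
//	#define PMCTR_CTL_V_CTL0	14	/* <15:14> enable+freq, counter 0 */
//	#define PMCTR_CTL_V_CTL1	12	/* <13:12> enable+freq, counter 1 */
//	#define PMCTR_CTL_V_CTL2	10	/* <11:10> enable+freq, counter 2 */
//	#define PMCTR_CTL_V_FRQ0	8	/* <9:8>   frequency only, counter 0 */
//	#define PMCTR_CTL_V_FRQ1	6	/* <7:6>   frequency only, counter 1 */
//	#define PMCTR_CTL_V_FRQ2	4	/* <5:4>   frequency only, counter 2 */
//	#define PMCTR_CTL_V_SPROCESS	0	/* <0> 0 = all, 1 = selected processes */
//-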
4084 CALL_PAL_PRIV(0x0039)
4085// unsupported in Hudson code .. pboyle Nov/95
4086CALL_PAL_Wrperfmon:
4087#if perfmon_debug == 0
4088 // "real" performance monitoring code
4089 cmpeq r16, 1, r0 // check for enable
4090 bne r0, perfmon_en // br if requested to enable
4091
4092 cmpeq r16, 2, r0 // check for mux ctl
4093 bne r0, perfmon_muxctl // br if request to set mux controls
4094
4095 cmpeq r16, 3, r0 // check for options
4096 bne r0, perfmon_ctl // br if request to set options
4097
4098 cmpeq r16, 4, r0 // check for interrupt frequency select
4099 bne r0, perfmon_freq // br if request to change frequency select
4100
4101 cmpeq r16, 5, r0 // check for counter read request
4102 bne r0, perfmon_rd // br if request to read counters
4103
4104 cmpeq r16, 6, r0 // check for counter write request
4105 bne r0, perfmon_wr // br if request to write counters
4106
4107 cmpeq r16, 7, r0 // check for counter clear/enable request
4108 bne r0, perfmon_enclr // br if request to clear/enable counters
4109
4110 beq r16, perfmon_dis // br if requested to disable (r16=0)
4111 br r31, perfmon_unknown // br if unknown request
4112#else
4113
4114 br r31, pal_perfmon_debug
4115#endif
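
//+
// Illustrative sketch (not assembled): the cmpeq/bne chain above is a
// function-code dispatch on a0, equivalent to the C switch below. The case
// labels correspond to the handlers in the continuation area at 0x4000.
//
//	switch (a0) {
//	case 0:  goto perfmon_dis;	/* disable counters named in a1 mask */
//	case 1:  goto perfmon_en;	/* enable counters named in a1 mask */
//	case 2:  goto perfmon_muxctl;	/* pmctr / bc_ctl mux selects */
//	case 3:  goto perfmon_ctl;	/* mode and process-select options */
//	case 4:  goto perfmon_freq;	/* interrupt frequency select */
//	case 5:  goto perfmon_rd;	/* read counters into v0 */
//	case 6:  goto perfmon_wr;	/* write counters from a1 */
//	case 7:  goto perfmon_enclr;	/* enable and clear counters */
//	default: goto perfmon_unknown;	/* v0 = 0, failure */
//	}
//-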
4116
4117// .sbttl "rdusp - PALcode for rdusp instruction"
4118//+
4119//
4120// Entry:
4121// Vectored into via hardware PALcode instruction dispatch.
4122//
4123// Function:
4124// v0 (r0) <- usp
4125//-
4126
4127 CALL_PAL_PRIV(PAL_RDUSP_ENTRY)
4128Call_Pal_Rdusp:
4129 nop
4130 mfpr r0, pt_usp
4131 hw_rei
4132
4133
4134 CALL_PAL_PRIV(0x003B)
4135CallPal_OpcDec3B:
4136 br r31, osfpal_calpal_opcdec
4137
4138// .sbttl "whami - PALcode for whami instruction"
4139//+
4140//
4141// Entry:
4142// Vectored into via hardware PALcode instruction dispatch.
4143//
4144// Function:
4145// v0 (r0) <- whami
4146//-
4147 CALL_PAL_PRIV(PAL_WHAMI_ENTRY)
4148Call_Pal_Whami:
4149 nop
4150 mfpr r0, pt_whami // Get Whami
4151 extbl r0, 1, r0 // Isolate just whami bits
4152 hw_rei
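
//+
// Illustrative sketch (not assembled): WHAMI lives in byte 1 of the
// pt_whami/pt_misc paltemp, so the extbl above amounts to
//
//	uint64_t whami(void) { return (pt_whami >> 8) & 0xff; }
//-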
4153
4154// .sbttl "retsys - PALcode for retsys instruction"
4155//
4156// Entry:
4157// Vectored into via hardware PALcode instruction dispatch.
4158// 00(sp) contains return pc
4159// 08(sp) contains r29
4160//
4161// Function:
4162// Return from system call.
4163// mode switched from kern to user.
4164// stacks swapped, ugp, upc restored.
4165// r23, r25 junked
4166//-
4167
4168 CALL_PAL_PRIV(PAL_RETSYS_ENTRY)
4169Call_Pal_Retsys:
4170 lda r25, osfsf_c_size(sp) // pop stack
4171 bis r25, r31, r14 // touch r25 & r14 to stall mf exc_addr
4172
4173 mfpr r14, exc_addr // save exc_addr in case of fault
4174 ldq r23, osfsf_pc(sp) // get pc
4175
4176 ldq r29, osfsf_gp(sp) // get gp
4177 stl_c r31, -4(sp) // clear lock_flag
4178
4179 lda r11, 1<<osfps_v_mode(r31)// new PS:mode=user
4180 mfpr r30, pt_usp // get users stack
4181
4182 bic r23, 3, r23 // clean return pc
4183 mtpr r31, ev5__ipl // zero ibox IPL - 2 bubbles to hw_rei
4184
4185 mtpr r11, ev5__dtb_cm // set Mbox current mode - no virt ref for 2 cycles
4186 mtpr r11, ev5__ps // set Ibox current mode - 2 bubble to hw_rei
4187
4188 mtpr r23, exc_addr // set return address - 1 bubble to hw_rei
4189 mtpr r25, pt_ksp // save kern stack
4190
4191 rc r31 // clear inter_flag
4192// pvc_violate 248 // possible hidden mt->mf pt violation ok in callpal
4193 hw_rei_spe // and back
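
//+
// Illustrative sketch (not assembled): retsys pops the syscall frame,
// switches back to the user stack and user mode, and resumes at the saved
// PC (the real flow also clears lock_flag and intr_flag). Names below are
// hypothetical stand-ins for the paltemps and frame offsets.
//
//	void retsys(void)
//	{
//	    uint64_t pc = *(uint64_t *)(sp + 0);     /* 00(sp): return pc */
//	    uint64_t gp = *(uint64_t *)(sp + 8);     /* 08(sp): saved r29 */
//	    pt_ksp = sp + OSFSF_C_SIZE;              /* pop kernel stack */
//	    sp     = pt_usp;                         /* back to user stack */
//	    ps     = PS_USER;                        /* mode = user, ipl = 0 */
//	    r29    = gp;
//	    return_to(pc & ~3ull);                   /* hw_rei */
//	}
//-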
4194
4195
4196 CALL_PAL_PRIV(0x003E)
4197CallPal_OpcDec3E:
4198 br r31, osfpal_calpal_opcdec
4199
4200// .sbttl "rti - PALcode for rti instruction"
4201//+
4202//
4203// Entry:
4204// Vectored into via hardware PALcode instruction dispatch.
4205//
4206// Function:
4207// 00(sp) -> ps
4208// 08(sp) -> pc
4209// 16(sp) -> r29 (gp)
4210// 24(sp) -> r16 (a0)
4211// 32(sp) -> r17 (a1)
4212// 40(sp) -> r18 (a2)
4213//-
4214
4215 CALL_PAL_PRIV(PAL_RTI_ENTRY)
4216#ifdef SIMOS
4217 /* called once by platform_tlaser */
4218 .globl Call_Pal_Rti
4219#endif
4220Call_Pal_Rti:
4221 lda r25, osfsf_c_size(sp) // get updated sp
4222 bis r25, r31, r14 // touch r14,r25 to stall mf exc_addr
4223
4224 mfpr r14, exc_addr // save PC in case of fault
4225 rc r31 // clear intr_flag
4226
4227 ldq r12, -6*8(r25) // get ps
4228 ldq r13, -5*8(r25) // pc
4229
4230 ldq r18, -1*8(r25) // a2
4231 ldq r17, -2*8(r25) // a1
4232
4233 ldq r16, -3*8(r25) // a0
4234 ldq r29, -4*8(r25) // gp
4235
4236 bic r13, 3, r13 // clean return pc
4237 stl_c r31, -4(r25) // clear lock_flag
4238
4239 and r12, osfps_m_mode, r11 // get mode
4240 mtpr r13, exc_addr // set return address
4241
4242 beq r11, rti_to_kern // br if rti to Kern
4243 br r31, rti_to_user // out of call_pal space
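
//+
// Illustrative sketch (not assembled): rti pops the six-quadword frame laid
// out above and dispatches on the saved mode; rti_to_kern and rti_to_user do
// the per-mode stack and interrupt-mask handling. Names are hypothetical.
//
//	void rti(void)
//	{
//	    uint64_t top = sp + OSFSF_C_SIZE;
//	    uint64_t ps  = *(uint64_t *)(top - 6*8);
//	    uint64_t pc  = *(uint64_t *)(top - 5*8);
//	    r29 = *(uint64_t *)(top - 4*8);          /* gp */
//	    r16 = *(uint64_t *)(top - 3*8);          /* a0 */
//	    r17 = *(uint64_t *)(top - 2*8);          /* a1 */
//	    r18 = *(uint64_t *)(top - 1*8);          /* a2 */
//	    set_return_pc(pc & ~3ull);
//	    if ((ps & OSFPS_M_MODE) == 0)
//	        rti_to_kern();
//	    else
//	        rti_to_user();
//	}
//-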
4244
4245
4246// .sbttl "Start the Unprivileged CALL_PAL Entry Points"
4247// .sbttl "bpt- PALcode for bpt instruction"
4248//+
4249//
4250// Entry:
4251// Vectored into via hardware PALcode instruction dispatch.
4252//
4253// Function:
4254// Build stack frame
4255// a0 <- code
4256// a1 <- unpred
4257// a2 <- unpred
4258// vector via entIF
4259//
4260//-
4261//
4262 .text 1
4263// . = 0x3000
4264 CALL_PAL_UNPRIV(PAL_BPT_ENTRY)
4265Call_Pal_Bpt:
4266 sll r11, 63-osfps_v_mode, r25 // Shift mode up to MS bit
4267 mtpr r31, ev5__ps // Set Ibox current mode to kernel
4268
4269 bis r11, r31, r12 // Save PS for stack write
4270 bge r25, CALL_PAL_bpt_10_ // no stack swap needed if cm=kern
4271
4272 mtpr r31, ev5__dtb_cm // Set Mbox current mode to kernel -
4273 // no virt ref for next 2 cycles
4274 mtpr r30, pt_usp // save user stack
4275
4276 bis r31, r31, r11 // Set new PS
4277 mfpr r30, pt_ksp
4278
4279CALL_PAL_bpt_10_:
4280 lda sp, 0-osfsf_c_size(sp)// allocate stack space
4281 mfpr r14, exc_addr // get pc
4282
4283 stq r16, osfsf_a0(sp) // save regs
4284 bis r31, osf_a0_bpt, r16 // set a0
4285
4286 stq r17, osfsf_a1(sp) // a1
4287 br r31, bpt_bchk_common // out of call_pal space
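
//+
// Illustrative sketch (not assembled): bpt, bugchk and gentrap all build the
// same entIF frame; only the code passed in a0 differs. The common pattern,
// with hypothetical names, is roughly:
//
//	void trap_to_entIF(uint64_t code)
//	{
//	    uint64_t old_ps = ps;
//	    if (old_ps & OSFPS_M_MODE) {     /* came from user mode */
//	        pt_usp = sp;                 /* swap to the kernel stack */
//	        sp = pt_ksp;
//	        ps = PS_KERNEL;
//	    }
//	    sp -= OSFSF_C_SIZE;              /* allocate the frame */
//	    frame_a0 = old_a0;               /* saved for the eventual rti */
//	    frame_a1 = old_a1;
//	    a0 = code;                       /* osf_a0_bpt / _bugchk / _gentrap */
//	    /* ps, pc, gp and a2 are pushed in bpt_bchk_common, then vector via pt_entIF */
//	}
//-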
4288
4289
4290// .sbttl "bugchk- PALcode for bugchk instruction"
4291//+
4292//
4293// Entry:
4294// Vectored into via hardware PALcode instruction dispatch.
4295//
4296// Function:
4297// Build stack frame
4298// a0 <- code
4299// a1 <- unpred
4300// a2 <- unpred
4301// vector via entIF
4302//
4303//-
4304//
4305 CALL_PAL_UNPRIV(PAL_BUGCHK_ENTRY)
4306Call_Pal_Bugchk:
4307 sll r11, 63-osfps_v_mode, r25 // Shift mode up to MS bit
4308 mtpr r31, ev5__ps // Set Ibox current mode to kernel
4309
4310 bis r11, r31, r12 // Save PS for stack write
4311 bge r25, CALL_PAL_bugchk_10_ // no stack swap needed if cm=kern
4312
4313 mtpr r31, ev5__dtb_cm // Set Mbox current mode to kernel -
4314 // no virt ref for next 2 cycles
4315 mtpr r30, pt_usp // save user stack
4316
4317 bis r31, r31, r11 // Set new PS
4318 mfpr r30, pt_ksp
4319
4320CALL_PAL_bugchk_10_:
4321 lda sp, 0-osfsf_c_size(sp)// allocate stack space
4322 mfpr r14, exc_addr // get pc
4323
4324 stq r16, osfsf_a0(sp) // save regs
4325 bis r31, osf_a0_bugchk, r16 // set a0
4326
4327 stq r17, osfsf_a1(sp) // a1
4328 br r31, bpt_bchk_common // out of call_pal space
4329
4330
4331 CALL_PAL_UNPRIV(0x0082)
4332CallPal_OpcDec82:
4333 br r31, osfpal_calpal_opcdec
4334
4335// .sbttl "callsys - PALcode for callsys instruction"
4336//+
4337//
4338// Entry:
4339// Vectored into via hardware PALcode instruction dispatch.
4340//
4341// Function:
4342// Switch mode to kernel and build a callsys stack frame.
4343// sp = ksp
4344// gp = kgp
4345// t8 - t10 (r22-r24) trashed
4346//
4347//-
4348//
4349 CALL_PAL_UNPRIV(PAL_CALLSYS_ENTRY)
4350Call_Pal_Callsys:
4351
4352 and r11, osfps_m_mode, r24 // get mode
4353 mfpr r22, pt_ksp // get ksp
4354
4355 beq r24, sys_from_kern // sysCall from kern is not allowed
4356 mfpr r12, pt_entsys // get address of callSys routine
4357
4358//+
4359// from here on we know we are in user going to Kern
4360//-
4361 mtpr r31, ev5__dtb_cm // set Mbox current mode - no virt ref for 2 cycles
4362 mtpr r31, ev5__ps // set Ibox current mode - 2 bubble to hw_rei
4363
4364 bis r31, r31, r11 // PS=0 (mode=kern)
4365 mfpr r23, exc_addr // get pc
4366
4367 mtpr r30, pt_usp // save usp
4368 lda sp, 0-osfsf_c_size(r22)// set new sp
4369
4370 stq r29, osfsf_gp(sp) // save user gp/r29
4371 stq r24, osfsf_ps(sp) // save ps
4372
4373 stq r23, osfsf_pc(sp) // save pc
4374 mtpr r12, exc_addr // set address
4375 // 1 cycle to hw_rei
4376
4377 mfpr r29, pt_kgp // get the kern gp/r29
4378
4379 hw_rei_spe // and off we go!
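
//+
// Illustrative sketch (not assembled): callsys switches to kernel mode,
// builds a small frame on the kernel stack and vectors through the entSys
// address registered with wrent. Names below are hypothetical stand-ins.
//
//	void callsys(void)
//	{
//	    if ((ps & OSFPS_M_MODE) == 0) {
//	        sys_from_kern();             /* callsys from kernel is not allowed */
//	        return;
//	    }
//	    uint64_t old_ps = ps, old_pc = exc_addr;
//	    pt_usp = sp;                     /* save the user stack */
//	    sp = pt_ksp - OSFSF_C_SIZE;      /* kernel stack, room for the frame */
//	    frame_gp = r29;                  /* user gp */
//	    frame_ps = old_ps;
//	    frame_pc = old_pc;
//	    r29 = pt_kgp;                    /* kernel gp */
//	    ps  = PS_KERNEL;
//	    jump_to(pt_entSys);
//	}
//-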
4380
4381
4382 CALL_PAL_UNPRIV(0x0084)
4383CallPal_OpcDec84:
4384 br r31, osfpal_calpal_opcdec
4385
4386 CALL_PAL_UNPRIV(0x0085)
4387CallPal_OpcDec85:
4388 br r31, osfpal_calpal_opcdec
4389
4390// .sbttl "imb - PALcode for imb instruction"
4391//+
4392//
4393// Entry:
4394// Vectored into via hardware PALcode instruction dispatch.
4395//
4396// Function:
4397// Flush the writebuffer and flush the Icache
4398//
4399//-
4400//
4401 CALL_PAL_UNPRIV(PAL_IMB_ENTRY)
4402Call_Pal_Imb:
4403 mb // Clear the writebuffer
4404 mfpr r31, ev5__mcsr // Sync with clear
4405 nop
4406 nop
4407 br r31, pal_ic_flush // Flush Icache
4408
4409
4410// .sbttl "CALL_PAL OPCDECs"
4411
4412 CALL_PAL_UNPRIV(0x0087)
4413CallPal_OpcDec87:
4414 br r31, osfpal_calpal_opcdec
4415
4416 CALL_PAL_UNPRIV(0x0088)
4417CallPal_OpcDec88:
4418 br r31, osfpal_calpal_opcdec
4419
4420 CALL_PAL_UNPRIV(0x0089)
4421CallPal_OpcDec89:
4422 br r31, osfpal_calpal_opcdec
4423
4424 CALL_PAL_UNPRIV(0x008A)
4425CallPal_OpcDec8A:
4426 br r31, osfpal_calpal_opcdec
4427
4428 CALL_PAL_UNPRIV(0x008B)
4429CallPal_OpcDec8B:
4430 br r31, osfpal_calpal_opcdec
4431
4432 CALL_PAL_UNPRIV(0x008C)
4433CallPal_OpcDec8C:
4434 br r31, osfpal_calpal_opcdec
4435
4436 CALL_PAL_UNPRIV(0x008D)
4437CallPal_OpcDec8D:
4438 br r31, osfpal_calpal_opcdec
4439
4440 CALL_PAL_UNPRIV(0x008E)
4441CallPal_OpcDec8E:
4442 br r31, osfpal_calpal_opcdec
4443
4444 CALL_PAL_UNPRIV(0x008F)
4445CallPal_OpcDec8F:
4446 br r31, osfpal_calpal_opcdec
4447
4448 CALL_PAL_UNPRIV(0x0090)
4449CallPal_OpcDec90:
4450 br r31, osfpal_calpal_opcdec
4451
4452 CALL_PAL_UNPRIV(0x0091)
4453CallPal_OpcDec91:
4454 br r31, osfpal_calpal_opcdec
4455
4456 CALL_PAL_UNPRIV(0x0092)
4457CallPal_OpcDec92:
4458 br r31, osfpal_calpal_opcdec
4459
4460 CALL_PAL_UNPRIV(0x0093)
4461CallPal_OpcDec93:
4462 br r31, osfpal_calpal_opcdec
4463
4464 CALL_PAL_UNPRIV(0x0094)
4465CallPal_OpcDec94:
4466 br r31, osfpal_calpal_opcdec
4467
4468 CALL_PAL_UNPRIV(0x0095)
4469CallPal_OpcDec95:
4470 br r31, osfpal_calpal_opcdec
4471
4472 CALL_PAL_UNPRIV(0x0096)
4473CallPal_OpcDec96:
4474 br r31, osfpal_calpal_opcdec
4475
4476 CALL_PAL_UNPRIV(0x0097)
4477CallPal_OpcDec97:
4478 br r31, osfpal_calpal_opcdec
4479
4480 CALL_PAL_UNPRIV(0x0098)
4481CallPal_OpcDec98:
4482 br r31, osfpal_calpal_opcdec
4483
4484 CALL_PAL_UNPRIV(0x0099)
4485CallPal_OpcDec99:
4486 br r31, osfpal_calpal_opcdec
4487
4488 CALL_PAL_UNPRIV(0x009A)
4489CallPal_OpcDec9A:
4490 br r31, osfpal_calpal_opcdec
4491
4492 CALL_PAL_UNPRIV(0x009B)
4493CallPal_OpcDec9B:
4494 br r31, osfpal_calpal_opcdec
4495
4496 CALL_PAL_UNPRIV(0x009C)
4497CallPal_OpcDec9C:
4498 br r31, osfpal_calpal_opcdec
4499
4500 CALL_PAL_UNPRIV(0x009D)
4501CallPal_OpcDec9D:
4502 br r31, osfpal_calpal_opcdec
4503
4504// .sbttl "rdunique - PALcode for rdunique instruction"
4505//+
4506//
4507// Entry:
4508// Vectored into via hardware PALcode instruction dispatch.
4509//
4510// Function:
4511// v0 (r0) <- unique
4512//
4513//-
4514//
4515 CALL_PAL_UNPRIV(PAL_RDUNIQUE_ENTRY)
4516CALL_PALrdunique_:
4517 mfpr r0, pt_pcbb // get pcb pointer
4518 ldqp r0, osfpcb_q_unique(r0) // get new value
4519
4520 hw_rei
4521
4522// .sbttl "wrunique - PALcode for wrunique instruction"
4523//+
4524//
4525// Entry:
4526// Vectored into via hardware PALcode instruction dispatch.
4527//
4528// Function:
4529// unique <- a0 (r16)
4530//
4531//-
4532//
4533CALL_PAL_UNPRIV(PAL_WRUNIQUE_ENTRY)
4534CALL_PAL_Wrunique:
4535 nop
4536 mfpr r12, pt_pcbb // get pcb pointer
4537 stqp r16, osfpcb_q_unique(r12)// store new value
4538 nop // Pad palshadow write
4539 hw_rei // back
4540
4541// .sbttl "CALL_PAL OPCDECs"
4542
4543 CALL_PAL_UNPRIV(0x00A0)
4544CallPal_OpcDecA0:
4545 br r31, osfpal_calpal_opcdec
4546
4547 CALL_PAL_UNPRIV(0x00A1)
4548CallPal_OpcDecA1:
4549 br r31, osfpal_calpal_opcdec
4550
4551 CALL_PAL_UNPRIV(0x00A2)
4552CallPal_OpcDecA2:
4553 br r31, osfpal_calpal_opcdec
4554
4555 CALL_PAL_UNPRIV(0x00A3)
4556CallPal_OpcDecA3:
4557 br r31, osfpal_calpal_opcdec
4558
4559 CALL_PAL_UNPRIV(0x00A4)
4560CallPal_OpcDecA4:
4561 br r31, osfpal_calpal_opcdec
4562
4563 CALL_PAL_UNPRIV(0x00A5)
4564CallPal_OpcDecA5:
4565 br r31, osfpal_calpal_opcdec
4566
4567 CALL_PAL_UNPRIV(0x00A6)
4568CallPal_OpcDecA6:
4569 br r31, osfpal_calpal_opcdec
4570
4571 CALL_PAL_UNPRIV(0x00A7)
4572CallPal_OpcDecA7:
4573 br r31, osfpal_calpal_opcdec
4574
4575 CALL_PAL_UNPRIV(0x00A8)
4576CallPal_OpcDecA8:
4577 br r31, osfpal_calpal_opcdec
4578
4579 CALL_PAL_UNPRIV(0x00A9)
4580CallPal_OpcDecA9:
4581 br r31, osfpal_calpal_opcdec
4582
4583
4584// .sbttl "gentrap - PALcode for gentrap instruction"
4585//+
4586// CALL_PAL_gentrap:
4587// Entry:
4588// Vectored into via hardware PALcode instruction dispatch.
4589//
4590// Function:
4591// Build stack frame
4592// a0 <- code
4593// a1 <- unpred
4594// a2 <- unpred
4595// vector via entIF
4596//
4597//-
4598
4599 CALL_PAL_UNPRIV(0x00AA)
4600// unsupported in Hudson code .. pboyle Nov/95
4601CALL_PAL_gentrap:
4602 sll r11, 63-osfps_v_mode, r25 // Shift mode up to MS bit
4603 mtpr r31, ev5__ps // Set Ibox current mode to kernel
4604
4605 bis r11, r31, r12 // Save PS for stack write
4606 bge r25, CALL_PAL_gentrap_10_ // no stack swap needed if cm=kern
4607
4608 mtpr r31, ev5__dtb_cm // Set Mbox current mode to kernel -
4609 // no virt ref for next 2 cycles
4610 mtpr r30, pt_usp // save user stack
4611
4612 bis r31, r31, r11 // Set new PS
4613 mfpr r30, pt_ksp
4614
4615CALL_PAL_gentrap_10_:
4616 lda sp, 0-osfsf_c_size(sp)// allocate stack space
4617 mfpr r14, exc_addr // get pc
4618
4619 stq r16, osfsf_a0(sp) // save regs
4620 bis r31, osf_a0_gentrap, r16// set a0
4621
4622 stq r17, osfsf_a1(sp) // a1
4623 br r31, bpt_bchk_common // out of call_pal space
4624
4625
4626// .sbttl "CALL_PAL OPCDECs"
4627
4628 CALL_PAL_UNPRIV(0x00AB)
4629CallPal_OpcDecAB:
4630 br r31, osfpal_calpal_opcdec
4631
4632 CALL_PAL_UNPRIV(0x00AC)
4633CallPal_OpcDecAC:
4634 br r31, osfpal_calpal_opcdec
4635
4636 CALL_PAL_UNPRIV(0x00AD)
4637CallPal_OpcDecAD:
4638 br r31, osfpal_calpal_opcdec
4639
4640 CALL_PAL_UNPRIV(0x00AE)
4641CallPal_OpcDecAE:
4642 br r31, osfpal_calpal_opcdec
4643
4644 CALL_PAL_UNPRIV(0x00AF)
4645CallPal_OpcDecAF:
4646 br r31, osfpal_calpal_opcdec
4647
4648 CALL_PAL_UNPRIV(0x00B0)
4649CallPal_OpcDecB0:
4650 br r31, osfpal_calpal_opcdec
4651
4652 CALL_PAL_UNPRIV(0x00B1)
4653CallPal_OpcDecB1:
4654 br r31, osfpal_calpal_opcdec
4655
4656 CALL_PAL_UNPRIV(0x00B2)
4657CallPal_OpcDecB2:
4658 br r31, osfpal_calpal_opcdec
4659
4660 CALL_PAL_UNPRIV(0x00B3)
4661CallPal_OpcDecB3:
4662 br r31, osfpal_calpal_opcdec
4663
4664 CALL_PAL_UNPRIV(0x00B4)
4665CallPal_OpcDecB4:
4666 br r31, osfpal_calpal_opcdec
4667
4668 CALL_PAL_UNPRIV(0x00B5)
4669CallPal_OpcDecB5:
4670 br r31, osfpal_calpal_opcdec
4671
4672 CALL_PAL_UNPRIV(0x00B6)
4673CallPal_OpcDecB6:
4674 br r31, osfpal_calpal_opcdec
4675
4676 CALL_PAL_UNPRIV(0x00B7)
4677CallPal_OpcDecB7:
4678 br r31, osfpal_calpal_opcdec
4679
4680 CALL_PAL_UNPRIV(0x00B8)
4681CallPal_OpcDecB8:
4682 br r31, osfpal_calpal_opcdec
4683
4684 CALL_PAL_UNPRIV(0x00B9)
4685CallPal_OpcDecB9:
4686 br r31, osfpal_calpal_opcdec
4687
4688 CALL_PAL_UNPRIV(0x00BA)
4689CallPal_OpcDecBA:
4690 br r31, osfpal_calpal_opcdec
4691
4692 CALL_PAL_UNPRIV(0x00BB)
4693CallPal_OpcDecBB:
4694 br r31, osfpal_calpal_opcdec
4695
4696 CALL_PAL_UNPRIV(0x00BC)
4697CallPal_OpcDecBC:
4698 br r31, osfpal_calpal_opcdec
4699
4700 CALL_PAL_UNPRIV(0x00BD)
4701CallPal_OpcDecBD:
4702 br r31, osfpal_calpal_opcdec
4703
4704 CALL_PAL_UNPRIV(0x00BE)
4705CallPal_OpcDecBE:
4706 br r31, osfpal_calpal_opcdec
4707
4708 CALL_PAL_UNPRIV(0x00BF)
4709CallPal_OpcDecBF:
4710 // MODIFIED BY EGH 2/25/04
4711 br r31, copypal_impl
4712
4713
4714/*======================================================================*/
4715/* OSF/1 CALL_PAL CONTINUATION AREA */
4716/*======================================================================*/
4717
4718 .text 2
4719
4720 . = 0x4000
4721
4722
4723// .sbttl "Continuation of MTPR_PERFMON"
4724 ALIGN_BLOCK
4725#if perfmon_debug == 0
4726 // "real" performance monitoring code
4727// mux ctl
4728perfmon_muxctl:
4729 lda r8, 1(r31) // get a 1
4730 sll r8, pmctr_v_sel0, r8 // move to sel0 position
4731 or r8, ((0xf<<pmctr_v_sel1) | (0xf<<pmctr_v_sel2)), r8 // build mux select mask
4732 and r17, r8, r25 // isolate pmctr mux select bits
4733 mfpr r0, ev5__pmctr
4734 bic r0, r8, r0 // clear old mux select bits
4735 or r0,r25, r25 // or in new mux select bits
4736 mtpr r25, ev5__pmctr
4737
4738 // ok, now tackle cbox mux selects
4739 ldah r14, 0xfff0(r31)
4740 zap r14, 0xE0, r14 // Get Cbox IPR base
4741//orig get_bc_ctl_shadow r16 // bc_ctl returned in lower longword
4742// adapted from ev5_pal_macros.mar
4743 mfpr r16, pt_impure
4744 lda r16, CNS_Q_IPR(r16)
4745 RESTORE_SHADOW(r16,CNS_Q_BC_CTL,r16);
4746
4747 lda r8, 0x3F(r31) // build mux select mask
4748 sll r8, bc_ctl_v_pm_mux_sel, r8
4749
4750 and r17, r8, r25 // isolate bc_ctl mux select bits
4751 bic r16, r8, r16 // clear old mux select bits
4752 or r16, r25, r25 // create new bc_ctl
4753 mb // clear out cbox for future ipr write
4754 stqp r25, ev5__bc_ctl(r14) // store to cbox ipr
4755 mb // clear out cbox for future ipr write
4756
4757//orig update_bc_ctl_shadow r25, r16 // r25=value, r16-overwritten with adjusted impure ptr
4758// adapted from ev5_pal_macros.mar
4759 mfpr r16, pt_impure
4760 lda r16, CNS_Q_IPR(r16)
4761 SAVE_SHADOW(r25,CNS_Q_BC_CTL,r16);
4762
4763 br r31, perfmon_success
4764
4765
4766// requested to disable perf monitoring
4767perfmon_dis:
4768 mfpr r14, ev5__pmctr // read ibox pmctr ipr
4769perfmon_dis_ctr0: // and begin with ctr0
4770 blbc r17, perfmon_dis_ctr1 // do not disable ctr0
4771 lda r8, 3(r31)
4772 sll r8, pmctr_v_ctl0, r8
4773 bic r14, r8, r14 // disable ctr0
4774perfmon_dis_ctr1:
4775 srl r17, 1, r17
4776 blbc r17, perfmon_dis_ctr2 // do not disable ctr1
4777 lda r8, 3(r31)
4778 sll r8, pmctr_v_ctl1, r8
4779 bic r14, r8, r14 // disable ctr1
4780perfmon_dis_ctr2:
4781 srl r17, 1, r17
4782 blbc r17, perfmon_dis_update // do not disable ctr2
4783 lda r8, 3(r31)
4784 sll r8, pmctr_v_ctl2, r8
4785 bic r14, r8, r14 // disable ctr2
4786perfmon_dis_update:
4787 mtpr r14, ev5__pmctr // update pmctr ipr
4788//;the following code is not needed for ev5 pass2 and later, but doesn't hurt anything to leave in
4789// adapted from ev5_pal_macros.mar
4790//orig get_pmctr_ctl r8, r25 // pmctr_ctl bit in r8. adjusted impure pointer in r25
4791 mfpr r25, pt_impure
4792 lda r25, CNS_Q_IPR(r25)
4793 RESTORE_SHADOW(r8,CNS_Q_PM_CTL,r25);
4794
4795 lda r17, 0x3F(r31) // build mask
4796 sll r17, pmctr_v_ctl2, r17 // shift mask to correct position
4797 and r14, r17, r14 // isolate ctl bits
4798 bic r8, r17, r8 // clear out old ctl bits
4799 or r14, r8, r14 // create shadow ctl bits
4800//orig store_reg1 pmctr_ctl, r14, r25, ipr=1 // update pmctr_ctl register
4801//adjusted impure pointer still in r25
4802 SAVE_SHADOW(r14,CNS_Q_PM_CTL,r25);
4803
4804 br r31, perfmon_success
4805
4806
4807// requested to enable perf monitoring
4808//;the following code can be greatly simplified for pass2, but should work fine as is.
4809
4810
4811perfmon_enclr:
4812 lda r9, 1(r31) // set enclr flag
4813 br perfmon_en_cont
4814
4815perfmon_en:
4816 bis r31, r31, r9 // clear enclr flag
4817
4818perfmon_en_cont:
4819 mfpr r8, pt_pcbb // get PCB base
4820//orig get_pmctr_ctl r25, r25
4821 mfpr r25, pt_impure
4822 lda r25, CNS_Q_IPR(r25)
4823 RESTORE_SHADOW(r25,CNS_Q_PM_CTL,r25);
4824
4825 ldqp r16, osfpcb_q_fen(r8) // read DAT/PME/FEN quadword
4826 mfpr r14, ev5__pmctr // read ibox pmctr ipr
4827 srl r16, osfpcb_v_pme, r16 // get pme bit
4828 mfpr r13, icsr
4829 and r16, 1, r16 // isolate pme bit
4830
4831 // this code only needed in pass2 and later
4832//orig sget_addr r12, 1<<icsr_v_pmp, r31
4833 lda r12, 1<<icsr_v_pmp(r31) // pb
4834 bic r13, r12, r13 // clear pmp bit
4835 sll r16, icsr_v_pmp, r12 // move pme bit to icsr<pmp> position
4836 or r12, r13, r13 // new icsr with icsr<pmp> bit set/clear
4837 ev5_pass2 mtpr r13, icsr // update icsr
4838
4839#if ev5_p1 != 0
4840 lda r12, 1(r31)
4841 cmovlbc r25, r12, r16 // r16<0> set if either pme=1 or sprocess=0 (sprocess in bit 0 of r25)
4842#else
4843 bis r31, 1, r16 // set r16<0> on pass2 to update pmctr always (icsr provides real enable)
4844#endif
4845
4846 sll r25, 6, r25 // shift frequency bits into pmctr_v_ctl positions
4847 bis r14, r31, r13 // copy pmctr
4848
4849perfmon_en_ctr0: // and begin with ctr0
4850 blbc r17, perfmon_en_ctr1 // do not enable ctr0
4851
4852 blbc r9, perfmon_en_noclr0 // if enclr flag set, clear ctr0 field
4853 lda r8, 0xffff(r31)
4854 zapnot r8, 3, r8 // ctr0<15:0> mask
4855 sll r8, pmctr_v_ctr0, r8
4856 bic r14, r8, r14 // clear ctr bits
4857 bic r13, r8, r13 // clear ctr bits
4858
4859perfmon_en_noclr0:
4860//orig get_addr r8, 3<<pmctr_v_ctl0, r31
4861 LDLI(r8, (3<<pmctr_v_ctl0))
4862 and r25, r8, r12 //isolate frequency select bits for ctr0
4863 bic r14, r8, r14 // clear ctl0 bits in preparation for enabling
4864 or r14,r12,r14 // or in new ctl0 bits
4865
4866perfmon_en_ctr1: // enable ctr1
4867 srl r17, 1, r17 // get ctr1 enable
4868 blbc r17, perfmon_en_ctr2 // do not enable ctr1
4869
4870 blbc r9, perfmon_en_noclr1 // if enclr flag set, clear ctr1 field
4871 lda r8, 0xffff(r31)
4872 zapnot r8, 3, r8 // ctr1<15:0> mask
4873 sll r8, pmctr_v_ctr1, r8
4874 bic r14, r8, r14 // clear ctr bits
4875 bic r13, r8, r13 // clear ctr bits
4876
4877perfmon_en_noclr1:
4878//orig get_addr r8, 3<<pmctr_v_ctl1, r31
4879 LDLI(r8, (3<<pmctr_v_ctl1))
4880 and r25, r8, r12 //isolate frequency select bits for ctr1
4881 bic r14, r8, r14 // clear ctl1 bits in preparation for enabling
4882 or r14,r12,r14 // or in new ctl1 bits
4883
4884perfmon_en_ctr2: // enable ctr2
4885 srl r17, 1, r17 // get ctr2 enable
4886 blbc r17, perfmon_en_return // do not enable ctr2 - return
4887
4888 blbc r9, perfmon_en_noclr2 // if enclr flag set, clear ctr2 field
4889 lda r8, 0x3FFF(r31) // ctr2<13:0> mask
4890 sll r8, pmctr_v_ctr2, r8
4891 bic r14, r8, r14 // clear ctr bits
4892 bic r13, r8, r13 // clear ctr bits
4893
4894perfmon_en_noclr2:
4895//orig get_addr r8, 3<<pmctr_v_ctl2, r31
4896 LDLI(r8, (3<<pmctr_v_ctl2))
4897 and r25, r8, r12 //isolate frequency select bits for ctr2
4898 bic r14, r8, r14 // clear ctl2 bits in preparation for enabling
4899 or r14,r12,r14 // or in new ctl2 bits
4900
4901perfmon_en_return:
4902 cmovlbs r16, r14, r13 // if pme enabled, move enables into pmctr
4903 // else only do the counter clears
4904 mtpr r13, ev5__pmctr // update pmctr ipr
4905
4906//;this code not needed for pass2 and later, but does not hurt to leave it in
4907 lda r8, 0x3F(r31)
4908//orig get_pmctr_ctl r25, r12 // read pmctr ctl; r12=adjusted impure pointer
4909 mfpr r12, pt_impure
4910 lda r12, CNS_Q_IPR(r12)
4911 RESTORE_SHADOW(r25,CNS_Q_PM_CTL,r12);
4912
4913 sll r8, pmctr_v_ctl2, r8 // build ctl mask
4914 and r8, r14, r14 // isolate new ctl bits
4915 bic r25, r8, r25 // clear out old ctl value
4916 or r25, r14, r14 // create new pmctr_ctl
4917//orig store_reg1 pmctr_ctl, r14, r12, ipr=1
4918 SAVE_SHADOW(r14,CNS_Q_PM_CTL,r12); // r12 still has the adjusted impure ptr
4919
4920 br r31, perfmon_success
4921
4922
4923// options...
4924perfmon_ctl:
4925
4926// set mode
4927//orig get_pmctr_ctl r14, r12 // read shadow pmctr ctl; r12=adjusted impure pointer
4928 mfpr r12, pt_impure
4929 lda r12, CNS_Q_IPR(r12)
4930 RESTORE_SHADOW(r14,CNS_Q_PM_CTL,r12);
4931
4932//orig get_addr r8, (1<<pmctr_v_killu) | (1<<pmctr_v_killp) | (1<<pmctr_v_killk), r31 // build mode mask for pmctr register
4933 LDLI(r8, ((1<<pmctr_v_killu) | (1<<pmctr_v_killp) | (1<<pmctr_v_killk)))
4934 mfpr r0, ev5__pmctr
4935 and r17, r8, r25 // isolate pmctr mode bits
4936 bic r0, r8, r0 // clear old mode bits
4937 or r0, r25, r25 // or in new mode bits
4938 mtpr r25, ev5__pmctr
4939
4940//;the following code will only be used in pass2, but should not hurt anything if run in pass1.
4941 mfpr r8, icsr
4942 lda r25, 1<<icsr_v_pma(r31) // set icsr<pma> if r17<0>=0
4943 bic r8, r25, r8 // clear old pma bit
4944 cmovlbs r17, r31, r25 // and clear icsr<pma> if r17<0>=1
4945 or r8, r25, r8
4946 ev5_pass2 mtpr r8, icsr // 4 bubbles to hw_rei
4947 mfpr r31, pt0 // pad icsr write
4948 mfpr r31, pt0 // pad icsr write
4949
4950//;the following code not needed for pass2 and later, but should work anyway.
4951 bis r14, 1, r14 // set for select processes
4952 blbs r17, perfmon_sp // branch if select processes
4953 bic r14, 1, r14 // all processes
4954perfmon_sp:
4955//orig store_reg1 pmctr_ctl, r14, r12, ipr=1 // update pmctr_ctl register
4956 SAVE_SHADOW(r14,CNS_Q_PM_CTL,r12); // r12 still has the adjusted impure ptr
4957 br r31, perfmon_success
4958
4959// counter frequency select
4960perfmon_freq:
4961//orig get_pmctr_ctl r14, r12 // read shadow pmctr ctl; r12=adjusted impure pointer
4962 mfpr r12, pt_impure
4963 lda r12, CNS_Q_IPR(r12)
4964 RESTORE_SHADOW(r14,CNS_Q_PM_CTL,r12);
4965
4966 lda r8, 0x3F(r31)
4967//orig sll r8, pmctr_ctl_v_frq2, r8 // build mask for frequency select field
4968// The PMCTR_CTL shadow layout above puts FRQ2 at <5:4>, so the 6-bit mask is shifted by 4 to cover FRQ2..FRQ0 .. pb
4969#define pmctr_ctl_v_frq2_SHIFT 4
4970 sll r8, pmctr_ctl_v_frq2_SHIFT, r8 // build mask for frequency select field
4971
4972 and r8, r17, r17
4973 bic r14, r8, r14 // clear out old frequency select bits
4974
4975 or r17, r14, r14 // or in new frequency select info
4976//orig store_reg1 pmctr_ctl, r14, r12, ipr=1 // update pmctr_ctl register
4977 SAVE_SHADOW(r14,CNS_Q_PM_CTL,r12); // r12 still has the adjusted impure ptr
4978
4979 br r31, perfmon_success
4980
4981// read counters
4982perfmon_rd:
4983 mfpr r0, ev5__pmctr
4984 or r0, 1, r0 // or in return status
4985 hw_rei // back to user
4986
4987// write counters
4988perfmon_wr:
4989 mfpr r14, ev5__pmctr
4990 lda r8, 0x3FFF(r31) // ctr2<13:0> mask
4991 sll r8, pmctr_v_ctr2, r8
4992
4993//orig get_addr r9, 0xFFFFFFFF, r31, verify=0 // ctr2<15:0>,ctr1<15:0> mask
4994 LDLI(r9, (0xFFFFFFFF))
4995 sll r9, pmctr_v_ctr1, r9
4996 or r8, r9, r8 // or ctr2, ctr1, ctr0 mask
4997 bic r14, r8, r14 // clear ctr fields
4998 and r17, r8, r25 // clear all but ctr fields
4999 or r25, r14, r14 // write ctr fields
5000 mtpr r14, ev5__pmctr // update pmctr ipr
5001
5002 mfpr r31, pt0 // pad pmctr write (needed only to keep PVC happy)
5003
5004perfmon_success:
5005 or r31, 1, r0 // set success
5006 hw_rei // back to user
5007
5008perfmon_unknown:
5009 or r31, r31, r0 // set fail
5010 hw_rei // back to user
5011
5012#else
5013
5014// end of "real code", start of debug code
5015
5016//+
5017// Debug environment:
5018// (in pass2, always set icsr<pma> to ensure master counter enable is on)
5019// R16 = 0 Write to on-chip performance monitor ipr
5020// r17 = on-chip ipr
5021// r0 = return value of read of on-chip performance monitor ipr
5022// R16 = 1 Setup Cbox mux selects
5023// r17 = Cbox mux selects in same position as in bc_ctl ipr.
5024// r0 = return value of read of on-chip performance monitor ipr
5025//
5026//-
5027pal_perfmon_debug:
5028 mfpr r8, icsr
5029 lda r9, 1<<icsr_v_pma(r31)
5030 bis r8, r9, r8
5031 mtpr r8, icsr
5032
5033 mfpr r0, ev5__pmctr // read old value
5034 bne r16, cbox_mux_sel
5035
5036 mtpr r17, ev5__pmctr // update pmctr ipr
5037 br r31, end_pm
5038
5039cbox_mux_sel:
5040 // ok, now tackle cbox mux selects
5041 ldah r14, 0xfff0(r31)
5042 zap r14, 0xE0, r14 // Get Cbox IPR base
5043//orig get_bc_ctl_shadow r16 // bc_ctl returned
5044 mfpr r16, pt_impure
5045 lda r16, CNS_Q_IPR(r16)
5046 RESTORE_SHADOW(r16,CNS_Q_BC_CTL,r16);
5047
5048 lda r8, 0x3F(r31) // build mux select mask
5049 sll r8, BC_CTL_V_PM_MUX_SEL, r8
5050
5051 and r17, r8, r25 // isolate bc_ctl mux select bits
5052 bic r16, r8, r16 // clear old mux select bits
5053 or r16, r25, r25 // create new bc_ctl
5054 mb // clear out cbox for future ipr write
5055 stqp r25, ev5__bc_ctl(r14) // store to cbox ipr
5056 mb // clear out cbox for future ipr write
5057//orig update_bc_ctl_shadow r25, r16 // r25=value, r16-overwritten with adjusted impure ptr
5058 mfpr r16, pt_impure
5059 lda r16, CNS_Q_IPR(r16)
5060 SAVE_SHADOW(r25,CNS_Q_BC_CTL,r16);
5061
5062end_pm: hw_rei
5063
5064#endif
5065
5066
5067//;The following code is a workaround for a cpu bug in which Istream prefetches
5068//;to super-page address space in user mode may escape off-chip.
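//;
//; Illustrative sketch (not assembled): on a mode-changing hw_rei the code
//; below leaves ICSR<SPE> superpage mapping enabled only in kernel mode and
//; records the current mode in pt_misc<cm>. Roughly, with hypothetical
//; names (the osf_chm_fix dcache handling is omitted):
//;
//;	void hw_rei_update_spe(void)
//;	{
//;	    int cur  = (ps >> OSFPS_V_MODE) & 1;      /* 0 = kern, 1 = user */
//;	    int prev = (pt_misc >> PT_MISC_V_CM) & 1;
//;	    if (cur == prev)
//;	        return;                               /* nothing to change */
//;	    icsr = (icsr & ~(1ull << (ICSR_V_SPE + 1)))
//;	         | ((uint64_t)(cur ^ 1) << (ICSR_V_SPE + 1));
//;	    pt_misc = (pt_misc & ~(1ull << PT_MISC_V_CM))
//;	            | ((uint64_t)cur << PT_MISC_V_CM);
//;	    pal_ic_flush();        /* changing ICSR<SPE> requires an Icache flush */
//;	}
//;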
5069#if spe_fix != 0
5070
5071 ALIGN_BLOCK
5072hw_rei_update_spe:
5073 mfpr r12, pt_misc // get previous mode
5074 srl r11, osfps_v_mode, r10 // isolate current mode bit
5075 and r10, 1, r10
5076 extbl r12, 7, r8 // get previous mode field
5077 and r8, 1, r8 // isolate previous mode bit
5078 cmpeq r10, r8, r8 // compare previous and current modes
5079 beq r8, hw_rei_update_spe_5_
5080 hw_rei // if same, just return
5081
5082hw_rei_update_spe_5_:
5083
5084#if fill_err_hack != 0
5085
5086 fill_error_hack
5087#endif
5088
5089 mfpr r8, icsr // get current icsr value
5090 ldah r9, (2<<(icsr_v_spe-16))(r31) // get spe bit mask
5091 bic r8, r9, r8 // disable spe
5092 xor r10, 1, r9 // flip mode for new spe bit
5093 sll r9, icsr_v_spe+1, r9 // shift into position
5094 bis r8, r9, r8 // enable/disable spe
5095 lda r9, 1(r31) // now update our flag
5096 sll r9, pt_misc_v_cm, r9 // previous mode saved bit mask
5097 bic r12, r9, r12 // clear saved previous mode
5098 sll r10, pt_misc_v_cm, r9 // current mode saved bit mask
5099 bis r12, r9, r12 // set saved current mode
5100 mtpr r12, pt_misc // update pt_misc
5101 mtpr r8, icsr // update icsr
5102
5103#if osf_chm_fix != 0
5104
5105
5106 blbc r10, hw_rei_update_spe_10_ // branch if not user mode
5107
5108 mb // ensure no outstanding fills
5109 lda r12, 1<<dc_mode_v_dc_ena(r31) // User mode
5110 mtpr r12, dc_mode // Turn on dcache
5111 mtpr r31, dc_flush // and flush it
5112 br r31, pal_ic_flush
5113
5114hw_rei_update_spe_10_: mfpr r9, pt_pcbb // Kernel mode
5115 ldqp r9, osfpcb_q_fen(r9) // get FEN
5116 blbc r9, pal_ic_flush // return if FP disabled
5117 mb // ensure no outstanding fills
5118 mtpr r31, dc_mode // turn off dcache
5119#endif
5120
5121
5122 br r31, pal_ic_flush // Pal restriction - must flush Icache if changing ICSR<SPE>
5123#endif
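
//+
// copypal_impl - PAL-mode block copy reached from the CALL_PAL 0xBF entry above.
//
// Entry:
//	r16/a0 = destination, r17/a1 = source, r18/a2 = length in bytes
// Exit:
//	r0/v0 = original destination; r8-r10, r12-r14 and r25 are scratch
//
// Illustrative sketch (not assembled): the routine is the usual Alpha
// unaligned block copy, equivalent in effect to the C below (a byte loop
// standing in for the quadword ldq_u/extql/extqh merging done here):
//
//	void *copypal(void *dst, const void *src, long len)
//	{
//	    unsigned char *d = dst;
//	    const unsigned char *s = src;
//	    while (len-- > 0)
//	        *d++ = *s++;
//	    return dst;
//	}
//-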
5124
5125
5126copypal_impl:
5127 mov r16, r0 // return original dest (a0) in v0
5128 ble r18, finished // if len (a2) <= 0 we are finished
5129 ldq_u r8, 0(r17) // fetch first source quadword
5130 xor r17, r16, r9 // compare src and dest alignment
5131 and r9, 7, r9 // r9 == 0 if src and dest are mutually aligned
5132 and r16, 7, r10 // r10 = dest byte offset within its quadword
5133 bne r9, unaligned // different alignments - take unaligned path
5134 beq r10, aligned // both quadword aligned - no head fixup needed
5135 ldq_u r9, 0(r16) // head fixup: fetch current dest quadword
5136 addq r18, r10, r18 // count the whole first quadword in the length
5137 mskqh r8, r17, r8 // keep source bytes at/above the start offset
5138 mskql r9, r17, r9 // keep dest bytes below the start offset
5139 bis r8, r9, r8 // merged first quadword to store
5140aligned: // src/dest mutually aligned; r8 holds the next quadword to store
5141 subq r18, 1, r10
5142 bic r10, 7, r10
5143 and r18, 7, r18
5144 beq r10, aligned_done
5145loop:
5146 stq_u r8, 0(r16)
5147 ldq_u r8, 8(r17)
5148 subq r10, 8, r10
5149 lda r16,8(r16)
5150 lda r17,8(r17)
5151 bne r10, loop
5152aligned_done:
5153 bne r18, few_left
5154 stq_u r8, 0(r16)
5155 br r31, finished
5156few_left: // merge the final partial quadword with existing dest bytes
5157 mskql r8, r18, r10
5158 ldq_u r9, 0(r16)
5159 mskqh r9, r18, r9
5160 bis r10, r9, r10
5161 stq_u r10, 0(r16)
5162 br r31, finished
5163unaligned: // src and dest alignments differ; merge source quadwords with extql/extqh
5164 addq r17, r18, r25
5165 cmpule r18, 8, r9
5166 bne r9, unaligned_few_left
5167 beq r10, unaligned_dest_aligned
5168 and r16, 7, r10
5169 subq r31, r10, r10
5170 addq r10, 8, r10
5171 ldq_u r9, 7(r17)
5172 extql r8, r17, r8
5173 extqh r9, r17, r9
5174 bis r8, r9, r12
5175 insql r12, r16, r12
5176 ldq_u r13, 0(r16)
5177 mskql r13, r16, r13
5178 bis r12, r13, r12
5179 stq_u r12, 0(r16)
5180 addq r16, r10, r16
5181 addq r17, r10, r17
5182 subq r18, r10, r18
5183 ldq_u r8, 0(r17)
5184unaligned_dest_aligned:
5185 subq r18, 1, r10
5186 bic r10, 7, r10
5187 and r18, 7, r18
5188 beq r10, unaligned_partial_left
5189unaligned_loop:
5190 ldq_u r9, 7(r17)
5191 lda r17, 8(r17)
5192 extql r8, r17, r12
5193 extqh r9, r17, r13
5194 subq r10, 8, r10
5195 bis r12, r13, r13
5196 stq r13, 0(r16)
5197 lda r16, 8(r16)
5198 beq r10, unaligned_second_partial_left
5199 ldq_u r8, 7(r17)
5200 lda r17, 8(r17)
5201 extql r9, r17, r12
5202 extqh r8, r17, r13
5203 bis r12, r13, r13
5204 subq r10, 8, r10
5205 stq r13, 0(r16)
5206 lda r16, 8(r16)
5207 bne r10, unaligned_loop
5208unaligned_partial_left:
5209 mov r8, r9
5210unaligned_second_partial_left:
5211 ldq_u r8, -1(r25)
5212 extql r9, r17, r9
5213 extqh r8, r17, r8
5214 bis r8, r9, r8
5215 bne r18, few_left
5216 stq_u r8, 0(r16)
5217 br r31, finished
5218unaligned_few_left:
5219 ldq_u r9, -1(r25)
5220 extql r8, r17, r8
5221 extqh r9, r17, r9
5222 bis r8, r9, r8
5223 insqh r8, r16, r9
5224 insql r8, r16, r8
5225 lda r12, -1(r31)
5226 mskql r12, r18, r13
5227 cmovne r13, r13, r12
5228 insqh r12, r16, r13
5229 insql r12, r16, r12
5230 addq r16, r18, r10
5231 ldq_u r14, 0(r16)
5232 ldq_u r25, -1(r10)
5233 bic r14, r12, r14
5234 bic r25, r13, r25
5235 and r8, r12, r8
5236 and r9, r13, r9
5237 bis r8, r14, r8
5238 bis r9, r25, r9
5239 stq_u r9, -1(r10)
5240 stq_u r8, 0(r16)
5241finished:
5242 hw_rei
54// modified to use the Hudson style "impure.h" instead of ev5_impure.sdl
55// since we don't have a mechanism to expand the data structures.... pb Nov/95
56
57// build_fixed_image: not sure what means
58// real_mm to be replaced during rewrite
59// remove_save_state remove_restore_state can be remooved to save space ??
60
61
62#include "ev5_defs.h"
63#include "ev5_impure.h"
64#include "ev5_alpha_defs.h"
65#include "ev5_paldef.h"
66#include "ev5_osfalpha_defs.h"
67#include "fromHudsonMacros.h"
68#include "fromHudsonOsf.h"
69#include "dc21164FromGasSources.h"
70
71#ifdef SIMOS
72#define DEBUGSTORE(c) nop
73#else
74#define DEBUGSTORE(c) \
75 lda r13, c(zero) ; \
76 bsr r25, debugstore
77#endif
78
79#define DEBUG_EXC_ADDR()\
80 bsr r25, put_exc_addr; \
81 DEBUGSTORE(13) ; \
82 DEBUGSTORE(10)
83
84#define egore 0
85#define acore 0
86#define beh_model 0
87#define ev5_p2 1
88#define ev5_p1 0
89#define ldvpte_bug_fix 1
90#define osf_chm_fix 0
91
92// Do we want to do this?? pb
93#define spe_fix 0
94// Do we want to do this?? pb
95#define build_fixed_image 0
96
97#define ev5_pass2
98#define enable_p4_fixups 0
99#define osf_svmin 1
100#define enable_physical_console 0
101#define fill_err_hack 0
102#define icflush_on_tbix 0
103#define max_cpuid 1
104#define perfmon_debug 0
105#define rawhide_system 0
106#define rax_mode 0
107
108
109// This is the fix for the user-mode super page references causing the machine to crash.
110#if (spe_fix == 1) && (build_fixed_image==1)
111#define hw_rei_spe br r31, hw_rei_update_spe
112#else
113#define hw_rei_spe hw_rei
114#endif
115
116
117// redefine a few of the distribution-code names to match the Hudson gas names.
118// opcodes
119#define ldqp ldq_p
120#define stqp stq_p
121#define ldlp ldl_p
122#define stlp stl_p
123
124#define r0 $0
125#define r1 $1
126#define r2 $2
127#define r3 $3
128#define r4 $4
129#define r5 $5
130#define r6 $6
131#define r7 $7
132#define r8 $8
133#define r9 $9
134#define r10 $10
135#define r11 $11
136#define r12 $12
137#define r13 $13
138#define r14 $14
139#define r15 $15
140#define r16 $16
141#define r17 $17
142#define r18 $18
143#define r19 $19
144#define r20 $20
145#define r21 $21
146#define r22 $22
147#define r23 $23
148#define r24 $24
149#define r25 $25
150#define r26 $26
151#define r27 $27
152#define r28 $28
153#define r29 $29
154#define r30 $30
155#define r31 $31
156
157// .title "EV5 OSF PAL"
158// .ident "V1.18"
159//
160//****************************************************************************
161//* *
162//* Copyright (c) 1992, 1993, 1994, 1995 *
163//* by DIGITAL Equipment Corporation, Maynard, Mass. *
164//* *
165//* This software is furnished under a license and may be used and copied *
166//* only in accordance with the terms of such license and with the *
167//* inclusion of the above copyright notice. This software or any other *
168//* copies thereof may not be provided or otherwise made available to any *
169//* other person. No title to and ownership of the software is hereby *
170//* transferred. *
171//* *
172//* The information in this software is subject to change without notice *
173//* and should not be construed as a commitment by DIGITAL Equipment *
174//* Corporation. *
175//* *
176//* DIGITAL assumes no responsibility for the use or reliability of its *
177//* software on equipment which is not supplied by DIGITAL. *
178//* *
179//****************************************************************************
180
181// .sbttl "Edit History"
182//+
183// Who Rev When What
184// ------------ --- ----------- --------------------------------
185// DB 0.0 03-Nov-1992 Start
186// DB 0.1 28-Dec-1992 add swpctx
187// DB 0.2 05-Jan-1993 Bug: PVC found mtpr dtb_CM -> virt ref bug
188// DB 0.3 11-Jan-1993 rearrange trap entry points
189// DB 0.4 01-Feb-1993 add tbi
190// DB 0.5 04-Feb-1993 real MM, kludge reset flow, kludge swppal
191// DB 0.6 09-Feb-1993 Bug: several stack pushers used r16 for pc (should be r14)
192// DB 0.7 10-Feb-1993 Bug: pushed wrong PC (+8) on CALL_PAL OPCDEC
193// Bug: typo on register number for store in wrunique
194// Bug: rti to kern uses r16 as scratch
195// Bug: callsys saving wrong value in pt_usp
196// DB 0.8 16-Feb-1993 PVC: fix possible pt write->read bug in wrkgp, wrusp
197// DB 0.9 18-Feb-1993 Bug: invalid_dpte_handler shifted pte twice
198// Bug: rti stl_c could corrupt the stack
199// Bug: unaligned returning wrong value in r17 (or should be and)
200// DB 0.10 19-Feb-1993 Add draina, rd/wrmces, cflush, cserve, interrupt
201// DB 0.11 23-Feb-1993 Turn caches on in reset flow
202// DB 0.12 10-Mar-1993 Bug: wrong value for icsr for FEN in kern mode flow
203// DB 0.13 15-Mar-1993 Bug: wrong value pushed for PC in invalid_dpte_handler if stack push tbmisses
204// DB 0.14 23-Mar-1993 Add impure pointer paltemp, reshuffle some other paltemps to match VMS
205// DB 0.15 15-Apr-1993 Combine paltemps for WHAMI and MCES
206// DB 0.16 12-May-1993 Update reset
207// New restriction: no mfpr exc_addr in cycle 1 of call_pal flows
208// Bug: in wrmces, not clearing DPC, DSC
209// Update swppal
210// Add pal bugchecks, pal_save_state, pal_restore_state
211// DB 0.17 24-May-1993 Add dfault_in_pal flow; fixup stack builder to have common state for pc/ps.
212// New restriction: No hw_rei_stall in 0,1,2 after mtpr itb_asn
213// DB 0.18 26-May-1993 PVC fixes
214// JM 0.19 01-jul-1993 Bug: OSFPAL_CALPAL_OPCDEC, TRAP_OPCDEC -- move mt exc_addr after stores
215// JM 0.20 07-jul-1993 Update cns_ and mchk_ names for impure.mar conversion to .sdl
216// Bug: exc_addr was being loaded before stores that could dtb_miss in the following
217// routines: TRAP_FEN,FEN_TO_OPCDEC,CALL_PAL_CALLSYS,RTI_TO_KERN
218// JM 0.21 26-jul-1993 Bug: move exc_addr load after ALL stores in the following routines:
219// TRAP_IACCVIO::,TRAP_OPCDEC::,TRAP_ARITH::,TRAP_FEN::
220// dfault_trap_cont:,fen_to_opcdec:,invalid_dpte_handler:
221// osfpal_calpal_opcdec:,CALL_PAL_callsys::,TRAP_UNALIGN::
222// Bugs from PVC: trap_unalign - mt pt0 ->mf pt0 within 2 cycles
223// JM 0.22 28-jul-1993 Add WRIPIR instruction
224// JM 0.23 05-aug-1993 Bump version number for release
225// JM 0.24 11-aug-1993 Bug: call_pal_swpipl - palshadow write -> hw_rei violation
226// JM 0.25 09-sep-1993 Disable certain "hidden" pvc checks in call_pals;
227// New restriction: No hw_rei_stall in 0,1,2,3,4 after mtpr itb_asn - affects HALT(raxmode),
228// and SWPCTX
229// JM 0.26 07-oct-1993 Re-implement pal_version
230// JM 0.27 12-oct-1993 One more time: change pal_version format to conform to SRM
231// JM 0.28 14-oct-1993 Change ic_flush routine to pal_ic_flush
232// JM 0.29 19-oct-1993 BUG(?): dfault_in_pal: use exc_addr to check for dtbmiss,itbmiss check instead
233// of mm_stat<opcode>. mm_stat contains original opcode, not hw_ld.
234// JM 0.30 28-oct-1993 BUG: PVC violation - mf exc_addr in first cycles of call_pal in rti,retsys
235// JM 0.31 15-nov-1993 BUG: WRFEN trashing r0
236// JM 0.32 21-nov-1993 BUG: dtb_ldq,itb_ldq (used in dfault_in_pal) not defined when real_mm=0
237// JM 0.33 24-nov-1993 save/restore_state -
238// BUG: use ivptbr to restore mvptbr
239// BUG: adjust hw_ld/st base/offsets to accomodate 10-bit offset limit
240// CHANGE: Load 2 pages into dtb to accomodate compressed logout area/multiprocessors
241// JM 0.34 20-dec-1993 BUG: set r11<mode> to kernel for ksnv halt case
242// BUG: generate ksnv halt when tb miss on kernel stack accesses
243// save exc_addr in r14 for invalid_dpte stack builder
244// JM 0.35 30-dec-1993 BUG: PVC violation in trap_arith - mt exc_sum in shadow of store with mf exc_mask in
245// the same shadow
246// JM 0.36 6-jan-1994 BUG: fen_to_opcdec - savePC should be PC+4, need to save old PS, update new PS
247// New palcode restiction: mt icsr<fpe,hwe> --> 3 bubbles to hw_rei --affects wrfen
248// JM 0.37 25-jan-1994 BUG: PVC violations in restore_state - mt dc_mode/maf_mode ->mbox instructions
249// Hide impure area manipulations in macros
250// BUG: PVC violation in save and restore state-- move mt icsr out of shadow of ld/st
251// Add some pvc_violate statements
252// JM 0.38 1-feb-1994 Changes to save_state: save pt1; don't save r31,f31; update comments to reflect reality;
253// Changes to restore_state: restore pt1, icsr; don't restore r31,f31; update comments
254// Add code to ensure fen bit set in icsr before ldt
255// conditionally compile rax_more_reset out.
256// move ldqp,stqp macro definitions to ev5_pal_macros.mar and add .mcall's for them here
257// move rax reset stuff to ev5_osf_system_pal.m64
258// JM 0.39 7-feb-1994 Move impure pointer to pal scratch space. Use former pt_impure for bc_ctl shadow
259// and performance monitoring bits
260// Change to save_state routine to save more iprs.
261// JM 0.40 19-feb-1994 Change algorithm in save/restore_state routines; add f31,r31 back in
262// JM 0.41 21-feb-1994 Add flags to compile out save/restore state (not needed in some systems)
263// remove_save_state,remove_restore_state;fix new pvc violation in save_state
264// JM 0.42 22-feb-1994 BUG: save_state overwriting r3
265// JM 0.43 24-feb-1994 BUG: save_state saving wrong icsr
266// JM 0.44 28-feb-1994 Remove ic_flush from wr_tbix instructions
267// JM 0.45 15-mar-1994 BUG: call_pal_tbi trashes a0 prior to range check (instruction order problem)
268// New pal restriction in pal_restore_state: icsr<fpe>->floating instr = 3 bubbles
269// Add exc_sum and exc_mask to pal_save_state (not restore)
270// JM 0.46 22-apr-1994 Move impure pointer back into paltemp; Move bc_ctl shadow and pmctr_ctl into impure
271// area.
272// Add performance counter support to swpctx and wrperfmon
273// JM 0.47 9-may-1994 Bump version # (for ev5_osf_system_pal.m64 sys_perfmon fix)
274// JM 0.48 13-jun-1994 BUG: trap_interrupt --> put new ev5 ipl at 30 for all osfipl6 interrupts
275// JM 0.49 8-jul-1994 BUG: In the unlikely (impossible?) event that the branch to pal_pal_bug_check is
276// taken in the interrupt flow, stack is pushed twice.
277// SWPPAL - update to support ECO 59 to allow 0 as a valid address
278// Add itb flush to save/restore state routines
279// Change hw_rei to hw_rei_stall in ic_flush routine. Shouldn't be necessary, but
280// conforms to itbia restriction.
281// Added enable_physical_console flag (for enter/exit console routines only)
282// JM 0.50 29-jul-1994 Add code to dfault & invalid_dpte_handler to ignore exceptions on a
283// load to r31/f31. changed dfault_fetch_err to dfault_fetch_ldr31_err and
284// nmiss_fetch_err to nmiss_fetch_ldr31_err.
285// JM 1.00 1-aug-1994 Add pass2 support (swpctx)
286// JM 1.01 2-aug-1994 swppal now passes bc_ctl/bc_config in r1/r2
287// JM 1.02 15-sep-1994 BUG: swpctx missing shift of pme bit to correct position in icsr (pass2)
288// Moved perfmon code here from system file.
289// BUG: pal_perfmon - enable function not saving correct enables when pme not set (pass1)
290// JM 1.03 3-oct-1994 Added (pass2 only) code to wrperfmon enable function to look at pme bit.
291// JM 1.04 14-oct-1994 BUG: trap_interrupt - ISR read (and saved) before INTID -- INTID can change
292// after ISR read, but we won't catch the ISR update. reverse order
293// JM 1.05 17-nov-1994 Add code to dismiss UNALIGN trap if LD r31/F31
294// JM 1.06 28-nov-1994 BUG: missing mm_stat shift for store case in trap_unalign (new bug due to "dismiss" code)
295// JM 1.07 1-dec-1994 EV5 PASS1,2,3 BUG WORKAROUND: Add flag LDVPTE_BUG_FIX. In DTBMISS_DOUBLE, branch to
296// DTBMISS_SINGLE if not in palmode.
297// JM 1.08 9-jan-1995 Bump version number for change to EV5_OSF_SYSTEM_PAL.M64 - ei_stat fix in mchk logout frame
298// JM 1.09 2-feb-1995 Add flag "spe_fix" and accompanying code to workaround pre-pass4 bug: Disable Ibox
299// superpage mode in User mode and re-enable in kernel mode.
300// EV5_OSF_SYSTEM_PAL.M64 and EV5_PALDEF.MAR (added pt_misc_v_cm) also changed to support this.
301// JM 1.10 24-feb-1995 Set ldvpte_bug_fix regardless of ev5 pass. set default to ev5_p2
302// ES 1.11 10-mar-1995 Add flag "osf_chm_fix" to enable dcache in user mode only to avoid
303// cpu bug.
304// JM 1.12 17-mar-1995 BUG FIX: Fix F0 corruption problem in pal_restore_state
305// ES 1.13 17-mar-1995 Refine osf_chm_fix
306// ES 1.14 20-mar-1995 Don't need as many stalls before hw_rei_stall in chm_fix
307// ES 1.15 21-mar-1995 Add a stall to avoid a pvc violation in pal_restore_state
308// Force pvc checking of exit_console
309// ES 1.16 26-apr-1995 In the wrperfmon disable function, correct meaning of R17<2:0> to ctl2,ctl2,ctl0
310// ES 1.17 01-may-1995 In hw_rei_update_spe code, in the osf_chm fix, use bic and bis (self-correcting)
311// instead of xor to maintain previous mode in pt_misc
312// ES 1.18 14-jul-1995 In wrperfmon enable on pass2, update pmctr even if current process does
313// not have pme set. The bits in icsr maintain the master enable state.
314// In sys_reset, add icsr<17>=1 for ev56 byte/word eco enable
315//
316#define vmaj 1
317#define vmin 18
318#define vms_pal 1
319#define osf_pal 2
320#define pal_type osf_pal
321#define osfpal_version_l ((pal_type<<16) | (vmaj<<8) | (vmin<<0))
322//-
323
324// .sbttl "PALtemp register usage"
325
326//+
327// The EV5 Ibox holds 24 PALtemp registers. This maps the OSF PAL usage
328// for these PALtemps:
329//
330// pt0 local scratch
331// pt1 local scratch
332// pt2 entUna pt_entUna
333// pt3 CPU specific impure area pointer pt_impure
334// pt4 memory management temp
335// pt5 memory management temp
336// pt6 memory management temp
337// pt7 entIF pt_entIF
338// pt8 intmask pt_intmask
339// pt9 entSys pt_entSys
340// pt10
341// pt11 entInt pt_entInt
342// pt12 entArith pt_entArith
343// pt13 reserved for system specific PAL
344// pt14 reserved for system specific PAL
345// pt15 reserved for system specific PAL
346// pt16 MISC: scratch ! WHAMI<7:0> ! 0 0 0 MCES<4:0> pt_misc, pt_whami, pt_mces
347// pt17 sysval pt_sysval
348// pt18 usp pt_usp
349// pt19 ksp pt_ksp
350// pt20 PTBR pt_ptbr
351// pt21 entMM pt_entMM
352// pt22 kgp pt_kgp
353// pt23 PCBB pt_pcbb
354//
355//-
356
357// .sbttl "PALshadow register usage"
358//
359//+
360//
361// EV5 shadows R8-R14 and R25 when in PALmode and ICSR<shadow_enable> = 1.
362// This maps the OSF PAL usage of R8 - R14 and R25:
363//
364// r8 ITBmiss/DTBmiss scratch
365// r9 ITBmiss/DTBmiss scratch
366// r10 ITBmiss/DTBmiss scratch
367// r11 PS
368// r12 local scratch
369// r13 local scratch
370// r14 local scratch
371// r25 local scratch
372//
373//
374//-
375
376// .sbttl "ALPHA symbol definitions"
377// _OSF_PSDEF GLOBAL
378// _OSF_PTEDEF GLOBAL
379// _OSF_VADEF GLOBAL
380// _OSF_PCBDEF GLOBAL
381// _OSF_SFDEF GLOBAL
382// _OSF_MMCSR_DEF GLOBAL
383// _SCBDEF GLOBAL
384// _FRMDEF GLOBAL
385// _EXSDEF GLOBAL
386// _OSF_A0_DEF GLOBAL
387// _MCESDEF GLOBAL
388
389// .sbttl "EV5 symbol definitions"
390
391// _EV5DEF
392// _PALTEMP
393// _MM_STAT_DEF
394// _EV5_MM
395// _EV5_IPLDEF
396
397// _HALT_CODES GLOBAL
398// _MCHK_CODES GLOBAL
399
400// _PAL_IMPURE
401// _PAL_LOGOUT
402
403
404
405
406// .sbttl "PALcode configuration options"
407
408// There are a number of options that may be assembled into this version of
409// PALcode. They should be adjusted in a prefix assembly file (i.e. do not edit
410// the following). The options that can be adjusted cause the resultant PALcode
411// to reflect the desired target system.
412
413
414#define osfpal 1 // This is the PALcode for OSF.
415
416#ifndef rawhide_system
417
418#define rawhide_system 0
419#endif
420
421
422#ifndef real_mm
423// Page table translation vs 1-1 mapping
424#define real_mm 1
425#endif
426
427
428#ifndef rax_mode
429
430#define rax_mode 0
431#endif
432
433#ifndef egore
434// End of reset flow starts a program at 200000(hex).
435#define egore 1
436#endif
437
438#ifndef acore
439// End of reset flow starts a program at 40000(hex).
440#define acore 0
441#endif
442
443
444// assume acore+egore+rax_mode lt 2 // Assertion checker
445
446#ifndef beh_model
447// EV5 behavioral model specific code
448#define beh_model 1
449#endif
450
451#ifndef init_cbox
452// Reset flow init of Bcache and Scache
453#define init_cbox 1
454#endif
455
456#ifndef disable_crd
457// Decides whether the reset flow will disable
458#define disable_crd 0
459#endif
460
461 // correctable read interrupts via ICSR
462#ifndef perfmon_debug
463#define perfmon_debug 0
464#endif
465
466#ifndef icflush_on_tbix
467#define icflush_on_tbix 0
468#endif
469
470#ifndef remove_restore_state
471#define remove_restore_state 0
472#endif
473
474#ifndef remove_save_state
475#define remove_save_state 0
476#endif
477
478#ifndef enable_physical_console
479#define enable_physical_console 0
480#endif
481
482#ifndef ev5_p1
483#define ev5_p1 0
484#endif
485
486#ifndef ev5_p2
487#define ev5_p2 1
488#endif
489
490// assume ev5_p1+ev5_p2 eq 1
491
492#ifndef ldvpte_bug_fix
493#define ldvpte_bug_fix 1 // If set, fix ldvpte bug in dtbmiss_double flow.
494#endif
495
496#ifndef spe_fix
497// If set, disable super-page mode in user mode and re-enable
498// in kernel. Workaround for cpu bug.
499#define spe_fix 0
500#endif
501#ifndef build_fixed_image
502#define build_fixed_image 0
503#endif
504
505
506#ifndef fill_err_hack
507// If set, disable fill_error mode in user mode and re-enable
508// in kernel. Workaround for cpu bug.
509#define fill_err_hack 0
510#endif
511
512
513// .macro hw_rei_spe
514// .iif eq spe_fix, hw_rei
515//#if spe_fix != 0
516//
517//
518//#define hw_rei_chm_count hw_rei_chm_count + 1
519// p4_fixup_label \hw_rei_chm_count
520// .iif eq build_fixed_image, br r31, hw_rei_update_spe
521// .iif ne build_fixed_image, hw_rei
522//#endif
523//
524// .endm
525
526// Add flag "osf_chm_fix" to enable dcache in user mode only
527// to avoid cpu bug.
528
529#ifndef osf_chm_fix
530// If set, enable D-Cache in
531// user mode only.
532#define osf_chm_fix 0
533#endif
534
535#if osf_chm_fix != 0
536#define hw_rei_chm_count 0
537#endif
538
539#if osf_chm_fix != 0
540
541#define hw_rei_stall_chm_count 0
542#endif
543
544#ifndef enable_p4_fixups
545// If set, do EV5 Pass 4 fixups
546#define enable_p4_fixups 0
547#endif
548
549
550// Only allow fixups if fix enabled
551#if spe_fix == 0
552
553#define osf_chm_fix 0
554#endif
555
556#if spe_fix == 0
557
558#define enable_p4_fixups 0
559#endif
560
561
562 //Turn off fill_errors and MEM_NEM in user mode
563// .macro fill_error_hack ?L10_, ?L20_, ?L30_, ?L40_
564// //save r22,r23,r24
565// stqp r22, 0x150(r31) //add
566// stqp r23, 0x158(r31) //contents
567// stqp r24, 0x160(r31) //bit mask
568//
569// lda r22, 0x82(r31)
570// ldah r22, 0x8740(r22)
571// sll r22, 8, r22
572// ldlp r23, 0x80(r22) // r23 <- contents of CIA_MASK
573// bis r23,r31,r23
574//
575// lda r24, 0x8(r31) // r24 <- MEM_NEM bit
576// beq r10, L10_ // IF user mode (r10<0> == 0) pal mode
577// bic r23, r24, r23 // set fillerr_en bit
578// br r31, L20_ // ELSE
579//L10_: bis r23, r24, r23 // clear fillerr_en bit
580//L20_: // ENDIF
581//
582// stlp r23, 0x80(r22) // write back the CIA_MASK register
583// mb
584// ldlp r23, 0x80(r22)
585// bis r23,r31,r23
586// mb
587//
588// lda r22, 1(r31) // r22 <- 87.4000.0100 ptr to CIA_CTRL
589// ldah r22, 0x8740(r22)
590// sll r22, 8, r22
591// ldlp r23, 0(r22) // r23 <- contents of CIA_CTRL
592// bis r23,r31,r23
593//
594//
595// lda r24, 0x400(r31) // r24 <- fillerr_en bit
596// beq r10, L30_ // IF user mode (r10<0> == 0) pal mode
597// bic r23, r24, r23 // set fillerr_en bit
598// br r31, L40_ // ELSE
599//L30_: bis r23, r24, r23 // clear fillerr_en bit
600//L40_: // ENDIF
601//
602// stlp r23, 0(r22) // write back the CIA_CTRL register
603// mb
604// ldlp r23, 0(r22)
605// bis r23,r31,r23
606// mb
607//
608// //restore r22,r23,r24
609// ldqp r22, 0x150(r31)
610// ldqp r23, 0x158(r31)
611// ldqp r24, 0x160(r31)
612//
613// .endm
614
615// multiprocessor support can be enabled for a max of n processors by
616// setting the following to the number of processors on the system.
617// Note that this is really the max cpuid.
618
619#ifndef max_cpuid
620#define max_cpuid 8
621#endif
622
623#ifndef osf_svmin // platform specific palcode version number
624#define osf_svmin 0
625#endif
626
627
628#define osfpal_version_h ((max_cpuid<<16) | (osf_svmin<<0))
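// Worked example with the defaults above (max_cpuid = 8, osf_svmin = 0):
// osfpal_version_h = (8<<16) | 0 = 0x00080000.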
629
630// .mcall ldqp // override macro64 definition with macro from library
631// .mcall stqp // override macro64 definition with macro from library
632
633
634// .psect _pal,mix
635// huh pb pal_base:
636// huh pb #define current_block_base . - pal_base
637
638// .sbttl "RESET - Reset Trap Entry Point"
639//+
640// RESET - offset 0000
641// Entry:
642// Vectored into via hardware trap on reset, or branched to
643// on swppal.
644//
645// r0 = whami
646// r1 = pal_base
647// r2 = base of scratch area
648// r3 = halt code
649//
650//
651// Function:
652//
653//-
654
655 .text 0
656 . = 0x0000
657 .globl Pal_Base
658Pal_Base:
659 HDW_VECTOR(PAL_RESET_ENTRY)
660Trap_Reset:
661 nop
662#ifdef SIMOS
663 /*
664 * br r1 leaves its return address in r1 so sys_reset can locate the PALcode base
665 */
666 br r1,sys_reset
667#else
668 /* following is a srcmax change */
669
670 DEBUGSTORE(0x41)
671 /* The original code jumped using r1 as a linkage register to pass the base
672 of PALcode to the platform specific code. We use r1 to pass a parameter
673 from the SROM, so we hardcode the address of Pal_Base in platform.s
674 */
675 br r31, sys_reset
676#endif
677
678 // Specify PAL version info as a constant
679 // at a known location (reset + 8).
680
681 .long osfpal_version_l // <pal_type@16> ! <vmaj@8> ! <vmin@0>
682 .long osfpal_version_h // <max_cpuid@16> ! <osf_svmin@0>
683 .long 0
684 .long 0
685pal_impure_start:
686 .quad 0
687pal_debug_ptr:
688 .quad 0 // reserved for debug pointer ; 20
689#if beh_model == 0
690
691
692#if enable_p4_fixups != 0
693
694
695 .quad 0
696 .long p4_fixup_hw_rei_fixup_table
697#endif
698
699#else
700
701 .quad 0 //
702 .quad 0 //0x0030
703 .quad 0
704 .quad 0 //0x0040
705 .quad 0
706 .quad 0 //0x0050
707 .quad 0
708 .quad 0 //0x0060
709 .quad 0
710pal_enter_cns_address:
711 .quad 0 //0x0070 -- address to jump to from enter_console
712 .long <<sys_exit_console-pal_base>+1> //0x0078 -- offset to sys_exit_console (set palmode bit)
713#endif
714
715
716
717
718// .sbttl "IACCVIO- Istream Access Violation Trap Entry Point"
719
720//+
721// IACCVIO - offset 0080
722// Entry:
723// Vectored into via hardware trap on Istream access violation or sign check error on PC.
724//
725// Function:
726// Build stack frame
727// a0 <- Faulting VA
728// a1 <- MMCSR (1 for ACV)
729// a2 <- -1 (for ifetch fault)
730// vector via entMM
731//-
732
733 HDW_VECTOR(PAL_IACCVIO_ENTRY)
734Trap_Iaccvio:
735 DEBUGSTORE(0x42)
736 sll r11, 63-osfps_v_mode, r25 // Shift mode up to MS bit
737 mtpr r31, ev5__ps // Set Ibox current mode to kernel
738
739 bis r11, r31, r12 // Save PS
740 bge r25, TRAP_IACCVIO_10_ // no stack swap needed if cm=kern
741
742
743 mtpr r31, ev5__dtb_cm // Set Mbox current mode to kernel -
744 // no virt ref for next 2 cycles
745 mtpr r30, pt_usp // save user stack
746
747 bis r31, r31, r12 // Set new PS
748 mfpr r30, pt_ksp
749
750TRAP_IACCVIO_10_:
751 lda sp, 0-osfsf_c_size(sp)// allocate stack space
752 mfpr r14, exc_addr // get pc
753
754 stq r16, osfsf_a0(sp) // save regs
755 bic r14, 3, r16 // pass pc/va as a0
756
757 stq r17, osfsf_a1(sp) // a1
758 or r31, mmcsr_c_acv, r17 // pass mm_csr as a1
759
760 stq r18, osfsf_a2(sp) // a2
761 mfpr r13, pt_entmm // get entry point
762
763 stq r11, osfsf_ps(sp) // save old ps
764 bis r12, r31, r11 // update ps
765
766 stq r16, osfsf_pc(sp) // save pc
767 stq r29, osfsf_gp(sp) // save gp
768
769 mtpr r13, exc_addr // load exc_addr with entMM
770 // 1 cycle to hw_rei
771 mfpr r29, pt_kgp // get the kgp
772
773 subq r31, 1, r18 // pass flag of istream, as a2
774 hw_rei_spe
775
776
777// .sbttl "INTERRUPT- Interrupt Trap Entry Point"
778
779//+
780// INTERRUPT - offset 0100
781// Entry:
782// Vectored into via trap on hardware interrupt
783//
784// Function:
785// check for halt interrupt
786// check for passive release (current ipl geq requestor)
787// if necessary, switch to kernel mode
788// push stack frame, update ps (including current mode and ipl copies), sp, and gp
789// pass the interrupt info to the system module
790//
791//-
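//
// EV5->OSF IPL translation used below (derived from the subq/srl/cmovge
// sequence): osf_ipl = intid - 0x11, except that intid 0x1d/0x1e map to
// osf ipl 6 and 0x1f maps to 7 via (intid - 0x11) >> 1.
// Example: intid 0x14 -> 0x14 - 0x11 = 3 -> osf ipl 3;
// intid 0x1e -> (0x1e - 0x11) >> 1 = 6.
// pt_intmask is then indexed by the new osf ipl (extbl) to get the EV5 IPL
// value that is written back into the Ibox.
//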
792
793
794 HDW_VECTOR(PAL_INTERRUPT_ENTRY)
795Trap_Interrupt:
796 mfpr r13, ev5__intid // Fetch level of interruptor
797 mfpr r25, ev5__isr // Fetch interrupt summary register
798
799 srl r25, isr_v_hlt, r9 // Get HLT bit
800 mfpr r14, ev5__ipl
801
802 mtpr r31, ev5__dtb_cm // Set Mbox current mode to kern
803 blbs r9, sys_halt_interrupt // halt_interrupt if HLT bit set
804
805 cmple r13, r14, r8 // R8 = 1 if intid is less than or equal to ipl
806 bne r8, sys_passive_release // Passive release if current interrupt level is le ipl
807
808 and r11, osfps_m_mode, r10 // get mode bit
809 beq r10, TRAP_INTERRUPT_10_ // Skip stack swap in kernel
810
811 mtpr r30, pt_usp // save user stack
812 mfpr r30, pt_ksp // get kern stack
813
814TRAP_INTERRUPT_10_:
815 lda sp, (0-osfsf_c_size)(sp)// allocate stack space
816 mfpr r14, exc_addr // get pc
817
818 stq r11, osfsf_ps(sp) // save ps
819 stq r14, osfsf_pc(sp) // save pc
820
821 stq r29, osfsf_gp(sp) // push gp
822 stq r16, osfsf_a0(sp) // a0
823
824// pvc_violate 354 // ps is cleared anyway, if store to stack faults.
825 mtpr r31, ev5__ps // Set Ibox current mode to kernel
826 stq r17, osfsf_a1(sp) // a1
827
828 stq r18, osfsf_a2(sp) // a2
829 subq r13, 0x11, r12 // Start to translate from EV5IPL->OSFIPL
830
831 srl r12, 1, r8 // 1d, 1e: ipl 6. 1f: ipl 7.
832 subq r13, 0x1d, r9 // Check for 1d, 1e, 1f
833
834 cmovge r9, r8, r12 // if .ge. 1d, then take shifted value
835 bis r12, r31, r11 // set new ps
836
837 mfpr r12, pt_intmask
838 and r11, osfps_m_ipl, r14 // Isolate just new ipl (not really needed, since all non-ipl bits zeroed already)
839
840#ifdef SIMOS
841 /*
842 * Lance had space problems. We don't.
843 */
844 extbl r12, r14, r14 // Translate new OSFIPL->EV5IPL
845 mfpr r29, pt_kgp // update gp
846 mtpr r14, ev5__ipl // load the new IPL into Ibox
847#else
848// Moved the following three lines to sys_interrupt to make room for debug
849// extbl r12, r14, r14 // Translate new OSFIPL->EV5IPL
850// mfpr r29, pt_kgp // update gp
851
852// mtpr r14, ev5__ipl // load the new IPL into Ibox
853#endif
854 br r31, sys_interrupt // Go handle interrupt
855
856
857
858// .sbttl "ITBMISS- Istream TBmiss Trap Entry Point"
859
860//+
861// ITBMISS - offset 0180
862// Entry:
863// Vectored into via hardware trap on Istream translation buffer miss.
864//
865// Function:
866// Do a virtual fetch of the PTE, and fill the ITB if the PTE is valid.
867// Can trap into DTBMISS_DOUBLE.
868// This routine can use the PALshadow registers r8, r9, and r10
869//
870//-
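//
// Note on the real_mm == 0 path below: the fake PTE value 0x3301 assumes the
// OSF PTE layout (V = bit 0, KRE = bit 8, URE = bit 9, KWE = bit 12,
// UWE = bit 13), i.e. V | KRE | URE | KWE | UWE, and the PFN written into
// bits <63:32> is simply VA >> page_offset_size_bits, giving the 1-1
// va->pa mapping.
//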
871
872 HDW_VECTOR(PAL_ITB_MISS_ENTRY)
873Trap_Itbmiss:
874#if real_mm == 0
875
876
877 // Simple 1-1 va->pa mapping
878
879 nop // Pad to align to E1
880 mfpr r8, exc_addr
881
882 srl r8, page_offset_size_bits, r9
883 sll r9, 32, r9
884
885 lda r9, 0x3301(r9) // Make PTE, V set, all KRE, URE, KWE, UWE
886 mtpr r9, itb_pte // E1
887
888 hw_rei_stall // Nital says I don't have to obey shadow wait rule here.
889#else
890
891 // Real MM mapping
892 nop
893 mfpr r8, ev5__ifault_va_form // Get virtual address of PTE.
894
895 nop
896 mfpr r10, exc_addr // Get PC of faulting instruction in case of DTBmiss.
897
898pal_itb_ldq:
899 ld_vpte r8, 0(r8) // Get PTE, traps to DTBMISS_DOUBLE in case of TBmiss
900 mtpr r10, exc_addr // Restore exc_address if there was a trap.
901
902 mfpr r31, ev5__va // Unlock VA in case there was a double miss
903 nop
904
905 and r8, osfpte_m_foe, r25 // Look for FOE set.
906 blbc r8, invalid_ipte_handler // PTE not valid.
907
908 nop
909 bne r25, foe_ipte_handler // FOE is set
910
911 nop
912 mtpr r8, ev5__itb_pte // Ibox remembers the VA, load the PTE into the ITB.
913
914 hw_rei_stall //
915
916#endif
917
918
919
920
921// .sbttl "DTBMISS_SINGLE - Dstream Single TBmiss Trap Entry Point"
922
923//+
924// DTBMISS_SINGLE - offset 0200
925// Entry:
926// Vectored into via hardware trap on Dstream single translation buffer miss.
927//
928// Function:
929// Do a virtual fetch of the PTE, and fill the DTB if the PTE is valid.
930// Can trap into DTBMISS_DOUBLE.
931// This routine can use the PALshadow registers r8, r9, and r10
932//-
933
934 HDW_VECTOR(PAL_DTB_MISS_ENTRY)
935Trap_Dtbmiss_Single:
936#if real_mm == 0
937 // Simple 1-1 va->pa mapping
938 mfpr r8, va // E0
939 srl r8, page_offset_size_bits, r9
940
941 sll r9, 32, r9
942 lda r9, 0x3301(r9) // Make PTE, V set, all KRE, URE, KWE, UWE
943
944 mtpr r9, dtb_pte // E0
945 nop // Pad to align to E0
946
947
948
949 mtpr r8, dtb_tag // E0
950 nop
951
952 nop // Pad tag write
953 nop
954
955 nop // Pad tag write
956 nop
957
958 hw_rei
959#else
960 mfpr r8, ev5__va_form // Get virtual address of PTE - 1 cycle delay. E0.
961 mfpr r10, exc_addr // Get PC of faulting instruction in case of error. E1.
962
963// DEBUGSTORE(0x45)
964// DEBUG_EXC_ADDR()
965 // Real MM mapping
966 mfpr r9, ev5__mm_stat // Get read/write bit. E0.
967 mtpr r10, pt6 // Stash exc_addr away
968
969pal_dtb_ldq:
970 ld_vpte r8, 0(r8) // Get PTE, traps to DTBMISS_DOUBLE in case of TBmiss
971 nop // Pad MF VA
972
973 mfpr r10, ev5__va // Get original faulting VA for TB load. E0.
974 nop
975
976 mtpr r8, ev5__dtb_pte // Write DTB PTE part. E0.
977 blbc r8, invalid_dpte_handler // Handle invalid PTE
978
979 mtpr r10, ev5__dtb_tag // Write DTB TAG part, completes DTB load. No virt ref for 3 cycles.
980 mfpr r10, pt6
981
982 // Following 2 instructions take 2 cycles
983 mtpr r10, exc_addr // Return linkage in case we trapped. E1.
984 mfpr r31, pt0 // Pad the write to dtb_tag
985
986 hw_rei // Done, return
987#endif
988
989
990
991
992// .sbttl "DTBMISS_DOUBLE - Dstream Double TBmiss Trap Entry Point"
993
994//+
995// DTBMISS_DOUBLE - offset 0280
996// Entry:
997// Vectored into via hardware trap on Double TBmiss from single miss flows.
998//
999// r8 - faulting VA
1000// r9 - original MMstat
1001// r10 - original exc_addr (both itb,dtb miss)
1002// pt6 - original exc_addr (dtb miss flow only)
1003// VA IPR - locked with original faulting VA
1004//
1005// Function:
1006// Get PTE, if valid load TB and return.
1007// If not valid then take TNV/ACV exception.
1008//
1009// pt4 and pt5 are reserved for this flow.
1010//
1011//
1012//-
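//
// Worked example of the segment extraction below, assuming 8KB pages
// (page_offset_size_bits = 13) and 10-bit segment fields
// (page_seg_size_bits = 10): the first sll/srl pair is sll 31 / srl 51,
// which isolates VA<32:23> * 8; the second is sll 41 / srl 51, which
// isolates VA<22:13> * 8. Each result is a byte offset (entry index * 8)
// into the corresponding page-table page.
//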
1013
1014 HDW_VECTOR(PAL_DOUBLE_MISS_ENTRY)
1015Trap_Dtbmiss_double:
1016#if ldvpte_bug_fix != 0
1017 mtpr r8, pt4 // save r8 to do exc_addr check
1018 mfpr r8, exc_addr
1019 blbc r8, Trap_Dtbmiss_Single //if not in palmode, should be in the single routine, dummy!
1020 mfpr r8, pt4 // restore r8
1021#endif
1022 nop
1023 mtpr r22, pt5 // Get some scratch space. E1.
1024 // Due to virtual scheme, we can skip the first lookup and go
1025 // right to fetch of level 2 PTE
1026 sll r8, (64-((2*page_seg_size_bits)+page_offset_size_bits)), r22 // Clean off upper bits of VA
1027 mtpr r21, pt4 // Get some scratch space. E1.
1028
1029 srl r22, 61-page_seg_size_bits, r22 // Get Va<seg1>*8
1030 mfpr r21, pt_ptbr // Get physical address of the page table.
1031
1032 nop
1033 addq r21, r22, r21 // Index into page table for level 2 PTE.
1034
1035 sll r8, (64-((1*page_seg_size_bits)+page_offset_size_bits)), r22 // Clean off upper bits of VA
1036 ldqp r21, 0(r21) // Get level 2 PTE (addr<2:0> ignored)
1037
1038 srl r22, 61-page_seg_size_bits, r22 // Get Va<seg2>*8
1039 blbc r21, double_pte_inv // Check for Invalid PTE.
1040
1041 srl r21, 32, r21 // extract PFN from PTE
1042 sll r21, page_offset_size_bits, r21 // get PFN * 2^13 for add to <seg3>*8
1043
1044 addq r21, r22, r21 // Index into page table for level 3 PTE.
1045 nop
1046
1047 ldqp r21, 0(r21) // Get level 3 PTE (addr<2:0> ignored)
1048 blbc r21, double_pte_inv // Check for invalid PTE.
1049
1050 mtpr r21, ev5__dtb_pte // Write the PTE. E0.
1051 mfpr r22, pt5 // Restore scratch register
1052
1053 mtpr r8, ev5__dtb_tag // Write the TAG. E0. No virtual references in subsequent 3 cycles.
1054 mfpr r21, pt4 // Restore scratch register
1055
1056 nop // Pad write to tag.
1057 nop
1058
1059 nop // Pad write to tag.
1060 nop
1061
1062 hw_rei
1063
1064
1065
1066// .sbttl "UNALIGN -- Dstream unalign trap"
1067//+
1068// UNALIGN - offset 0300
1069// Entry:
1070// Vectored into via hardware trap on unaligned Dstream reference.
1071//
1072// Function:
1073// Build stack frame
1074// a0 <- Faulting VA
1075// a1 <- Opcode
1076// a2 <- src/dst register number
1077// vector via entUna
1078//-
1079
1080 HDW_VECTOR(PAL_UNALIGN_ENTRY)
1081Trap_Unalign:
1082/* DEBUGSTORE(0x47)*/
1083 sll r11, 63-osfps_v_mode, r25 // Shift mode up to MS bit
1084 mtpr r31, ev5__ps // Set Ibox current mode to kernel
1085
1086 mfpr r8, ev5__mm_stat // Get mmstat --ok to use r8, no tbmiss
1087 mfpr r14, exc_addr // get pc
1088
1089 srl r8, mm_stat_v_ra, r13 // Shift Ra field to ls bits
1090 blbs r14, pal_pal_bug_check // Bugcheck if unaligned in PAL
1091
1092 blbs r8, UNALIGN_NO_DISMISS // lsb only set on store or fetch_m
1093 // not set, must be a load
1094 and r13, 0x1F, r8 // isolate ra
1095
1096 cmpeq r8, 0x1F, r8 // check for r31/F31
1097 bne r8, dfault_fetch_ldr31_err // if its a load to r31 or f31 -- dismiss the fault
1098
1099UNALIGN_NO_DISMISS:
1100 bis r11, r31, r12 // Save PS
1101 bge r25, UNALIGN_NO_DISMISS_10_ // no stack swap needed if cm=kern
1102
1103
1104 mtpr r31, ev5__dtb_cm // Set Mbox current mode to kernel -
1105 // no virt ref for next 2 cycles
1106 mtpr r30, pt_usp // save user stack
1107
1108 bis r31, r31, r12 // Set new PS
1109 mfpr r30, pt_ksp
1110
1111UNALIGN_NO_DISMISS_10_:
1112 mfpr r25, ev5__va // Unlock VA
1113 lda sp, 0-osfsf_c_size(sp)// allocate stack space
1114
1115 mtpr r25, pt0 // Stash VA
1116 stq r18, osfsf_a2(sp) // a2
1117
1118 stq r11, osfsf_ps(sp) // save old ps
1119 srl r13, mm_stat_v_opcode-mm_stat_v_ra, r25// Isolate opcode
1120
1121 stq r29, osfsf_gp(sp) // save gp
1122 addq r14, 4, r14 // inc PC past the ld/st
1123
1124 stq r17, osfsf_a1(sp) // a1
1125 and r25, mm_stat_m_opcode, r17// Clean opcode for a1
1126
1127 stq r16, osfsf_a0(sp) // save regs
1128 mfpr r16, pt0 // a0 <- va/unlock
1129
1130 stq r14, osfsf_pc(sp) // save pc
1131 mfpr r25, pt_entuna // get entry point
1132
1133
1134 bis r12, r31, r11 // update ps
1135 br r31, unalign_trap_cont
1136
1137
1138
1139
1140// .sbttl "DFAULT - Dstream Fault Trap Entry Point"
1141
1142//+
1143// DFAULT - offset 0380
1144// Entry:
1145// Vectored into via hardware trap on dstream fault or sign check error on DVA.
1146//
1147// Function:
1148// Ignore faults on FETCH/FETCH_M
1149// Check for DFAULT in PAL
1150// Build stack frame
1151// a0 <- Faulting VA
1152// a1 <- MMCSR (1 for ACV, 2 for FOR, 4 for FOW)
1153// a2 <- R/W
1154// vector via entMM
1155//
1156//-
1157 HDW_VECTOR(PAL_D_FAULT_ENTRY)
1158Trap_Dfault:
1159// DEBUGSTORE(0x48)
1160 sll r11, 63-osfps_v_mode, r25 // Shift mode up to MS bit
1161 mtpr r31, ev5__ps // Set Ibox current mode to kernel
1162
1163 mfpr r13, ev5__mm_stat // Get mmstat
1164 mfpr r8, exc_addr // get pc, preserve r14
1165
1166 srl r13, mm_stat_v_opcode, r9 // Shift opcode field to ls bits
1167 blbs r8, dfault_in_pal
1168
1169 bis r8, r31, r14 // move exc_addr to correct place
1170 bis r11, r31, r12 // Save PS
1171
1172 mtpr r31, ev5__dtb_cm // Set Mbox current mode to kernel -
1173 // no virt ref for next 2 cycles
1174 and r9, mm_stat_m_opcode, r9 // Clean all but opcode
1175
1176 cmpeq r9, evx_opc_sync, r9 // Is the opcode fetch/fetchm?
1177 bne r9, dfault_fetch_ldr31_err // Yes, dismiss the fault
1178
1179 //dismiss exception if load to r31/f31
1180 blbs r13, dfault_no_dismiss // mm_stat<0> set on store or fetchm
1181
1182 // not a store or fetch, must be a load
1183 srl r13, mm_stat_v_ra, r9 // Shift rnum to low bits
1184
1185 and r9, 0x1F, r9 // isolate rnum
1186 nop
1187
1188 cmpeq r9, 0x1F, r9 // Is the rnum r31 or f31?
1189 bne r9, dfault_fetch_ldr31_err // Yes, dismiss the fault
1190
1191dfault_no_dismiss:
1192 and r13, 0xf, r13 // Clean extra bits in mm_stat
1193 bge r25, dfault_trap_cont // no stack swap needed if cm=kern
1194
1195
1196 mtpr r30, pt_usp // save user stack
1197 bis r31, r31, r12 // Set new PS
1198
1199 mfpr r30, pt_ksp
1200 br r31, dfault_trap_cont
1201
1202
1203
1204
1205
1206// .sbttl "MCHK - Machine Check Trap Entry Point"
1207
1208//+
1209// MCHK - offset 0400
1210// Entry:
1211// Vectored into via hardware trap on machine check.
1212//
1213// Function:
1214//
1215//-
1216
1217 HDW_VECTOR(PAL_MCHK_ENTRY)
1218Trap_Mchk:
1219 DEBUGSTORE(0x49)
1220 mtpr r31, ic_flush_ctl // Flush the Icache
1221 br r31, sys_machine_check
1222
1223
1224
1225
1226// .sbttl "OPCDEC - Illegal Opcode Trap Entry Point"
1227
1228//+
1229// OPCDEC - offset 0480
1230// Entry:
1231// Vectored into via hardware trap on illegal opcode.
1232//
1233// Build stack frame
1234// a0 <- code
1235// a1 <- unpred
1236// a2 <- unpred
1237// vector via entIF
1238//
1239//-
1240
1241 HDW_VECTOR(PAL_OPCDEC_ENTRY)
1242Trap_Opcdec:
1243 DEBUGSTORE(0x4a)
1244//simos DEBUG_EXC_ADDR()
1245 sll r11, 63-osfps_v_mode, r25 // Shift mode up to MS bit
1246 mtpr r31, ev5__ps // Set Ibox current mode to kernel
1247
1248 mfpr r14, exc_addr // get pc
1249 blbs r14, pal_pal_bug_check // check opcdec in palmode
1250
1251 bis r11, r31, r12 // Save PS
1252 bge r25, TRAP_OPCDEC_10_ // no stack swap needed if cm=kern
1253
1254
1255 mtpr r31, ev5__dtb_cm // Set Mbox current mode to kernel -
1256 // no virt ref for next 2 cycles
1257 mtpr r30, pt_usp // save user stack
1258
1259 bis r31, r31, r12 // Set new PS
1260 mfpr r30, pt_ksp
1261
1262TRAP_OPCDEC_10_:
1263 lda sp, 0-osfsf_c_size(sp)// allocate stack space
1264 addq r14, 4, r14 // inc pc
1265
1266 stq r16, osfsf_a0(sp) // save regs
1267 bis r31, osf_a0_opdec, r16 // set a0
1268
1269 stq r11, osfsf_ps(sp) // save old ps
1270 mfpr r13, pt_entif // get entry point
1271
1272 stq r18, osfsf_a2(sp) // a2
1273 stq r17, osfsf_a1(sp) // a1
1274
1275 stq r29, osfsf_gp(sp) // save gp
1276 stq r14, osfsf_pc(sp) // save pc
1277
1278 bis r12, r31, r11 // update ps
1279 mtpr r13, exc_addr // load exc_addr with entIF
1280 // 1 cycle to hw_rei, E1
1281
1282 mfpr r29, pt_kgp // get the kgp, E1
1283
1284 hw_rei_spe // done, E1
1285
1286
1287
1288
1289
1290
1291// .sbttl "ARITH - Arithmetic Exception Trap Entry Point"
1292
1293//+
1294// ARITH - offset 0500
1295// Entry:
1296// Vectored into via hardware trap on arithmetic exception.
1297//
1298// Function:
1299// Build stack frame
1300// a0 <- exc_sum
1301// a1 <- exc_mask
1302// a2 <- unpred
1303// vector via entArith
1304//
1305//-
1306 HDW_VECTOR(PAL_ARITH_ENTRY)
1307Trap_Arith:
1308 DEBUGSTORE(0x4b)
1309 and r11, osfps_m_mode, r12 // get mode bit
1310 mfpr r31, ev5__va // unlock mbox
1311
1312 bis r11, r31, r25 // save ps
1313 mfpr r14, exc_addr // get pc
1314
1315 nop
1316 blbs r14, pal_pal_bug_check // arith trap from PAL
1317
1318 mtpr r31, ev5__dtb_cm // Set Mbox current mode to kernel -
1319 // no virt ref for next 2 cycles
1320 beq r12, TRAP_ARITH_10_ // if zero we are in kern now
1321
1322 bis r31, r31, r25 // set the new ps
1323 mtpr r30, pt_usp // save user stack
1324
1325 nop
1326 mfpr r30, pt_ksp // get kern stack
1327
1328TRAP_ARITH_10_: lda sp, 0-osfsf_c_size(sp) // allocate stack space
1329 mtpr r31, ev5__ps // Set Ibox current mode to kernel
1330
1331 nop // Pad current mode write and stq
1332 mfpr r13, ev5__exc_sum // get the exc_sum
1333
1334 mfpr r12, pt_entarith
1335 stq r14, osfsf_pc(sp) // save pc
1336
1337 stq r17, osfsf_a1(sp)
1338 mfpr r17, ev5__exc_mask // Get exception register mask IPR - no mtpr exc_sum in next cycle
1339
1340 stq r11, osfsf_ps(sp) // save ps
1341 bis r25, r31, r11 // set new ps
1342
1343 stq r16, osfsf_a0(sp) // save regs
1344 srl r13, exc_sum_v_swc, r16// shift data to correct position
1345
1346 stq r18, osfsf_a2(sp)
1347// pvc_violate 354 // ok, but make sure reads of exc_mask/sum are not in same trap shadow
1348 mtpr r31, ev5__exc_sum // Unlock exc_sum and exc_mask
1349
1350 stq r29, osfsf_gp(sp)
1351 mtpr r12, exc_addr // Set new PC - 1 bubble to hw_rei - E1
1352
1353 mfpr r29, pt_kgp // get the kern gp - E1
1354 hw_rei_spe // done - E1
1355
1356
1357
1358
1359
1360
1361// .sbttl "FEN - Illegal Floating Point Operation Trap Entry Point"
1362
1363//+
1364// FEN - offset 0580
1365// Entry:
1366// Vectored into via hardware trap on illegal FP op.
1367//
1368// Function:
1369// Build stack frame
1370// a0 <- code
1371// a1 <- unpred
1372// a2 <- unpred
1373// vector via entIF
1374//
1375//-
1376
1377 HDW_VECTOR(PAL_FEN_ENTRY)
1378Trap_Fen:
1379 sll r11, 63-osfps_v_mode, r25 // Shift mode up to MS bit
1380 mtpr r31, ev5__ps // Set Ibox current mode to kernel
1381
1382 mfpr r14, exc_addr // get pc
1383 blbs r14, pal_pal_bug_check // check opcdec in palmode
1384
1385 mfpr r13, ev5__icsr
1386 nop
1387
1388 bis r11, r31, r12 // Save PS
1389 bge r25, TRAP_FEN_10_ // no stack swap needed if cm=kern
1390
1391 mtpr r31, ev5__dtb_cm // Set Mbox current mode to kernel -
1392 // no virt ref for next 2 cycles
1393 mtpr r30, pt_usp // save user stack
1394
1395 bis r31, r31, r12 // Set new PS
1396 mfpr r30, pt_ksp
1397
1398TRAP_FEN_10_:
1399 lda sp, 0-osfsf_c_size(sp)// allocate stack space
1400 srl r13, icsr_v_fpe, r25 // Shift FP enable to bit 0
1401
1402
1403 stq r16, osfsf_a0(sp) // save regs
1404 mfpr r13, pt_entif // get entry point
1405
1406 stq r18, osfsf_a2(sp) // a2
1407 stq r11, osfsf_ps(sp) // save old ps
1408
1409 stq r29, osfsf_gp(sp) // save gp
1410 bis r12, r31, r11 // set new ps
1411
1412 stq r17, osfsf_a1(sp) // a1
1413 blbs r25,fen_to_opcdec // If FP is enabled, this is really OPCDEC.
1414
1415 bis r31, osf_a0_fen, r16 // set a0
1416 stq r14, osfsf_pc(sp) // save pc
1417
1418 mtpr r13, exc_addr // load exc_addr with entIF
1419 // 1 cycle to hw_rei -E1
1420
1421 mfpr r29, pt_kgp // get the kgp -E1
1422
1423 hw_rei_spe // done -E1
1424
1425// FEN trap was taken, but the fault is really opcdec.
1426 ALIGN_BRANCH
1427fen_to_opcdec:
1428 addq r14, 4, r14 // save PC+4
1429 bis r31, osf_a0_opdec, r16 // set a0
1430
1431 stq r14, osfsf_pc(sp) // save pc
1432 mtpr r13, exc_addr // load exc_addr with entIF
1433 // 1 cycle to hw_rei
1434
1435 mfpr r29, pt_kgp // get the kgp
1436 hw_rei_spe // done
1437
1438
1439
1440// .sbttl "Misc handlers"
1441 // Start area for misc code.
1442//+
1443//dfault_trap_cont
1444// A dfault trap has been taken. The sp has been updated if necessary.
1445// Push a stack frame and vector via entMM.
1446//
1447// Current state:
1448// r12 - new PS
1449// r13 - MMstat
1450// VA - locked
1451//
1452//-
1453 ALIGN_BLOCK
1454dfault_trap_cont:
1455 lda sp, 0-osfsf_c_size(sp)// allocate stack space
1456 mfpr r25, ev5__va // Fetch VA/unlock
1457
1458 stq r18, osfsf_a2(sp) // a2
1459 and r13, 1, r18 // Clean r/w bit for a2
1460
1461 stq r16, osfsf_a0(sp) // save regs
1462 bis r25, r31, r16 // a0 <- va
1463
1464 stq r17, osfsf_a1(sp) // a1
1465 srl r13, 1, r17 // shift fault bits to right position
1466
1467 stq r11, osfsf_ps(sp) // save old ps
1468 bis r12, r31, r11 // update ps
1469
1470 stq r14, osfsf_pc(sp) // save pc
1471 mfpr r25, pt_entmm // get entry point
1472
1473 stq r29, osfsf_gp(sp) // save gp
1474 cmovlbs r17, 1, r17 // a1. acv overrides for/fow.
1475
1476 mtpr r25, exc_addr // load exc_addr with entMM
1477 // 1 cycle to hw_rei
1478 mfpr r29, pt_kgp // get the kgp
1479
1480 hw_rei_spe // done
1481
1482//+
1483//unalign_trap_cont
1484// An unalign trap has been taken. Just need to finish up a few things.
1485//
1486// Current state:
1487// r25 - entUna
1488// r13 - shifted MMstat
1489//
1490//-
1491 ALIGN_BLOCK
1492unalign_trap_cont:
1493 mtpr r25, exc_addr // load exc_addr with entUna
1494 // 1 cycle to hw_rei
1495
1496
1497 mfpr r29, pt_kgp // get the kgp
1498 and r13, mm_stat_m_ra, r18 // Clean Ra for a2
1499
1500 hw_rei_spe // done
1501
1502
1503
1504//+
1505// dfault_in_pal
1506// Dfault trap was taken, exc_addr points to a PAL PC.
1507// r9 - mmstat<opcode> right justified
1508// r8 - exception address
1509//
1510// These are the cases:
1511// opcode was STQ -- from a stack builder, KSP not valid halt
1512// r14 - original exc_addr
1513// r11 - original PS
1514// opcode was STL_C -- rti or retsys clear lock_flag by stack write,
1515// KSP not valid halt
1516// r11 - original PS
1517// r14 - original exc_addr
1518// opcode was LDQ -- retsys or rti stack read, KSP not valid halt
1519// r11 - original PS
1520// r14 - original exc_addr
1521// opcode was HW_LD -- itbmiss or dtbmiss, bugcheck due to fault on page tables
1522// r10 - original exc_addr
1523// r11 - original PS
1524//
1525//
1526//-
1527 ALIGN_BLOCK
1528dfault_in_pal:
1529 DEBUGSTORE(0x50)
1530 bic r8, 3, r8 // Clean PC
1531 mfpr r9, pal_base
1532
1533 mfpr r31, va // unlock VA
1534#if real_mm != 0
1535 // if not real_mm, should never get here from miss flows
1536
1537 subq r9, r8, r8 // pal_base - offset
1538
1539 lda r9, pal_itb_ldq-pal_base(r8)
1540 nop
1541
1542 beq r9, dfault_do_bugcheck
1543 lda r9, pal_dtb_ldq-pal_base(r8)
1544
1545 beq r9, dfault_do_bugcheck
1546#endif
1547
1548//
1549// KSP invalid halt case --
1550ksp_inval_halt:
1551 DEBUGSTORE(76)
1552 bic r11, osfps_m_mode, r11 // set ps to kernel mode
1553 mtpr r0, pt0
1554
1555 mtpr r31, dtb_cm // Make sure that the CM IPRs are all kernel mode
1556 mtpr r31, ips
1557
1558 mtpr r14, exc_addr // Set PC to instruction that caused trouble
1559//orig pvc_jsr updpcb, bsr=1
1560 bsr r0, pal_update_pcb // update the pcb
1561
1562 lda r0, hlt_c_ksp_inval(r31) // set halt code to ksp invalid halt
1563 br r31, sys_enter_console // enter the console
1564
1565 ALIGN_BRANCH
1566dfault_do_bugcheck:
1567 bis r10, r31, r14 // bugcheck expects exc_addr in r14
1568 br r31, pal_pal_bug_check
1569
1570
1571 ALIGN_BLOCK
1572//+
1573// dfault_fetch_ldr31_err - ignore faults on fetch(m) and loads to r31/f31
1574// On entry -
1575// r14 - exc_addr
1576// VA is locked
1577//
1578//-
1579dfault_fetch_ldr31_err:
1580 mtpr r11, ev5__dtb_cm
1581 mtpr r11, ev5__ps // Make sure ps hasn't changed
1582
1583 mfpr r31, va // unlock the mbox
1584 addq r14, 4, r14 // inc the pc to skip the fetch
1585
1586 mtpr r14, exc_addr // give ibox new PC
1587 mfpr r31, pt0 // pad exc_addr write
1588
1589 hw_rei
1590
1591
1592
1593 ALIGN_BLOCK
1594//+
1595// sys_from_kern
1596// callsys from kernel mode - OS bugcheck machine check
1597//
1598//-
1599sys_from_kern:
1600 mfpr r14, exc_addr // PC points to call_pal
1601 subq r14, 4, r14
1602
1603 lda r25, mchk_c_os_bugcheck(r31) // fetch mchk code
1604 br r31, pal_pal_mchk
1605
1606
1607// .sbttl "Continuation of long call_pal flows"
1608 ALIGN_BLOCK
1609//+
1610// wrent_tbl
1611// Table to write *int in paltemps.
1612// 4 instructions/entry
1613// r16 has new value
1614//
1615//-
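//
// Each entry is 4 instructions (16 bytes), so a dispatcher can reach entry n
// at wrent_tbl + n*16. Illustrative sketch only (the actual CALL_PAL wrent
// flow lives elsewhere in this file; the selector register is assumed):
// lda r12, wrent_tbl
// sll r17, 4, r13 // entry selector assumed in r17
// addq r12, r13, r12
// jmp r31, (r12)
//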
1616wrent_tbl:
1617//orig pvc_jsr wrent, dest=1
1618 nop
1619 mtpr r16, pt_entint
1620
1621 mfpr r31, pt0 // Pad for mt->mf paltemp rule
1622 hw_rei
1623
1624
1625//orig pvc_jsr wrent, dest=1
1626 nop
1627 mtpr r16, pt_entarith
1628
1629 mfpr r31, pt0 // Pad for mt->mf paltemp rule
1630 hw_rei
1631
1632
1633//orig pvc_jsr wrent, dest=1
1634 nop
1635 mtpr r16, pt_entmm
1636
1637 mfpr r31, pt0 // Pad for mt->mf paltemp rule
1638 hw_rei
1639
1640
1641//orig pvc_jsr wrent, dest=1
1642 nop
1643 mtpr r16, pt_entif
1644
1645 mfpr r31, pt0 // Pad for mt->mf paltemp rule
1646 hw_rei
1647
1648
1649//orig pvc_jsr wrent, dest=1
1650 nop
1651 mtpr r16, pt_entuna
1652
1653 mfpr r31, pt0 // Pad for mt->mf paltemp rule
1654 hw_rei
1655
1656
1657//orig pvc_jsr wrent, dest=1
1658 nop
1659 mtpr r16, pt_entsys
1660
1661 mfpr r31, pt0 // Pad for mt->mf paltemp rule
1662 hw_rei
1663
1664 ALIGN_BLOCK
1665//+
1666// tbi_tbl
1667// Table to do tbi instructions
1668// 4 instructions per entry
1669//-
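//
// Entries are 16 bytes apart, ordered by TBI type: tbia (-2) at +0x00,
// tbiap (-1) at +0x10, unused (0) at +0x20, tbisi (1) at +0x30,
// tbisd (2) at +0x40, tbis (3) at +0x50. A dispatcher can therefore compute
// tbi_tbl + (type + 2) * 16 (illustrative only -- the actual call_pal tbi
// flow is elsewhere in this file).
//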
1670tbi_tbl:
1671 // -2 tbia
1672//orig pvc_jsr tbi, dest=1
1673 mtpr r31, ev5__dtb_ia // Flush DTB
1674 mtpr r31, ev5__itb_ia // Flush ITB
1675
1676#if icflush_on_tbix != 0
1677
1678
1679 br r31, pal_ic_flush // Flush Icache
1680#else
1681
1682 hw_rei_stall
1683#endif
1684
1685 nop // Pad table
1686
1687 // -1 tbiap
1688//orig pvc_jsr tbi, dest=1
1689 mtpr r31, ev5__dtb_iap // Flush DTB
1690 mtpr r31, ev5__itb_iap // Flush ITB
1691
1692#if icflush_on_tbix != 0
1693
1694
1695 br r31, pal_ic_flush // Flush Icache
1696#else
1697
1698 hw_rei_stall
1699#endif
1700
1701 nop // Pad table
1702
1703
1704 // 0 unused
1705//orig pvc_jsr tbi, dest=1
1706 hw_rei // Pad table
1707 nop
1708 nop
1709 nop
1710
1711
1712 // 1 tbisi
1713//orig pvc_jsr tbi, dest=1
1714#if icflush_on_tbix != 0
1715
1716
1717
1718 nop
1719 br r31, pal_ic_flush_and_tbisi // Flush Icache
1720 nop
1721 nop // Pad table
1722#else
1723
1724 nop
1725 nop
1726 mtpr r17, ev5__itb_is // Flush ITB
1727 hw_rei_stall
1728#endif
1729
1730
1731
1732 // 2 tbisd
1733//orig pvc_jsr tbi, dest=1
1734 mtpr r17, ev5__dtb_is // Flush DTB.
1735 nop
1736
1737 nop
1738 hw_rei_stall
1739
1740
1741 // 3 tbis
1742//orig pvc_jsr tbi, dest=1
1743 mtpr r17, ev5__dtb_is // Flush DTB
1744#if icflush_on_tbix != 0
1745
1746
1747 br r31, pal_ic_flush_and_tbisi // Flush Icache and ITB
1748#else
1749 br r31, tbi_finish
1750 ALIGN_BRANCH
1751tbi_finish:
1752 mtpr r17, ev5__itb_is // Flush ITB
1753 hw_rei_stall
1754#endif
1755
1756
1757
1758 ALIGN_BLOCK
1759//+
1760// bpt_bchk_common:
1761// Finish up the bpt/bchk instructions
1762//-
1763bpt_bchk_common:
1764 stq r18, osfsf_a2(sp) // a2
1765 mfpr r13, pt_entif // get entry point
1766
1767 stq r12, osfsf_ps(sp) // save old ps
1768 stq r14, osfsf_pc(sp) // save pc
1769
1770 stq r29, osfsf_gp(sp) // save gp
1771 mtpr r13, exc_addr // load exc_addr with entIF
1772 // 1 cycle to hw_rei
1773
1774 mfpr r29, pt_kgp // get the kgp
1775
1776
1777 hw_rei_spe // done
1778
1779
1780 ALIGN_BLOCK
1781//+
1782// rti_to_user
1783// Finish up the rti instruction
1784//-
1785rti_to_user:
1786 mtpr r11, ev5__dtb_cm // set Mbox current mode - no virt ref for 2 cycles
1787 mtpr r11, ev5__ps // set Ibox current mode - 2 bubble to hw_rei
1788
1789 mtpr r31, ev5__ipl // set the ipl. No hw_rei for 2 cycles
1790 mtpr r25, pt_ksp // save off in case RTI to user
1791
1792 mfpr r30, pt_usp
1793 hw_rei_spe // and back
1794
1795
1796 ALIGN_BLOCK
1797//+
1798// rti_to_kern
1799// Finish up the rti instruction
1800//-
1801rti_to_kern:
1802 and r12, osfps_m_ipl, r11 // clean ps
1803 mfpr r12, pt_intmask // get int mask
1804
1805 extbl r12, r11, r12 // get mask for this ipl
1806 mtpr r25, pt_ksp // save off in case RTI to user
1807
1808 mtpr r12, ev5__ipl // set the new ipl.
1809 or r25, r31, sp // sp
1810
1811// pvc_violate 217 // possible hidden mt->mf ipl not a problem in callpals
1812 hw_rei
1813
1814 ALIGN_BLOCK
1815//+
1816// swpctx_cont
1817// Finish up the swpctx instruction
1818//-
1819
1820swpctx_cont:
1821#if ev5_p1 != 0
1822
1823
1824 bic r25, r24, r25 // clean icsr<FPE>
1825 get_impure r8 // get impure pointer
1826
1827 sll r12, icsr_v_fpe, r12 // shift new fen to pos
1828 fix_impure_ipr r8 // adjust impure pointer
1829
1830 restore_reg1 pmctr_ctl, r8, r8, ipr=1 // "ldqp" - get pmctr_ctl bits
1831 srl r23, 32, r24 // move asn to low asn pos
1832
1833 ldqp r14, osfpcb_q_mmptr(r16)// get new mmptr
1834 srl r22, osfpcb_v_pme, r22 // get pme down to bit 0
1835
1836 or r25, r12, r25 // icsr with new fen
1837 sll r24, itb_asn_v_asn, r12
1838
1839#else
1840
1841 bic r25, r24, r25 // clean icsr<FPE,PMP>
1842 sll r12, icsr_v_fpe, r12 // shift new fen to pos
1843
1844 ldqp r14, osfpcb_q_mmptr(r16)// get new mmptr
1845 srl r22, osfpcb_v_pme, r22 // get pme down to bit 0
1846
1847 or r25, r12, r25 // icsr with new fen
1848 srl r23, 32, r24 // move asn to low asn pos
1849
1850 and r22, 1, r22
1851 sll r24, itb_asn_v_asn, r12
1852
1853 sll r22, icsr_v_pmp, r22
1854 nop
1855
1856 or r25, r22, r25 // icsr with new pme
1857#endif
1858
1859 sll r24, dtb_asn_v_asn, r24
1860
1861 subl r23, r13, r13 // gen new cc offset
1862 mtpr r12, itb_asn // no hw_rei_stall in 0,1,2,3,4
1863
1864 mtpr r24, dtb_asn // Load up new ASN
1865 mtpr r25, icsr // write the icsr
1866
1867 sll r14, page_offset_size_bits, r14 // Move PTBR into internal position.
1868 ldqp r25, osfpcb_q_usp(r16) // get new usp
1869
1870 insll r13, 4, r13 // << 32, move new offset into upper half
1871// pvc_violate 379 // ldqp can't trap except replay. only problem if mf same ipr in same shadow
1872 mtpr r14, pt_ptbr // load the new ptbr
1873
1874 mtpr r13, cc // set new offset
1875 ldqp r30, osfpcb_q_ksp(r16) // get new ksp
1876
1877// pvc_violate 379 // ldqp can't trap except replay. only problem if mf same ipr in same shadow
1878 mtpr r25, pt_usp // save usp
1879
1880#if ev5_p1 != 0
1881
1882
1883 blbc r8, no_pm_change // if monitoring all processes -- no need to change pm
1884
1885 // otherwise, monitoring select processes - update pm
1886 lda r25, 0x3F(r31)
1887 cmovlbc r22, r31, r8 // if pme set, disable counters, otherwise use saved encodings
1888
1889 sll r25, pmctr_v_ctl2, r25 // create ctl field bit mask
1890 mfpr r22, ev5__pmctr
1891
1892 and r8, r25, r8 // mask new ctl value
1893 bic r22, r25, r22 // clear ctl field in pmctr
1894
1895 or r8, r22, r8
1896 mtpr r8, ev5__pmctr
1897
1898no_pm_change:
1899#endif
1900
1901
1902#if osf_chm_fix != 0
1903
1904
1905 p4_fixup_hw_rei_stall // removes this section for Pass 4 by placing a hw_rei_stall here
1906
1907#if build_fixed_image != 0
1908
1909
1910 hw_rei_stall
1911#else
1912
1913 mfpr r9, pt_pcbb // get FEN
1914#endif
1915
1916 ldqp r9, osfpcb_q_fen(r9)
1917 blbc r9, no_pm_change_10_ // skip if FEN disabled
1918
1919 mb // ensure no outstanding fills
1920 lda r12, 1<<dc_mode_v_dc_ena(r31)
1921 mtpr r12, dc_mode // turn dcache on so we can flush it
1922 nop // force correct slotting
1923 mfpr r31, pt0 // no mbox instructions in 1,2,3,4
1924 mfpr r31, pt0 // no mbox instructions in 1,2,3,4
1925 mfpr r31, pt0 // no mbox instructions in 1,2,3,4
1926 mfpr r31, pt0 // no mbox instructions in 1,2,3,4
1927
1928 lda r8, 0(r31) // flood the dcache with junk data
1929no_pm_change_5_: ldqp r31, 0(r8)
1930 lda r8, 0x20(r8) // touch each cache block
1931 srl r8, 13, r9
1932 blbc r9, no_pm_change_5_
1933
1934 mb // ensure no outstanding fills
1935 mtpr r31, dc_mode // turn the dcache back off
1936 nop // force correct slotting
1937 mfpr r31, pt0 // no hw_rei_stall in 0,1
1938#endif
1939
1940
1941no_pm_change_10_: hw_rei_stall // back we go
1942
1943 ALIGN_BLOCK
1944//+
1945// swppal_cont - finish up the swppal call_pal
1946//-
1947
1948swppal_cont:
1949 mfpr r2, pt_misc // get misc bits
1950 sll r0, pt_misc_v_switch, r0 // get the "I've switched" bit
1951 or r2, r0, r2 // set the bit
1952 mtpr r31, ev5__alt_mode // ensure alt_mode set to 0 (kernel)
1953 mtpr r2, pt_misc // update the chip
1954
1955 or r3, r31, r4
1956 mfpr r3, pt_impure // pass pointer to the impure area in r3
1957//orig fix_impure_ipr r3 // adjust impure pointer for ipr read
1958//orig restore_reg1 bc_ctl, r1, r3, ipr=1 // pass cns_bc_ctl in r1
1959//orig restore_reg1 bc_config, r2, r3, ipr=1 // pass cns_bc_config in r2
1960//orig unfix_impure_ipr r3 // restore impure pointer
1961 lda r3, CNS_Q_IPR(r3)
1962 RESTORE_SHADOW(r1,CNS_Q_BC_CTL,r3);
1963 RESTORE_SHADOW(r1,CNS_Q_BC_CFG,r3);
1964 lda r3, -CNS_Q_IPR(r3)
1965
1966 or r31, r31, r0 // set status to success
1967// pvc_violate 1007
1968 jmp r31, (r4) // and call our friend, it's her problem now
1969
1970
1971swppal_fail:
1972 addq r0, 1, r0 // set unknown pal or not loaded
1973 hw_rei // and return
1974
1975
1976// .sbttl "Memory management"
1977
1978 ALIGN_BLOCK
1979//+
1980//foe_ipte_handler
1981// IFOE detected on level 3 pte, sort out FOE vs ACV
1982//
1983// on entry:
1984// with
1985// R8 = pte
1986// R10 = pc
1987//
1988// Function
1989// Determine TNV vs ACV vs FOE. Build stack and dispatch
1990// Will not be here if TNV.
1991//-
1992
1993foe_ipte_handler:
1994 sll r11, 63-osfps_v_mode, r25 // Shift mode up to MS bit
1995 mtpr r31, ev5__ps // Set Ibox current mode to kernel
1996
1997 bis r11, r31, r12 // Save PS for stack write
1998 bge r25, foe_ipte_handler_10_ // no stack swap needed if cm=kern
1999
2000
2001 mtpr r31, ev5__dtb_cm // Set Mbox current mode to kernel -
2002 // no virt ref for next 2 cycles
2003 mtpr r30, pt_usp // save user stack
2004
2005 bis r31, r31, r11 // Set new PS
2006 mfpr r30, pt_ksp
2007
2008 srl r8, osfpte_v_ure-osfpte_v_kre, r8 // move pte user bits to kern
2009 nop
2010
2011foe_ipte_handler_10_: srl r8, osfpte_v_kre, r25 // get kre to <0>
2012 lda sp, 0-osfsf_c_size(sp)// allocate stack space
2013
2014 or r10, r31, r14 // Save pc/va in case TBmiss or fault on stack
2015 mfpr r13, pt_entmm // get entry point
2016
2017 stq r16, osfsf_a0(sp) // a0
2018 or r14, r31, r16 // pass pc/va as a0
2019
2020 stq r17, osfsf_a1(sp) // a1
2021 nop
2022
2023 stq r18, osfsf_a2(sp) // a2
2024 lda r17, mmcsr_c_acv(r31) // assume ACV
2025
2026 stq r16, osfsf_pc(sp) // save pc
2027 cmovlbs r25, mmcsr_c_foe, r17 // otherwise FOE
2028
2029 stq r12, osfsf_ps(sp) // save ps
2030 subq r31, 1, r18 // pass flag of istream as a2
2031
2032 stq r29, osfsf_gp(sp)
2033 mtpr r13, exc_addr // set vector address
2034
2035 mfpr r29, pt_kgp // load kgp
2036 hw_rei_spe // out to exec
2037
2038 ALIGN_BLOCK
2039//+
2040//invalid_ipte_handler
2041// TNV detected on level 3 pte, sort out TNV vs ACV
2042//
2043// on entry:
2044// with
2045// R8 = pte
2046// R10 = pc
2047//
2048// Function
2049// Determine TNV vs ACV. Build stack and dispatch.
2050//-
2051
2052invalid_ipte_handler:
2053 sll r11, 63-osfps_v_mode, r25 // Shift mode up to MS bit
2054 mtpr r31, ev5__ps // Set Ibox current mode to kernel
2055
2056 bis r11, r31, r12 // Save PS for stack write
2057 bge r25, invalid_ipte_handler_10_ // no stack swap needed if cm=kern
2058
2059
2060 mtpr r31, ev5__dtb_cm // Set Mbox current mode to kernel -
2061 // no virt ref for next 2 cycles
2062 mtpr r30, pt_usp // save user stack
2063
2064 bis r31, r31, r11 // Set new PS
2065 mfpr r30, pt_ksp
2066
2067 srl r8, osfpte_v_ure-osfpte_v_kre, r8 // move pte user bits to kern
2068 nop
2069
2070invalid_ipte_handler_10_: srl r8, osfpte_v_kre, r25 // get kre to <0>
2071 lda sp, 0-osfsf_c_size(sp)// allocate stack space
2072
2073 or r10, r31, r14 // Save pc/va in case TBmiss on stack
2074 mfpr r13, pt_entmm // get entry point
2075
2076 stq r16, osfsf_a0(sp) // a0
2077 or r14, r31, r16 // pass pc/va as a0
2078
2079 stq r17, osfsf_a1(sp) // a1
2080 nop
2081
2082 stq r18, osfsf_a2(sp) // a2
2083 and r25, 1, r17 // Isolate kre
2084
2085 stq r16, osfsf_pc(sp) // save pc
2086 xor r17, 1, r17 // map to acv/tnv as a1
2087
2088 stq r12, osfsf_ps(sp) // save ps
2089 subq r31, 1, r18 // pass flag of istream as a2
2090
2091 stq r29, osfsf_gp(sp)
2092 mtpr r13, exc_addr // set vector address
2093
2094 mfpr r29, pt_kgp // load kgp
2095 hw_rei_spe // out to exec
2096
2097
2098
2099
2100 ALIGN_BLOCK
2101//+
2102//invalid_dpte_handler
2103// INVALID detected on level 3 pte, sort out TNV vs ACV
2104//
2105// on entry:
2106// with
2107// R10 = va
2108// R8 = pte
2109// R9 = mm_stat
2110// PT6 = pc
2111//
2112// Function
2113// Determine TNV vs ACV. Build stack and dispatch
2114//-
2115
2116
2117invalid_dpte_handler:
2118 mfpr r12, pt6
2119 blbs r12, tnv_in_pal // Special handler if original faulting reference was in PALmode
2120
2121 bis r12, r31, r14 // save PC in case of tbmiss or fault
2122 srl r9, mm_stat_v_opcode, r25 // shift opc to <0>
2123
2124 mtpr r11, pt0 // Save PS for stack write
2125 and r25, mm_stat_m_opcode, r25 // isolate opcode
2126
2127 cmpeq r25, evx_opc_sync, r25 // is it FETCH/FETCH_M?
2128 blbs r25, nmiss_fetch_ldr31_err // yes
2129
2130 //dismiss exception if load to r31/f31
2131 blbs r9, invalid_dpte_no_dismiss // mm_stat<0> set on store or fetchm
2132
2133 // not a store or fetch, must be a load
2134 srl r9, mm_stat_v_ra, r25 // Shift rnum to low bits
2135
2136 and r25, 0x1F, r25 // isolate rnum
2137 nop
2138
2139 cmpeq r25, 0x1F, r25 // Is the rnum r31 or f31?
2140 bne r25, nmiss_fetch_ldr31_err // Yes, dismiss the fault
2141
2142invalid_dpte_no_dismiss:
2143 sll r11, 63-osfps_v_mode, r25 // Shift mode up to MS bit
2144 mtpr r31, ev5__ps // Set Ibox current mode to kernel
2145
2146 mtpr r31, ev5__dtb_cm // Set Mbox current mode to kernel -
2147 // no virt ref for next 2 cycles
2148 bge r25, invalid_dpte_no_dismiss_10_ // no stack swap needed if cm=kern
2149
2150 srl r8, osfpte_v_ure-osfpte_v_kre, r8 // move pte user bits to kern
2151 mtpr r30, pt_usp // save user stack
2152
2153 bis r31, r31, r11 // Set new PS
2154 mfpr r30, pt_ksp
2155
2156invalid_dpte_no_dismiss_10_: srl r8, osfpte_v_kre, r12 // get kre to <0>
2157 lda sp, 0-osfsf_c_size(sp)// allocate stack space
2158
2159 or r10, r31, r25 // Save va in case TBmiss on stack
2160 and r9, 1, r13 // save r/w flag
2161
2162 stq r16, osfsf_a0(sp) // a0
2163 or r25, r31, r16 // pass va as a0
2164
2165 stq r17, osfsf_a1(sp) // a1
2166 or r31, mmcsr_c_acv, r17 // assume acv
2167
2168 srl r12, osfpte_v_kwe-osfpte_v_kre, r25 // get write enable to <0>
2169 stq r29, osfsf_gp(sp)
2170
2171 stq r18, osfsf_a2(sp) // a2
2172 cmovlbs r13, r25, r12 // if write access, use write-enable bit for the access check
2173
2174 or r13, r31, r18 // pass flag of dstream access and read vs write
2175 mfpr r25, pt0 // get ps
2176
2177 stq r14, osfsf_pc(sp) // save pc
2178 mfpr r13, pt_entmm // get entry point
2179
2180 stq r25, osfsf_ps(sp) // save ps
2181 mtpr r13, exc_addr // set vector address
2182
2183 mfpr r29, pt_kgp // load kgp
2184 cmovlbs r12, mmcsr_c_tnv, r17 // make a1 be tnv if access ok else acv
2185
2186 hw_rei_spe // out to exec
2187
2188//+
2189//
2190// We come here if we are erring on a dtb_miss, and the instr is a
2191// fetch, fetch_m, or load to r31/f31.
2192// The PC is incremented, and we return to the program,
2193// essentially ignoring the instruction and error.
2194//
2195//-
2196 ALIGN_BLOCK
2197nmiss_fetch_ldr31_err:
2198 mfpr r12, pt6
2199 addq r12, 4, r12 // bump pc to pc+4
2200
2201 mtpr r12, exc_addr // and set entry point
2202 mfpr r31, pt0 // pad exc_addr write
2203
2204 hw_rei //
2205
2206 ALIGN_BLOCK
2207//+
2208// double_pte_inv
2209// We had a single tbmiss which turned into a double tbmiss which found
2210// an invalid PTE. Return to single miss with a fake pte, and the invalid
2211// single miss flow will report the error.
2212//
2213// on entry:
2214// r21 PTE
2215// r22 available
2216// VA IPR locked with original fault VA
2217// pt4 saved r21
2218// pt5 saved r22
2219// pt6 original exc_addr
2220//
2221// on return to tbmiss flow:
2222// r8 fake PTE
2223//
2224//
2225//-
2226double_pte_inv:
2227 srl r21, osfpte_v_kre, r21 // get the kre bit to <0>
2228 mfpr r22, exc_addr // get the pc
2229
2230 lda r22, 4(r22) // inc the pc
2231 lda r8, osfpte_m_prot(r31) // make a fake pte with xre and xwe set
2232
2233 cmovlbc r21, r31, r8 // set to all 0 for acv if pte<kre> is 0
2234 mtpr r22, exc_addr // set for rei
2235
2236 mfpr r21, pt4 // restore regs
2237 mfpr r22, pt5 // restore regs
2238
2239 hw_rei // back to tb miss
2240
2241 ALIGN_BLOCK
2242//+
2243//tnv_in_pal
2244// The only places in PAL that load or store are the
2245// stack builders, rti, or retsys. Any of these means we
2246// need to take a ksp-not-valid halt.
2247//
2248//-
2249tnv_in_pal:
2250
2251
2252 br r31, ksp_inval_halt
2253
2254
2255// .sbttl "Icache flush routines"
2256
2257 ALIGN_BLOCK
2258//+
2259// Common Icache flush routine.
2260//
2261//
2262//-
2263pal_ic_flush:
2264 nop
2265 mtpr r31, ev5__ic_flush_ctl // Icache flush - E1
2266 nop
2267 nop
2268
2269// Now, do 44 NOPs. 3 RFB prefetches (24) + IC buffer, IB, slot, issue (20)
2270 nop
2271 nop
2272 nop
2273 nop
2274
2275 nop
2276 nop
2277 nop
2278 nop
2279
2280 nop
2281 nop // 10
2282
2283 nop
2284 nop
2285 nop
2286 nop
2287
2288 nop
2289 nop
2290 nop
2291 nop
2292
2293 nop
2294 nop // 20
2295
2296 nop
2297 nop
2298 nop
2299 nop
2300
2301 nop
2302 nop
2303 nop
2304 nop
2305
2306 nop
2307 nop // 30
2308 nop
2309 nop
2310 nop
2311 nop
2312
2313 nop
2314 nop
2315 nop
2316 nop
2317
2318 nop
2319 nop // 40
2320
2321 nop
2322 nop
2323
2324one_cycle_and_hw_rei:
2325 nop
2326 nop
2327
2328 hw_rei_stall
2329
2330#if icflush_on_tbix != 0
2331
2332
2333 ALIGN_BLOCK
2334
2335//+
2336// Common Icache flush and ITB invalidate single routine.
2337// ITBIS and hw_rei_stall must be in same octaword.
2338// r17 - has address to invalidate
2339//
2340//-
2341PAL_IC_FLUSH_AND_TBISI:
2342 nop
2343 mtpr r31, ev5__ic_flush_ctl // Icache flush - E1
2344 nop
2345 nop
2346
2347// Now, do 44 NOPs. 3 RFB prefetches (24) + IC buffer, IB, slot, issue (20)
2348 nop
2349 nop
2350 nop
2351 nop
2352
2353 nop
2354 nop
2355 nop
2356 nop
2357
2358 nop
2359 nop // 10
2360
2361 nop
2362 nop
2363 nop
2364 nop
2365
2366 nop
2367 nop
2368 nop
2369 nop
2370
2371 nop
2372 nop // 20
2373
2374 nop
2375 nop
2376 nop
2377 nop
2378
2379 nop
2380 nop
2381 nop
2382 nop
2383
2384 nop
2385 nop // 30
2386 nop
2387 nop
2388 nop
2389 nop
2390
2391 nop
2392 nop
2393 nop
2394 nop
2395
2396 nop
2397 nop // 40
2398
2399
2400 nop
2401 nop
2402
2403 nop
2404 nop
2405
2406 // A quadword is 64 bits, so an octaword is 128 bits -> 16 bytes -> 4 instructions
2407 // 44 nops plus 4 instructions before it is 48 instructions.
2408 // Since this routine started on a 32-byte (8 instruction) boundary,
2409// the following 2 instructions will be in the same octaword as required.
2410// ALIGN_BRANCH
2411 mtpr r17, ev5__itb_is // Flush ITB
2412 hw_rei_stall
2413
2414#endif
2415
2416 ALIGN_BLOCK
2417//+
2418//osfpal_calpal_opcdec
2419// Here for all opcdec CALL_PALs
2420//
2421// Build stack frame
2422// a0 <- code
2423// a1 <- unpred
2424// a2 <- unpred
2425// vector via entIF
2426//
2427//-
2428
2429osfpal_calpal_opcdec:
2430 sll r11, 63-osfps_v_mode, r25 // Shift mode up to MS bit
2431 mtpr r31, ev5__ps // Set Ibox current mode to kernel
2432
2433 mfpr r14, exc_addr // get pc
2434 nop
2435
2436 bis r11, r31, r12 // Save PS for stack write
2437 bge r25, osfpal_calpal_opcdec_10_ // no stack swap needed if cm=kern
2438
2439
2440 mtpr r31, ev5__dtb_cm // Set Mbox current mode to kernel -
2441 // no virt ref for next 2 cycles
2442 mtpr r30, pt_usp // save user stack
2443
2444 bis r31, r31, r11 // Set new PS
2445 mfpr r30, pt_ksp
2446
2447osfpal_calpal_opcdec_10_:
2448 lda sp, 0-osfsf_c_size(sp)// allocate stack space
2449 nop
2450
2451 stq r16, osfsf_a0(sp) // save regs
2452 bis r31, osf_a0_opdec, r16 // set a0
2453
2454 stq r18, osfsf_a2(sp) // a2
2455 mfpr r13, pt_entif // get entry point
2456
2457 stq r12, osfsf_ps(sp) // save old ps
2458 stq r17, osfsf_a1(sp) // a1
2459
2460 stq r14, osfsf_pc(sp) // save pc
2461 nop
2462
2463 stq r29, osfsf_gp(sp) // save gp
2464 mtpr r13, exc_addr // load exc_addr with entIF
2465 // 1 cycle to hw_rei
2466
2467 mfpr r29, pt_kgp // get the kgp
2468
2469
2470 hw_rei_spe // done
2471
2472
2473
2474
2475
2476//+
2477//pal_update_pcb
2478// Update the PCB with the current SP, AST, and CC info
2479//
2480// r0 - return linkage
2481//-
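//
// CC merge below: rpcc returns the free-running counter in <31:0> and the
// per-process offset in <63:32>; the srl 32 followed by addl adds the two
// halves, and that 32-bit sum is what gets stored into the PCB as the
// process cycle count.
//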
2482 ALIGN_BLOCK
2483
2484pal_update_pcb:
2485 mfpr r12, pt_pcbb // get pcbb
2486 and r11, osfps_m_mode, r25 // get mode
2487 beq r25, pal_update_pcb_10_ // in kern? no need to update user sp
2488 mtpr r30, pt_usp // save user stack
2489 stqp r30, osfpcb_q_usp(r12) // store usp
2490 br r31, pal_update_pcb_20_ // join common
2491pal_update_pcb_10_: stqp r30, osfpcb_q_ksp(r12) // store ksp
2492pal_update_pcb_20_: rpcc r13 // get cyccounter
2493 srl r13, 32, r14 // move offset
2494 addl r13, r14, r14 // merge for new time
2495 stlp r14, osfpcb_l_cc(r12) // save time
2496
2497//orig pvc_jsr updpcb, bsr=1, dest=1
2498 ret r31, (r0)
2499
2500
2501
2502#if remove_save_state == 0
2503
2504// .sbttl "PAL_SAVE_STATE"
2505//+
2506//
2507// Pal_save_state
2508//
2509// Function
2510// All chip state saved: all PTs, SRs, FRs, IPRs
2511//
2512//
2513// Regs on entry...
2514//
2515// R0 = halt code
2516// pt0 = r0
2517// R1 = pointer to impure
2518// pt4 = r1
2519// R3 = return addr
2520// pt5 = r3
2521//
2522// register usage:
2523// r0 = halt_code
2524// r1 = addr of impure area
2525// r3 = return_address
2526// r4 = scratch
2527//
2528//-
2529
2530
2531 ALIGN_BLOCK
2532 .globl pal_save_state
2533pal_save_state:
2534//
2535//
2536// start of implementation independent save routine
2537//
2538// the impure area is larger than the addressability of hw_ld and hw_st
2539// therefore, we need to play some games: The impure area
2540// is informally divided into the "machine independent" part and the
2541// "machine dependent" part. The state that will be saved in the
2542// "machine independent" part are gpr's, fpr's, hlt, flag, mchkflag (use (un)fix_impure_gpr macros).
2543// All others will be in the "machine dependent" part (use (un)fix_impure_ipr macros).
2544// The impure pointer will need to be adjusted by a different offset for each. The store/restore_reg
2545// macros will automagically adjust the offset correctly.
2546//
2547
2548// The distributed code is commented out and followed by corresponding SRC code.
2549// Beware: SAVE_IPR and RESTORE_IPR blow away r0(v0)
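// Layout used below: the "machine independent" gpr/flag/halt part sits at
// impure+0x200, so r1 is biased by +0x200 for the SAVE_GPR stores and
// un-biased afterwards; the "machine dependent" IPR/PALtemp/shadow part sits
// at impure+CNS_Q_IPR, so r1 is re-biased by CNS_Q_IPR for the SAVE_IPR and
// SAVE_SHADOW stores.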
2550
2551//orig fix_impure_gpr r1 // adjust impure area pointer for stores to "gpr" part of impure area
2552 lda r1, 0x200(r1) // Point to center of CPU segment
2553//orig store_reg1 flag, r31, r1, ipr=1 // clear dump area flag
2554 SAVE_GPR(r31,CNS_Q_FLAG,r1) // Clear the valid flag
2555//orig store_reg1 hlt, r0, r1, ipr=1
2556 SAVE_GPR(r0,CNS_Q_HALT,r1) // Save the halt code
2557
2558 mfpr r0, pt0 // get r0 back //orig
2559//orig store_reg1 0, r0, r1 // save r0
2560 SAVE_GPR(r0,CNS_Q_GPR+0x00,r1) // Save r0
2561
2562 mfpr r0, pt4 // get r1 back //orig
2563//orig store_reg1 1, r0, r1 // save r1
2564 SAVE_GPR(r0,CNS_Q_GPR+0x08,r1) // Save r1
2565
2566//orig store_reg 2 // save r2
2567 SAVE_GPR(r2,CNS_Q_GPR+0x10,r1) // Save r2
2568
2569 mfpr r0, pt5 // get r3 back //orig
2570//orig store_reg1 3, r0, r1 // save r3
2571 SAVE_GPR(r0,CNS_Q_GPR+0x18,r1) // Save r3
2572
2573 // reason code has been saved
2574 // r0 has been saved
2575 // r1 has been saved
2576 // r2 has been saved
2577 // r3 has been saved
2578 // pt0, pt4, pt5 have been lost
2579
2580 //
2581 // Get out of shadow mode
2582 //
2583
2584 mfpr r2, icsr // Get icsr //orig
2585//orig ldah r0, <1@<icsr_v_sde-16>>(r31) // Get a one in SHADOW_ENABLE bit location
2586 ldah r0, (1<<(icsr_v_sde-16))(r31)
2587 bic r2, r0, r0 // ICSR with SDE clear //orig
2588 mtpr r0, icsr // Turn off SDE //orig
2589
2590 mfpr r31, pt0 // SDE bubble cycle 1 //orig
2591 mfpr r31, pt0 // SDE bubble cycle 2 //orig
2592 mfpr r31, pt0 // SDE bubble cycle 3 //orig
2593 nop //orig
2594
2595
2596 // save integer regs R4-r31
2597//orig #define t 4
2598//orig .repeat 28
2599//orig store_reg \t
2600//orig #define t t + 1
2601//orig .endr
2602 SAVE_GPR(r4,CNS_Q_GPR+0x20,r1)
2603 SAVE_GPR(r5,CNS_Q_GPR+0x28,r1)
2604 SAVE_GPR(r6,CNS_Q_GPR+0x30,r1)
2605 SAVE_GPR(r7,CNS_Q_GPR+0x38,r1)
2606 SAVE_GPR(r8,CNS_Q_GPR+0x40,r1)
2607 SAVE_GPR(r9,CNS_Q_GPR+0x48,r1)
2608 SAVE_GPR(r10,CNS_Q_GPR+0x50,r1)
2609 SAVE_GPR(r11,CNS_Q_GPR+0x58,r1)
2610 SAVE_GPR(r12,CNS_Q_GPR+0x60,r1)
2611 SAVE_GPR(r13,CNS_Q_GPR+0x68,r1)
2612 SAVE_GPR(r14,CNS_Q_GPR+0x70,r1)
2613 SAVE_GPR(r15,CNS_Q_GPR+0x78,r1)
2614 SAVE_GPR(r16,CNS_Q_GPR+0x80,r1)
2615 SAVE_GPR(r17,CNS_Q_GPR+0x88,r1)
2616 SAVE_GPR(r18,CNS_Q_GPR+0x90,r1)
2617 SAVE_GPR(r19,CNS_Q_GPR+0x98,r1)
2618 SAVE_GPR(r20,CNS_Q_GPR+0xA0,r1)
2619 SAVE_GPR(r21,CNS_Q_GPR+0xA8,r1)
2620 SAVE_GPR(r22,CNS_Q_GPR+0xB0,r1)
2621 SAVE_GPR(r23,CNS_Q_GPR+0xB8,r1)
2622 SAVE_GPR(r24,CNS_Q_GPR+0xC0,r1)
2623 SAVE_GPR(r25,CNS_Q_GPR+0xC8,r1)
2624 SAVE_GPR(r26,CNS_Q_GPR+0xD0,r1)
2625 SAVE_GPR(r27,CNS_Q_GPR+0xD8,r1)
2626 SAVE_GPR(r28,CNS_Q_GPR+0xE0,r1)
2627 SAVE_GPR(r29,CNS_Q_GPR+0xE8,r1)
2628 SAVE_GPR(r30,CNS_Q_GPR+0xF0,r1)
2629 SAVE_GPR(r31,CNS_Q_GPR+0xF8,r1)
2630
2631 // save all paltemp regs except pt0
2632
2633//orig unfix_impure_gpr r1 // adjust impure area pointer for gpr stores
2634//orig fix_impure_ipr r1 // adjust impure area pointer for pt stores
2635//orig #define t 1
2636//orig .repeat 23
2637//orig store_reg \t , pal=1
2638//orig #define t t + 1
2639//orig .endr
2640
2641 lda r1, -0x200(r1) // Restore the impure base address.
2642 lda r1, CNS_Q_IPR(r1) // Point to the base of IPR area.
2643 SAVE_IPR(pt0,CNS_Q_PT+0x00,r1) // the osf code didn't save/restore palTemp 0 ?? pboyle
2644 SAVE_IPR(pt1,CNS_Q_PT+0x08,r1)
2645 SAVE_IPR(pt2,CNS_Q_PT+0x10,r1)
2646 SAVE_IPR(pt3,CNS_Q_PT+0x18,r1)
2647 SAVE_IPR(pt4,CNS_Q_PT+0x20,r1)
2648 SAVE_IPR(pt5,CNS_Q_PT+0x28,r1)
2649 SAVE_IPR(pt6,CNS_Q_PT+0x30,r1)
2650 SAVE_IPR(pt7,CNS_Q_PT+0x38,r1)
2651 SAVE_IPR(pt8,CNS_Q_PT+0x40,r1)
2652 SAVE_IPR(pt9,CNS_Q_PT+0x48,r1)
2653 SAVE_IPR(pt10,CNS_Q_PT+0x50,r1)
2654 SAVE_IPR(pt11,CNS_Q_PT+0x58,r1)
2655 SAVE_IPR(pt12,CNS_Q_PT+0x60,r1)
2656 SAVE_IPR(pt13,CNS_Q_PT+0x68,r1)
2657 SAVE_IPR(pt14,CNS_Q_PT+0x70,r1)
2658 SAVE_IPR(pt15,CNS_Q_PT+0x78,r1)
2659 SAVE_IPR(pt16,CNS_Q_PT+0x80,r1)
2660 SAVE_IPR(pt17,CNS_Q_PT+0x88,r1)
2661 SAVE_IPR(pt18,CNS_Q_PT+0x90,r1)
2662 SAVE_IPR(pt19,CNS_Q_PT+0x98,r1)
2663 SAVE_IPR(pt20,CNS_Q_PT+0xA0,r1)
2664 SAVE_IPR(pt21,CNS_Q_PT+0xA8,r1)
2665 SAVE_IPR(pt22,CNS_Q_PT+0xB0,r1)
2666 SAVE_IPR(pt23,CNS_Q_PT+0xB8,r1)
2667
2668 // Restore shadow mode
2669 mfpr r31, pt0 // pad write to icsr out of shadow of store (trap does not abort write) //orig
2670 mfpr r31, pt0 //orig
2671 mtpr r2, icsr // Restore original ICSR //orig
2672
2673 mfpr r31, pt0 // SDE bubble cycle 1 //orig
2674 mfpr r31, pt0 // SDE bubble cycle 2 //orig
2675 mfpr r31, pt0 // SDE bubble cycle 3 //orig
2676 nop //orig
2677
2678 // save all integer shadow regs
2679
2680//orig #define t 8
2681//orig .repeat 7
2682//orig store_reg \t, shadow=1
2683//orig #define t t + 1
2684//orig .endr
2685//orig store_reg 25, shadow=1
2686
2687 SAVE_SHADOW( r8,CNS_Q_SHADOW+0x00,r1) // also called p0...p7 in the Hudson code
2688 SAVE_SHADOW( r9,CNS_Q_SHADOW+0x08,r1)
2689 SAVE_SHADOW(r10,CNS_Q_SHADOW+0x10,r1)
2690 SAVE_SHADOW(r11,CNS_Q_SHADOW+0x18,r1)
2691 SAVE_SHADOW(r12,CNS_Q_SHADOW+0x20,r1)
2692 SAVE_SHADOW(r13,CNS_Q_SHADOW+0x28,r1)
2693 SAVE_SHADOW(r14,CNS_Q_SHADOW+0x30,r1)
2694 SAVE_SHADOW(r25,CNS_Q_SHADOW+0x38,r1)
2695
2696//orig store_reg exc_addr, ipr=1 // save ipr
2697//orig store_reg pal_base, ipr=1 // save ipr
2698//orig store_reg mm_stat, ipr=1 // save ipr
2699//orig store_reg va, ipr=1 // save ipr
2700//orig store_reg icsr, ipr=1 // save ipr
2701//orig store_reg ipl, ipr=1 // save ipr
2702//orig store_reg ps, ipr=1 // save ipr
2703//orig store_reg itb_asn, ipr=1 // save ipr
2704//orig store_reg aster, ipr=1 // save ipr
2705//orig store_reg astrr, ipr=1 // save ipr
2706//orig store_reg sirr, ipr=1 // save ipr
2707//orig store_reg isr, ipr=1 // save ipr
2708//orig store_reg ivptbr, ipr=1 // save ipr
2709//orig store_reg mcsr, ipr=1 // save ipr
2710//orig store_reg dc_mode, ipr=1 // save ipr
2711
2712 SAVE_IPR(excAddr,CNS_Q_EXC_ADDR,r1)
2713 SAVE_IPR(palBase,CNS_Q_PAL_BASE,r1)
2714 SAVE_IPR(mmStat,CNS_Q_MM_STAT,r1)
2715 SAVE_IPR(va,CNS_Q_VA,r1)
2716 SAVE_IPR(icsr,CNS_Q_ICSR,r1)
2717 SAVE_IPR(ipl,CNS_Q_IPL,r1)
2718 SAVE_IPR(ips,CNS_Q_IPS,r1)
2719 SAVE_IPR(itbAsn,CNS_Q_ITB_ASN,r1)
2720 SAVE_IPR(aster,CNS_Q_ASTER,r1)
2721 SAVE_IPR(astrr,CNS_Q_ASTRR,r1)
2722 SAVE_IPR(sirr,CNS_Q_SIRR,r1)
2723 SAVE_IPR(isr,CNS_Q_ISR,r1)
2724 SAVE_IPR(iVptBr,CNS_Q_IVPTBR,r1)
2725 SAVE_IPR(mcsr,CNS_Q_MCSR,r1)
2726 SAVE_IPR(dcMode,CNS_Q_DC_MODE,r1)
2727
2728//orig pvc_violate 379 // mf maf_mode after a store ok (pvc doesn't distinguish ld from st)
2729//orig store_reg maf_mode, ipr=1 // save ipr -- no mbox instructions for
2730//orig // PVC violation applies only to
2731pvc$osf35$379: // loads. HW_ST ok here, so ignore
2732 SAVE_IPR(mafMode,CNS_Q_MAF_MODE,r1) // MBOX INST->MF MAF_MODE IN 0,1,2
2733
2734
2735 //the following iprs are informational only -- will not be restored
2736
2737//orig store_reg icperr_stat, ipr=1
2738//orig store_reg pmctr, ipr=1
2739//orig store_reg intid, ipr=1
2740//orig store_reg exc_sum, ipr=1
2741//orig store_reg exc_mask, ipr=1
2742//orig ldah r14, 0xfff0(r31)
2743//orig zap r14, 0xE0, r14 // Get Cbox IPR base
2744//orig nop // pad mf dcperr_stat out of shadow of last store
2745//orig nop
2746//orig nop
2747//orig store_reg dcperr_stat, ipr=1
2748
2749 SAVE_IPR(icPerr,CNS_Q_ICPERR_STAT,r1)
2750 SAVE_IPR(PmCtr,CNS_Q_PM_CTR,r1)
2751 SAVE_IPR(intId,CNS_Q_INT_ID,r1)
2752 SAVE_IPR(excSum,CNS_Q_EXC_SUM,r1)
2753 SAVE_IPR(excMask,CNS_Q_EXC_MASK,r1)
2754 ldah r14, 0xFFF0(zero)
2755 zap r14, 0xE0, r14 // Get base address of CBOX IPRs
2756 NOP // Pad mfpr dcPerr out of shadow of
2757 NOP // last store
2758 NOP
2759 SAVE_IPR(dcPerr,CNS_Q_DCPERR_STAT,r1)
2760
2761 // read cbox ipr state
2762
2763//orig mb
2764//orig ldqp r2, ev5__sc_ctl(r14)
2765//orig ldqp r13, ld_lock(r14)
2766//orig ldqp r4, ev5__sc_addr(r14)
2767//orig ldqp r5, ev5__ei_addr(r14)
2768//orig ldqp r6, ev5__bc_tag_addr(r14)
2769//orig ldqp r7, ev5__fill_syn(r14)
2770//orig bis r5, r4, r31
2771//orig bis r7, r6, r31 // make sure previous loads finish before reading stat registers which unlock them
2772//orig ldqp r8, ev5__sc_stat(r14) // unlocks sc_stat,sc_addr
2773//orig ldqp r9, ev5__ei_stat(r14) // may unlock ei_*, bc_tag_addr, fill_syn
2774//orig ldqp r31, ev5__ei_stat(r14) // ensures it is really unlocked
2775//orig mb
2776
2777#ifndef SIMOS
2778 mb
2779 ldq_p r2, scCtl(r14)
2780 ldq_p r13, ldLock(r14)
2781 ldq_p r4, scAddr(r14)
2782 ldq_p r5, eiAddr(r14)
2783 ldq_p r6, bcTagAddr(r14)
2784 ldq_p r7, fillSyn(r14)
2785 bis r5, r4, zero // Make sure all loads complete before
2786 bis r7, r6, zero // reading registers that unlock them.
2787 ldq_p r8, scStat(r14) // Unlocks scAddr.
2788 ldq_p r9, eiStat(r14) // Unlocks eiAddr, bcTagAddr, fillSyn.
2789 ldq_p zero, eiStat(r14) // Make sure it is really unlocked.
2790 mb
2791#endif
2792//orig // save cbox ipr state
2793//orig store_reg1 sc_ctl, r2, r1, ipr=1
2794//orig store_reg1 ld_lock, r13, r1, ipr=1
2795//orig store_reg1 sc_addr, r4, r1, ipr=1
2796//orig store_reg1 ei_addr, r5, r1, ipr=1
2797//orig store_reg1 bc_tag_addr, r6, r1, ipr=1
2798//orig store_reg1 fill_syn, r7, r1, ipr=1
2799//orig store_reg1 sc_stat, r8, r1, ipr=1
2800//orig store_reg1 ei_stat, r9, r1, ipr=1
2801//orig //bc_config? sl_rcv?
2802
2803 SAVE_SHADOW(r2,CNS_Q_SC_CTL,r1);
2804 SAVE_SHADOW(r13,CNS_Q_LD_LOCK,r1);
2805 SAVE_SHADOW(r4,CNS_Q_SC_ADDR,r1);
2806 SAVE_SHADOW(r5,CNS_Q_EI_ADDR,r1);
2807 SAVE_SHADOW(r6,CNS_Q_BC_TAG_ADDR,r1);
2808 SAVE_SHADOW(r7,CNS_Q_FILL_SYN,r1);
2809 SAVE_SHADOW(r8,CNS_Q_SC_STAT,r1);
2810 SAVE_SHADOW(r9,CNS_Q_EI_STAT,r1);
2811
2812// restore impure base //orig
2813//orig unfix_impure_ipr r1
2814 lda r1, -CNS_Q_IPR(r1)
2815
2816// save all floating regs //orig
2817 mfpr r0, icsr // get icsr //orig
2818 or r31, 1, r2 // get a one //orig
2819//orig sll r2, #icsr_v_fpe, r2 // shift for fpu spot //orig
2820 sll r2, icsr_v_fpe, r2 // Shift it into ICSR<FPE> position
2821 or r2, r0, r0 // set FEN on //orig
2822 mtpr r0, icsr // write to icsr, enabling FEN //orig
2823
2824// map the save area virtually
2825// orig mtpr r31, dtb_ia // clear the dtb
2826// orig srl r1, page_offset_size_bits, r0 // Clean off low bits of VA
2827// orig sll r0, 32, r0 // shift to PFN field
2828// orig lda r2, 0xff(r31) // all read enable and write enable bits set
2829// orig sll r2, 8, r2 // move to PTE location
2830// orig addq r0, r2, r0 // combine with PFN
2831// orig mtpr r0, dtb_pte // Load PTE and set TB valid bit
2832// orig mtpr r1, dtb_tag // write TB tag
2833
2834 mtpr r31, dtbIa // Clear all DTB entries
2835 srl r1, va_s_off, r0 // Clean off byte-within-page offset
2836 sll r0, pte_v_pfn, r0 // Shift to form PFN
2837 lda r0, pte_m_prot(r0) // Set all read/write enable bits
2838 mtpr r0, dtbPte // Load the PTE and set valid
2839 mtpr r1, dtbTag // Write the PTE and tag into the DTB
2840
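//
// A sketch of the 1:1 mapping built above (taken from the original sequence
// preserved in the //orig comments): the PTE is formed as
//	PTE = ((VA >> va_s_off) << pte_v_pfn) | pte_m_prot
// i.e. the page frame number of the impure area itself goes in the PFN field
// (bit 32 in the original code) and pte_m_prot supplies all read/write enable
// bits (0xff << 8 in the original code), so the save area becomes addressable
// virtually at its own physical address with full access.
//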
2841
2842//orig // map the next page too - in case the impure area crosses a page boundary
2843//orig lda r4, 1@page_offset_size_bits(r1) // generate address for next page
2844//orig srl r4, page_offset_size_bits, r0 // Clean off low bits of VA
2845//orig sll r0, 32, r0 // shift to PFN field
2846//orig lda r2, 0xff(r31) // all read enable and write enable bits set
2847//orig sll r2, 8, r2 // move to PTE location
2848//orig addq r0, r2, r0 // combine with PFN
2849//orig mtpr r0, dtb_pte // Load PTE and set TB valid bit
2850//orig mtpr r4, dtb_tag // write TB tag
2851
2852 lda r4, (1<<va_s_off)(r1) // Generate address for next page
2853 srl r4, va_s_off, r0 // Clean off byte-within-page offset
2854 sll r0, pte_v_pfn, r0 // Shift to form PFN
2855 lda r0, pte_m_prot(r0) // Set all read/write enable bits
2856 mtpr r0, dtbPte // Load the PTE and set valid
2857 mtpr r4, dtbTag // Write the PTE and tag into the DTB
2858
2859 sll r31, 0, r31 // stall cycle 1 // orig
2860 sll r31, 0, r31 // stall cycle 2 // orig
2861 sll r31, 0, r31 // stall cycle 3 // orig
2862 nop // orig
2863
2864//orig // add offset for saving fpr regs
2865//orig fix_impure_gpr r1
2866
2867 lda r1, 0x200(r1) // Point to center of CPU segment
2868
2869// now save the regs - F0-F31
2870
2871//orig #define t 0
2872//orig .repeat 32
2873//orig store_reg \t , fpu=1
2874//orig #define t t + 1
2875//orig .endr
2876
2877 mf_fpcr f0 // original
2878
2879 SAVE_FPR(f0,CNS_Q_FPR+0x00,r1)
2880 SAVE_FPR(f1,CNS_Q_FPR+0x08,r1)
2881 SAVE_FPR(f2,CNS_Q_FPR+0x10,r1)
2882 SAVE_FPR(f3,CNS_Q_FPR+0x18,r1)
2883 SAVE_FPR(f4,CNS_Q_FPR+0x20,r1)
2884 SAVE_FPR(f5,CNS_Q_FPR+0x28,r1)
2885 SAVE_FPR(f6,CNS_Q_FPR+0x30,r1)
2886 SAVE_FPR(f7,CNS_Q_FPR+0x38,r1)
2887 SAVE_FPR(f8,CNS_Q_FPR+0x40,r1)
2888 SAVE_FPR(f9,CNS_Q_FPR+0x48,r1)
2889 SAVE_FPR(f10,CNS_Q_FPR+0x50,r1)
2890 SAVE_FPR(f11,CNS_Q_FPR+0x58,r1)
2891 SAVE_FPR(f12,CNS_Q_FPR+0x60,r1)
2892 SAVE_FPR(f13,CNS_Q_FPR+0x68,r1)
2893 SAVE_FPR(f14,CNS_Q_FPR+0x70,r1)
2894 SAVE_FPR(f15,CNS_Q_FPR+0x78,r1)
2895 SAVE_FPR(f16,CNS_Q_FPR+0x80,r1)
2896 SAVE_FPR(f17,CNS_Q_FPR+0x88,r1)
2897 SAVE_FPR(f18,CNS_Q_FPR+0x90,r1)
2898 SAVE_FPR(f19,CNS_Q_FPR+0x98,r1)
2899 SAVE_FPR(f20,CNS_Q_FPR+0xA0,r1)
2900 SAVE_FPR(f21,CNS_Q_FPR+0xA8,r1)
2901 SAVE_FPR(f22,CNS_Q_FPR+0xB0,r1)
2902 SAVE_FPR(f23,CNS_Q_FPR+0xB8,r1)
2903 SAVE_FPR(f24,CNS_Q_FPR+0xC0,r1)
2904 SAVE_FPR(f25,CNS_Q_FPR+0xC8,r1)
2905 SAVE_FPR(f26,CNS_Q_FPR+0xD0,r1)
2906 SAVE_FPR(f27,CNS_Q_FPR+0xD8,r1)
2907 SAVE_FPR(f28,CNS_Q_FPR+0xE0,r1)
2908 SAVE_FPR(f29,CNS_Q_FPR+0xE8,r1)
2909 SAVE_FPR(f30,CNS_Q_FPR+0xF0,r1)
2910 SAVE_FPR(f31,CNS_Q_FPR+0xF8,r1)
2911
2912//orig //switch impure offset from gpr to ipr---
2913//orig unfix_impure_gpr r1
2914//orig fix_impure_ipr r1
2915//orig store_reg1 fpcsr, f0, r1, fpcsr=1
2916
2917 	SAVE_FPR(f0,CNS_Q_FPCSR,r1)	// fpcsr loaded above into f0 -- can it reach? pb
2918 lda r1, -0x200(r1) // Restore the impure base address
2919
2920//orig // and back to gpr ---
2921//orig unfix_impure_ipr r1
2922//orig fix_impure_gpr r1
2923
2924//orig lda r0, cns_mchksize(r31) // get size of mchk area
2925//orig store_reg1 mchkflag, r0, r1, ipr=1
2926//orig mb
2927
2928 lda r1, CNS_Q_IPR(r1) // Point to base of IPR area again
2929 	// save this using the IPR base (it is closer) not the GPR base as they used...pb
2930 lda r0, MACHINE_CHECK_SIZE(r31) // get size of mchk area
2931 SAVE_SHADOW(r0,CNS_Q_MCHK,r1);
2932 mb
2933
2934//orig or r31, 1, r0 // get a one
2935//orig store_reg1 flag, r0, r1, ipr=1 // set dump area flag
2936//orig mb
2937
2938 lda r1, -CNS_Q_IPR(r1) // back to the base
2939 lda r1, 0x200(r1) // Point to center of CPU segment
2940 or r31, 1, r0 // get a one
2941 	SAVE_GPR(r0,CNS_Q_FLAG,r1)	// set dump area valid flag
2942 mb
2943
2944//orig // restore impure area base
2945//orig unfix_impure_gpr r1
2946 	lda	r1, -0x200(r1)		// Restore the impure area base address
2947
2948 mtpr r31, dtb_ia // clear the dtb //orig
2949 mtpr r31, itb_ia // clear the itb //orig
2950
2951//orig pvc_jsr savsta, bsr=1, dest=1
2952 ret r31, (r3) // and back we go
2953#endif
2954
2955
2956#if remove_restore_state == 0
2957
2958
2959// .sbttl "PAL_RESTORE_STATE"
2960//+
2961//
2962// Pal_restore_state
2963//
2964//
2965// register usage:
2966// r1 = addr of impure area
2967// r3 = return_address
2968// all other regs are scratchable, as they are about to
2969// be reloaded from ram.
2970//
2971// Function:
2972// All chip state restored, all SRs, FRs, PTs, IPRs
2973// *** except R1, R3, PT0, PT4, PT5 ***
2974//
2975//-
2976 ALIGN_BLOCK
2977pal_restore_state:
2978
2979//need to restore sc_ctl,bc_ctl,bc_config??? if so, need to figure out a safe way to do so.
2980
2981//orig // map the console io area virtually
2982//orig mtpr r31, dtb_ia // clear the dtb
2983//orig srl r1, page_offset_size_bits, r0 // Clean off low bits of VA
2984//orig sll r0, 32, r0 // shift to PFN field
2985//orig lda r2, 0xff(r31) // all read enable and write enable bits set
2986//orig sll r2, 8, r2 // move to PTE location
2987//orig addq r0, r2, r0 // combine with PFN
2988//orig
2989//orig mtpr r0, dtb_pte // Load PTE and set TB valid bit
2990//orig mtpr r1, dtb_tag // write TB tag
2991//orig
2992
2993 mtpr r31, dtbIa // Clear all DTB entries
2994 srl r1, va_s_off, r0 // Clean off byte-within-page offset
2995 sll r0, pte_v_pfn, r0 // Shift to form PFN
2996 lda r0, pte_m_prot(r0) // Set all read/write enable bits
2997 mtpr r0, dtbPte // Load the PTE and set valid
2998 mtpr r1, dtbTag // Write the PTE and tag into the DTB
2999
3000
3001//orig // map the next page too, in case impure area crosses page boundary
3002//orig lda r4, 1@page_offset_size_bits(r1) // generate address for next page
3003//orig srl r4, page_offset_size_bits, r0 // Clean off low bits of VA
3004//orig sll r0, 32, r0 // shift to PFN field
3005//orig lda r2, 0xff(r31) // all read enable and write enable bits set
3006//orig sll r2, 8, r2 // move to PTE location
3007//orig addq r0, r2, r0 // combine with PFN
3008//orig
3009//orig mtpr r0, dtb_pte // Load PTE and set TB valid bit
3010//orig mtpr r4, dtb_tag // write TB tag - no virtual mbox instruction for 3 cycles
3011
3012 	lda	r4, (1<<va_s_off)(r1)	// Generate address for next page
3013 srl r4, va_s_off, r0 // Clean off byte-within-page offset
3014 sll r0, pte_v_pfn, r0 // Shift to form PFN
3015 lda r0, pte_m_prot(r0) // Set all read/write enable bits
3016 mtpr r0, dtbPte // Load the PTE and set valid
3017 mtpr r4, dtbTag // Write the PTE and tag into the DTB
3018
3019//orig // save all floating regs
3020//orig mfpr r0, icsr // get icsr
3021//orig// assume ICSR_V_SDE gt <ICSR_V_FPE> // assertion checker
3022//orig or r31, <<1@<ICSR_V_SDE-ICSR_V_FPE>> ! 1>, r2 // set SDE and FPE
3023//orig sll r2, #icsr_v_fpe, r2 // shift for fpu spot
3024//orig or r2, r0, r0 // set FEN on
3025//orig mtpr r0, icsr // write to icsr, enabling FEN and SDE. 3 bubbles to floating instr.
3026
3027 mfpr r0, icsr // Get current ICSR
3028 bis zero, 1, r2 // Get a '1'
3029 or r2, (1<<(icsr_v_sde-icsr_v_fpe)), r2
3030 sll r2, icsr_v_fpe, r2 // Shift bits into position
3031 	bis	r2, r0, r0		// Set ICSR<SDE> and ICSR<FPE> in the current value
3032 mtpr r0, icsr // Update the chip
3033
3034 mfpr r31, pt0 // FPE bubble cycle 1 //orig
3035 mfpr r31, pt0 // FPE bubble cycle 2 //orig
3036 mfpr r31, pt0 // FPE bubble cycle 3 //orig
3037
3038//orig fix_impure_ipr r1
3039//orig restore_reg1 fpcsr, f0, r1, fpcsr=1
3040//orig mt_fpcr f0
3041//orig
3042//orig unfix_impure_ipr r1
3043//orig fix_impure_gpr r1 // adjust impure pointer offset for gpr access
3044//orig
3045//orig // restore all floating regs
3046//orig#define t 0
3047//orig .repeat 32
3048//orig restore_reg \t , fpu=1
3049//orig#define t t + 1
3050//orig .endr
3051
3052 lda r1, 200(r1) // Point to base of IPR area again
3053 RESTORE_FPR(f0,CNS_Q_FPCSR,r1) // can it reach?? pb
3054 mt_fpcr f0 // original
3055
3056 lda r1, 0x200(r1) // point to center of CPU segment
3057 RESTORE_FPR(f0,CNS_Q_FPR+0x00,r1)
3058 RESTORE_FPR(f1,CNS_Q_FPR+0x08,r1)
3059 RESTORE_FPR(f2,CNS_Q_FPR+0x10,r1)
3060 RESTORE_FPR(f3,CNS_Q_FPR+0x18,r1)
3061 RESTORE_FPR(f4,CNS_Q_FPR+0x20,r1)
3062 RESTORE_FPR(f5,CNS_Q_FPR+0x28,r1)
3063 RESTORE_FPR(f6,CNS_Q_FPR+0x30,r1)
3064 RESTORE_FPR(f7,CNS_Q_FPR+0x38,r1)
3065 RESTORE_FPR(f8,CNS_Q_FPR+0x40,r1)
3066 RESTORE_FPR(f9,CNS_Q_FPR+0x48,r1)
3067 RESTORE_FPR(f10,CNS_Q_FPR+0x50,r1)
3068 RESTORE_FPR(f11,CNS_Q_FPR+0x58,r1)
3069 RESTORE_FPR(f12,CNS_Q_FPR+0x60,r1)
3070 RESTORE_FPR(f13,CNS_Q_FPR+0x68,r1)
3071 RESTORE_FPR(f14,CNS_Q_FPR+0x70,r1)
3072 RESTORE_FPR(f15,CNS_Q_FPR+0x78,r1)
3073 RESTORE_FPR(f16,CNS_Q_FPR+0x80,r1)
3074 RESTORE_FPR(f17,CNS_Q_FPR+0x88,r1)
3075 RESTORE_FPR(f18,CNS_Q_FPR+0x90,r1)
3076 RESTORE_FPR(f19,CNS_Q_FPR+0x98,r1)
3077 RESTORE_FPR(f20,CNS_Q_FPR+0xA0,r1)
3078 RESTORE_FPR(f21,CNS_Q_FPR+0xA8,r1)
3079 RESTORE_FPR(f22,CNS_Q_FPR+0xB0,r1)
3080 RESTORE_FPR(f23,CNS_Q_FPR+0xB8,r1)
3081 RESTORE_FPR(f24,CNS_Q_FPR+0xC0,r1)
3082 RESTORE_FPR(f25,CNS_Q_FPR+0xC8,r1)
3083 RESTORE_FPR(f26,CNS_Q_FPR+0xD0,r1)
3084 RESTORE_FPR(f27,CNS_Q_FPR+0xD8,r1)
3085 RESTORE_FPR(f28,CNS_Q_FPR+0xE0,r1)
3086 RESTORE_FPR(f29,CNS_Q_FPR+0xE8,r1)
3087 RESTORE_FPR(f30,CNS_Q_FPR+0xF0,r1)
3088 RESTORE_FPR(f31,CNS_Q_FPR+0xF8,r1)
3089
3090//orig // switch impure pointer from gpr to ipr area --
3091//orig unfix_impure_gpr r1
3092//orig fix_impure_ipr r1
3093//orig
3094//orig // restore all pal regs
3095//orig#define t 1
3096//orig .repeat 23
3097//orig restore_reg \t , pal=1
3098//orig#define t t + 1
3099//orig .endr
3100
3101 lda r1, -0x200(r1) // Restore base address of impure area.
3102 lda r1, CNS_Q_IPR(r1) // Point to base of IPR area.
3103 RESTORE_IPR(pt0,CNS_Q_PT+0x00,r1) // the osf code didn't save/restore palTemp 0 ?? pboyle
3104 RESTORE_IPR(pt1,CNS_Q_PT+0x08,r1)
3105 RESTORE_IPR(pt2,CNS_Q_PT+0x10,r1)
3106 RESTORE_IPR(pt3,CNS_Q_PT+0x18,r1)
3107 RESTORE_IPR(pt4,CNS_Q_PT+0x20,r1)
3108 RESTORE_IPR(pt5,CNS_Q_PT+0x28,r1)
3109 RESTORE_IPR(pt6,CNS_Q_PT+0x30,r1)
3110 RESTORE_IPR(pt7,CNS_Q_PT+0x38,r1)
3111 RESTORE_IPR(pt8,CNS_Q_PT+0x40,r1)
3112 RESTORE_IPR(pt9,CNS_Q_PT+0x48,r1)
3113 RESTORE_IPR(pt10,CNS_Q_PT+0x50,r1)
3114 RESTORE_IPR(pt11,CNS_Q_PT+0x58,r1)
3115 RESTORE_IPR(pt12,CNS_Q_PT+0x60,r1)
3116 RESTORE_IPR(pt13,CNS_Q_PT+0x68,r1)
3117 RESTORE_IPR(pt14,CNS_Q_PT+0x70,r1)
3118 RESTORE_IPR(pt15,CNS_Q_PT+0x78,r1)
3119 RESTORE_IPR(pt16,CNS_Q_PT+0x80,r1)
3120 RESTORE_IPR(pt17,CNS_Q_PT+0x88,r1)
3121 RESTORE_IPR(pt18,CNS_Q_PT+0x90,r1)
3122 RESTORE_IPR(pt19,CNS_Q_PT+0x98,r1)
3123 RESTORE_IPR(pt20,CNS_Q_PT+0xA0,r1)
3124 RESTORE_IPR(pt21,CNS_Q_PT+0xA8,r1)
3125 RESTORE_IPR(pt22,CNS_Q_PT+0xB0,r1)
3126 RESTORE_IPR(pt23,CNS_Q_PT+0xB8,r1)
3127
3128
3129//orig restore_reg exc_addr, ipr=1 // restore ipr
3130//orig restore_reg pal_base, ipr=1 // restore ipr
3131//orig restore_reg ipl, ipr=1 // restore ipr
3132//orig restore_reg ps, ipr=1 // restore ipr
3133//orig mtpr r0, dtb_cm // set current mode in mbox too
3134//orig restore_reg itb_asn, ipr=1
3135//orig srl r0, itb_asn_v_asn, r0
3136//orig sll r0, dtb_asn_v_asn, r0
3137//orig mtpr r0, dtb_asn // set ASN in Mbox too
3138//orig restore_reg ivptbr, ipr=1
3139//orig mtpr r0, mvptbr // use ivptbr value to restore mvptbr
3140//orig restore_reg mcsr, ipr=1
3141//orig restore_reg aster, ipr=1
3142//orig restore_reg astrr, ipr=1
3143//orig restore_reg sirr, ipr=1
3144//orig restore_reg maf_mode, ipr=1 // no mbox instruction for 3 cycles
3145//orig mfpr r31, pt0 // (may issue with mt maf_mode)
3146//orig mfpr r31, pt0 // bubble cycle 1
3147//orig mfpr r31, pt0 // bubble cycle 2
3148//orig mfpr r31, pt0 // bubble cycle 3
3149//orig mfpr r31, pt0 // (may issue with following ld)
3150
3151 // r0 gets the value of RESTORE_IPR in the macro and this code uses this side effect (gag)
3152 RESTORE_IPR(excAddr,CNS_Q_EXC_ADDR,r1)
3153 RESTORE_IPR(palBase,CNS_Q_PAL_BASE,r1)
3154 RESTORE_IPR(ipl,CNS_Q_IPL,r1)
3155 RESTORE_IPR(ips,CNS_Q_IPS,r1)
3156 mtpr r0, dtbCm // Set Mbox current mode too.
3157 RESTORE_IPR(itbAsn,CNS_Q_ITB_ASN,r1)
3158 	srl	r0, 4, r0			// Extract ASN field (itb_asn_v_asn = 4)
3159 	sll	r0, 57, r0			// Move to DTB_ASN position (dtb_asn_v_asn = 57)
3160 mtpr r0, dtbAsn // Set Mbox ASN too
3161 RESTORE_IPR(iVptBr,CNS_Q_IVPTBR,r1)
3162 mtpr r0, mVptBr // Set Mbox VptBr too
3163 RESTORE_IPR(mcsr,CNS_Q_MCSR,r1)
3164 RESTORE_IPR(aster,CNS_Q_ASTER,r1)
3165 RESTORE_IPR(astrr,CNS_Q_ASTRR,r1)
3166 RESTORE_IPR(sirr,CNS_Q_SIRR,r1)
3167 RESTORE_IPR(mafMode,CNS_Q_MAF_MODE,r1)
3168 STALL
3169 STALL
3170 STALL
3171 STALL
3172 STALL
3173
3174
3175 // restore all integer shadow regs
3176//orig#define t 8
3177//orig .repeat 7
3178//orig restore_reg \t, shadow=1
3179//orig#define t t + 1
3180//orig .endr
3181//orig restore_reg 25, shadow=1
3182//orig restore_reg dc_mode, ipr=1 // no mbox instructions for 4 cycles
3183
3184 RESTORE_SHADOW( r8,CNS_Q_SHADOW+0x00,r1) // also called p0...p7 in the Hudson code
3185 RESTORE_SHADOW( r9,CNS_Q_SHADOW+0x08,r1)
3186 RESTORE_SHADOW(r10,CNS_Q_SHADOW+0x10,r1)
3187 RESTORE_SHADOW(r11,CNS_Q_SHADOW+0x18,r1)
3188 RESTORE_SHADOW(r12,CNS_Q_SHADOW+0x20,r1)
3189 RESTORE_SHADOW(r13,CNS_Q_SHADOW+0x28,r1)
3190 RESTORE_SHADOW(r14,CNS_Q_SHADOW+0x30,r1)
3191 RESTORE_SHADOW(r25,CNS_Q_SHADOW+0x38,r1)
3192 RESTORE_IPR(dcMode,CNS_Q_DC_MODE,r1)
3193
3194 //
3195 // Get out of shadow mode
3196 //
3197
3198 mfpr r31, pt0 // pad last load to icsr write (in case of replay, icsr will be written anyway) //orig
3199 mfpr r31, pt0 // "" //orig
3200 mfpr r0, icsr // Get icsr //orig
3201//orig ldah r2, <1@<icsr_v_sde-16>>(r31) // Get a one in SHADOW_ENABLE bit location
3202 	ldah	r2, (1<<(icsr_v_sde-16))(r31)	// Get a one in SHADOW_ENABLE bit location //orig
3203 bic r0, r2, r2 // ICSR with SDE clear //orig
3204 mtpr r2, icsr // Turn off SDE - no palshadow rd/wr for 3 bubble cycles //orig
3205
3206 mfpr r31, pt0 // SDE bubble cycle 1 //orig
3207 mfpr r31, pt0 // SDE bubble cycle 2 //orig
3208 mfpr r31, pt0 // SDE bubble cycle 3 //orig
3209 nop //orig
3210
3211//orig // switch impure pointer from ipr to gpr area --
3212//orig unfix_impure_ipr r1
3213//orig fix_impure_gpr r1
3214//orig // restore all integer regs
3215//orig#define t 4
3216//orig .repeat 28
3217//orig restore_reg \t
3218//orig#define t t + 1
3219//orig .endr
3220
3221// Restore GPRs (r0, r2 are restored later, r1 and r3 are trashed) ...
3222
3223 lda r1, -CNS_Q_IPR(r1) // Restore base address of impure area
3224 lda r1, 0x200(r1) // Point to center of CPU segment
3225
3226 RESTORE_GPR(r4,CNS_Q_GPR+0x20,r1)
3227 RESTORE_GPR(r5,CNS_Q_GPR+0x28,r1)
3228 RESTORE_GPR(r6,CNS_Q_GPR+0x30,r1)
3229 RESTORE_GPR(r7,CNS_Q_GPR+0x38,r1)
3230 RESTORE_GPR(r8,CNS_Q_GPR+0x40,r1)
3231 RESTORE_GPR(r9,CNS_Q_GPR+0x48,r1)
3232 RESTORE_GPR(r10,CNS_Q_GPR+0x50,r1)
3233 RESTORE_GPR(r11,CNS_Q_GPR+0x58,r1)
3234 RESTORE_GPR(r12,CNS_Q_GPR+0x60,r1)
3235 RESTORE_GPR(r13,CNS_Q_GPR+0x68,r1)
3236 RESTORE_GPR(r14,CNS_Q_GPR+0x70,r1)
3237 RESTORE_GPR(r15,CNS_Q_GPR+0x78,r1)
3238 RESTORE_GPR(r16,CNS_Q_GPR+0x80,r1)
3239 RESTORE_GPR(r17,CNS_Q_GPR+0x88,r1)
3240 RESTORE_GPR(r18,CNS_Q_GPR+0x90,r1)
3241 RESTORE_GPR(r19,CNS_Q_GPR+0x98,r1)
3242 RESTORE_GPR(r20,CNS_Q_GPR+0xA0,r1)
3243 RESTORE_GPR(r21,CNS_Q_GPR+0xA8,r1)
3244 RESTORE_GPR(r22,CNS_Q_GPR+0xB0,r1)
3245 RESTORE_GPR(r23,CNS_Q_GPR+0xB8,r1)
3246 RESTORE_GPR(r24,CNS_Q_GPR+0xC0,r1)
3247 RESTORE_GPR(r25,CNS_Q_GPR+0xC8,r1)
3248 RESTORE_GPR(r26,CNS_Q_GPR+0xD0,r1)
3249 RESTORE_GPR(r27,CNS_Q_GPR+0xD8,r1)
3250 RESTORE_GPR(r28,CNS_Q_GPR+0xE0,r1)
3251 RESTORE_GPR(r29,CNS_Q_GPR+0xE8,r1)
3252 RESTORE_GPR(r30,CNS_Q_GPR+0xF0,r1)
3253 RESTORE_GPR(r31,CNS_Q_GPR+0xF8,r1)
3254
3255//orig // switch impure pointer from gpr to ipr area --
3256//orig unfix_impure_gpr r1
3257//orig fix_impure_ipr r1
3258//orig restore_reg icsr, ipr=1 // restore original icsr- 4 bubbles to hw_rei
3259
3260 	lda	r1, -0x200(r1)		// Restore base address of impure area.
3261 	lda	r1, CNS_Q_IPR(r1)	// Point to base of IPR area again.
3262 RESTORE_IPR(icsr,CNS_Q_ICSR,r1)
3263
3264//orig // and back again --
3265//orig unfix_impure_ipr r1
3266//orig fix_impure_gpr r1
3267//orig store_reg1 flag, r31, r1, ipr=1 // clear dump area valid flag
3268//orig mb
3269
3270 	lda	r1, -CNS_Q_IPR(r1)	// Back to base of impure area again,
3271 	lda	r1, 0x200(r1)		// and back to center of CPU segment
3272 SAVE_GPR(r31,CNS_Q_FLAG,r1) // Clear the dump area valid flag
3273 mb
3274
3275//orig // and back we go
3276//orig// restore_reg 3
3277//orig restore_reg 2
3278//orig// restore_reg 1
3279//orig restore_reg 0
3280//orig // restore impure area base
3281//orig unfix_impure_gpr r1
3282
3283 RESTORE_GPR(r2,CNS_Q_GPR+0x10,r1)
3284 RESTORE_GPR(r0,CNS_Q_GPR+0x00,r1)
3285 lda r1, -0x200(r1) // Restore impure base address
3286
3287 mfpr r31, pt0 // stall for ldqp above //orig
3288
3289 mtpr r31, dtb_ia // clear the tb //orig
3290 mtpr r31, itb_ia // clear the itb //orig
3291
3292//orig pvc_jsr rststa, bsr=1, dest=1
3293 ret r31, (r3) // back we go //orig
3294#endif
3295
3296
3297//+
3298// pal_pal_bug_check -- code has found a bugcheck situation.
3299// Set things up and join common machine check flow.
3300//
3301// Input:
3302// r14 - exc_addr
3303//
3304// On exit:
3305// pt0 - saved r0
3306// pt1 - saved r1
3307// pt4 - saved r4
3308// pt5 - saved r5
3309// pt6 - saved r6
3310// pt10 - saved exc_addr
3311// pt_misc<47:32> - mchk code
3312// pt_misc<31:16> - scb vector
3313// r14 - base of Cbox IPRs in IO space
3314// MCES<mchk> is set
3315//-
3316
3317 ALIGN_BLOCK
3318 .globl pal_pal_bug_check_from_int
3319pal_pal_bug_check_from_int:
3320 DEBUGSTORE(0x79)
3321//simos DEBUG_EXC_ADDR()
3322 DEBUGSTORE(0x20)
3323//simos bsr r25, put_hex
3324 lda r25, mchk_c_bugcheck(r31)
3325 addq r25, 1, r25 // set flag indicating we came from interrupt and stack is already pushed
3326 br r31, pal_pal_mchk
3327 nop
3328
3329pal_pal_bug_check:
3330 lda r25, mchk_c_bugcheck(r31)
3331
3332pal_pal_mchk:
3333 sll r25, 32, r25 // Move mchk code to position
3334
3335 mtpr r14, pt10 // Stash exc_addr
3336 mtpr r14, exc_addr
3337
3338 mfpr r12, pt_misc // Get MCES and scratch
3339 zap r12, 0x3c, r12
3340
3341 or r12, r25, r12 // Combine mchk code
3342 lda r25, scb_v_procmchk(r31) // Get SCB vector
3343
3344 sll r25, 16, r25 // Move SCBv to position
3345 or r12, r25, r25 // Combine SCBv
3346
3347 mtpr r0, pt0 // Stash for scratch
3348 bis r25, mces_m_mchk, r25 // Set MCES<MCHK> bit
3349
3350 mtpr r25, pt_misc // Save mchk code!scbv!whami!mces
3351 ldah r14, 0xfff0(r31)
3352
3353 mtpr r1, pt1 // Stash for scratch
3354 zap r14, 0xE0, r14 // Get Cbox IPR base
3355
3356 mtpr r4, pt4
3357 mtpr r5, pt5
3358
3359 mtpr r6, pt6
3360 blbs r12, sys_double_machine_check // MCHK halt if double machine check
3361
3362 br r31, sys_mchk_collect_iprs // Join common machine check flow
3363
3364// align_to_call_pal_section // Align to address of first call_pal entry point - 2000
3365
3366// .sbttl "HALT - PALcode for HALT instruction"
3367
3368//+
3369//
3370// Entry:
3371// Vectored into via hardware PALcode instruction dispatch.
3372//
3373// Function:
3374// GO to console code
3375//
3376//-
3377
3378 .text 1
3379// . = 0x2000
3380 CALL_PAL_PRIV(PAL_HALT_ENTRY)
3381call_pal_halt:
3382#if rax_mode == 0
3383 mfpr r31, pt0 // Pad exc_addr read
3384 mfpr r31, pt0
3385
3386 mfpr r12, exc_addr // get PC
3387 subq r12, 4, r12 // Point to the HALT
3388
3389 mtpr r12, exc_addr
3390 mtpr r0, pt0
3391
3392//orig pvc_jsr updpcb, bsr=1
3393 bsr r0, pal_update_pcb // update the pcb
3394 lda r0, hlt_c_sw_halt(r31) // set halt code to sw halt
3395 br r31, sys_enter_console // enter the console
3396
3397#else // RAX mode
3398 mb
3399 mb
3400 mtpr r9, ev5__dtb_asn // no Dstream virtual ref for next 3 cycles.
3401 mtpr r9, ev5__itb_asn // E1. Update ITB ASN. No hw_rei for 5 cycles.
3402 mtpr r8, exc_addr // no HW_REI for 1 cycle.
3403 blbc r9, not_begin_case
3404 mtpr r31, ev5__dtb_ia // clear DTB. No Dstream virtual ref for 2 cycles.
3405 mtpr r31, ev5__itb_ia // clear ITB.
3406
3407not_begin_case:
3408 nop
3409 nop
3410
3411 nop
3412 nop // pad mt itb_asn ->hw_rei_stall
3413
3414 hw_rei_stall
3415#endif
3416
3417// .sbttl "CFLUSH- PALcode for CFLUSH instruction"
3418
3419//+
3420//
3421// Entry:
3422// Vectored into via hardware PALcode instruction dispatch.
3423//
3424// R16 - contains the PFN of the page to be flushed
3425//
3426// Function:
3427// Flush all Dstream caches of 1 entire page
3428// The CFLUSH routine is in the system specific module.
3429//
3430//-
3431
3432 CALL_PAL_PRIV(PAL_CFLUSH_ENTRY)
3433Call_Pal_Cflush:
3434 br r31, sys_cflush
3435
3436// .sbttl "DRAINA - PALcode for DRAINA instruction"
3437//+
3438//
3439// Entry:
3440// Vectored into via hardware PALcode instruction dispatch.
3441// Implicit TRAPB performed by hardware.
3442//
3443// Function:
3444// Stall instruction issue until all prior instructions are guaranteed to
3445// complete without incurring aborts. For the EV5 implementation, this
3446// means waiting until all pending DREADS are returned.
3447//
3448//-
3449
3450 CALL_PAL_PRIV(PAL_DRAINA_ENTRY)
3451Call_Pal_Draina:
3452 	ldah	r14, 0x100(r31)		// Init timeout counter (ldah loads 0x100 << 16 iterations). Value?
3453 nop
3454
3455DRAINA_LOOP:
3456 subq r14, 1, r14 // Decrement counter
3457 mfpr r13, ev5__maf_mode // Fetch status bit
3458
3459 srl r13, maf_mode_v_dread_pending, r13
3460 ble r14, DRAINA_LOOP_TOO_LONG
3461
3462 nop
3463 blbs r13, DRAINA_LOOP // Wait until all DREADS clear
3464
3465 hw_rei
3466
3467DRAINA_LOOP_TOO_LONG:
3468 br r31, call_pal_halt
3469
3470// .sbttl "CALL_PAL OPCDECs"
3471
3472 CALL_PAL_PRIV(0x0003)
3473CallPal_OpcDec03:
3474 br r31, osfpal_calpal_opcdec
3475
3476 CALL_PAL_PRIV(0x0004)
3477CallPal_OpcDec04:
3478 br r31, osfpal_calpal_opcdec
3479
3480 CALL_PAL_PRIV(0x0005)
3481CallPal_OpcDec05:
3482 br r31, osfpal_calpal_opcdec
3483
3484 CALL_PAL_PRIV(0x0006)
3485CallPal_OpcDec06:
3486 br r31, osfpal_calpal_opcdec
3487
3488 CALL_PAL_PRIV(0x0007)
3489CallPal_OpcDec07:
3490 br r31, osfpal_calpal_opcdec
3491
3492 CALL_PAL_PRIV(0x0008)
3493CallPal_OpcDec08:
3494 br r31, osfpal_calpal_opcdec
3495
3496// .sbttl "CSERVE- PALcode for CSERVE instruction"
3497//+
3498//
3499// Entry:
3500// Vectored into via hardware PALcode instruction dispatch.
3501//
3502// Function:
3503// Various functions for private use of console software
3504//
3505// option selector in r0
3506// arguments in r16....
3507// The CSERVE routine is in the system specific module.
3508//
3509//-
3510
3511 CALL_PAL_PRIV(PAL_CSERVE_ENTRY)
3512Call_Pal_Cserve:
3513 br r31, sys_cserve
3514
3515// .sbttl "swppal - PALcode for swppal instruction"
3516
3517//+
3518//
3519// Entry:
3520 //	Vectored into via hardware PALcode instruction dispatch.
3522// R16 contains the new PAL identifier
3523// R17:R21 contain implementation-specific entry parameters
3524//
3525// R0 receives status:
3526// 0 success (PAL was switched)
3527// 1 unknown PAL variant
3528// 2 known PAL variant, but PAL not loaded
3529//
3530//
3531// Function:
3532// Swap control to another PAL.
3533//-
3534
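// For illustration (based on the checks in the code below): r16 values of
// 255 or less are treated as PAL variant identifiers -- only the value 2
// (OSF) is recognized here -- while larger values are treated as the
// physical base address of the new PALcode image to switch to.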
3535 CALL_PAL_PRIV(PAL_SWPPAL_ENTRY)
3536Call_Pal_Swppal:
3537 cmpule r16, 255, r0 // see if a kibble was passed
3538 cmoveq r16, r16, r0 // if r16=0 then a valid address (ECO 59)
3539
3540 	or	r16, r31, r3		// set r3 in case this is an address
3541 blbc r0, swppal_cont // nope, try it as an address
3542
3543 cmpeq r16, 2, r0 // is it our friend OSF?
3544 blbc r0, swppal_fail // nope, don't know this fellow
3545
3546 br r2, CALL_PAL_SWPPAL_10_ // tis our buddy OSF
3547
3548// .global osfpal_hw_entry_reset
3549// .weak osfpal_hw_entry_reset
3550// .long <osfpal_hw_entry_reset-pal_start>
3551//orig halt // don't know how to get the address here - kludge ok, load pal at 0
3552 .long 0 // ?? hack upon hack...pb
3553
3554CALL_PAL_SWPPAL_10_: ldlp r3, 0(r2) // fetch target addr
3555// ble r3, swppal_fail ; if OSF not linked in say not loaded.
3556 mfpr r2, pal_base // fetch pal base
3557
3558 addq r2, r3, r3 // add pal base
3559 lda r2, 0x3FFF(r31) // get pal base checker mask
3560
3561 and r3, r2, r2 // any funky bits set?
3562 cmpeq r2, 0, r0 //
3563
3564 blbc r0, swppal_fail // return unknown if bad bit set.
3565 br r31, swppal_cont
3566
3567// .sbttl "CALL_PAL OPCDECs"
3568
3569 CALL_PAL_PRIV(0x000B)
3570CallPal_OpcDec0B:
3571 br r31, osfpal_calpal_opcdec
3572
3573 CALL_PAL_PRIV(0x000C)
3574CallPal_OpcDec0C:
3575 br r31, osfpal_calpal_opcdec
3576
3577// .sbttl "wripir- PALcode for wripir instruction"
3578//+
3579//
3580// Entry:
3581// Vectored into via hardware PALcode instruction dispatch.
3582// r16 = processor number to interrupt
3583//
3584// Function:
3585// IPIR <- R16
3586// Handled in system-specific code
3587//
3588// Exit:
3589// interprocessor interrupt is recorded on the target processor
3590// and is initiated when the proper enabling conditions are present.
3591//-
3592
3593 CALL_PAL_PRIV(PAL_WRIPIR_ENTRY)
3594Call_Pal_Wrpir:
3595 br r31, sys_wripir
3596
3597// .sbttl "CALL_PAL OPCDECs"
3598
3599 CALL_PAL_PRIV(0x000E)
3600CallPal_OpcDec0E:
3601 br r31, osfpal_calpal_opcdec
3602
3603 CALL_PAL_PRIV(0x000F)
3604CallPal_OpcDec0F:
3605 br r31, osfpal_calpal_opcdec
3606
3607// .sbttl "rdmces- PALcode for rdmces instruction"
3608
3609//+
3610//
3611// Entry:
3612// Vectored into via hardware PALcode instruction dispatch.
3613//
3614// Function:
3615// R0 <- ZEXT(MCES)
3616//-
3617
3618 CALL_PAL_PRIV(PAL_RDMCES_ENTRY)
3619Call_Pal_Rdmces:
3620 mfpr r0, pt_mces // Read from PALtemp
3621 and r0, mces_m_all, r0 // Clear other bits
3622
3623 hw_rei
3624
3625// .sbttl "wrmces- PALcode for wrmces instruction"
3626
3627//+
3628//
3629// Entry:
3630// Vectored into via hardware PALcode instruction dispatch.
3631//
3632// Function:
3633// If {R16<0> EQ 1} then MCES<0> <- 0 (MCHK)
3634// If {R16<1> EQ 1} then MCES<1> <- 0 (SCE)
3635// If {R16<2> EQ 1} then MCES<2> <- 0 (PCE)
3636// MCES<3> <- R16<3> (DPC)
3637// MCES<4> <- R16<4> (DSC)
3638//
3639//-
3640
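// Implementation note (from the code below): MCHK, SCE and PCE are
// write-1-to-clear -- the set bits of a0 are inverted and ANDed into the
// current MCES value -- while DPC and DSC are simply overwritten from a0.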
3641 CALL_PAL_PRIV(PAL_WRMCES_ENTRY)
3642Call_Pal_Wrmces:
3643 and r16, ((1<<mces_v_mchk) | (1<<mces_v_sce) | (1<<mces_v_pce)), r13 // Isolate MCHK, SCE, PCE
3644 mfpr r14, pt_mces // Get current value
3645
3646 ornot r31, r13, r13 // Flip all the bits
3647 and r16, ((1<<mces_v_dpc) | (1<<mces_v_dsc)), r17
3648
3649 and r14, r13, r1 // Update MCHK, SCE, PCE
3650 bic r1, ((1<<mces_v_dpc) | (1<<mces_v_dsc)), r1 // Clear old DPC, DSC
3651
3652 or r1, r17, r1 // Update DPC and DSC
3653 mtpr r1, pt_mces // Write MCES back
3654
3655#if rawhide_system == 0
3656 nop // Pad to fix PT write->read restriction
3657#else
3658 blbs r16, RAWHIDE_clear_mchk_lock // Clear logout from lock
3659#endif
3660
3661 nop
3662 hw_rei
3663
3664
3665
3666// .sbttl "CALL_PAL OPCDECs"
3667
3668 CALL_PAL_PRIV(0x0012)
3669CallPal_OpcDec12:
3670 br r31, osfpal_calpal_opcdec
3671
3672 CALL_PAL_PRIV(0x0013)
3673CallPal_OpcDec13:
3674 br r31, osfpal_calpal_opcdec
3675
3676 CALL_PAL_PRIV(0x0014)
3677CallPal_OpcDec14:
3678 br r31, osfpal_calpal_opcdec
3679
3680 CALL_PAL_PRIV(0x0015)
3681CallPal_OpcDec15:
3682 br r31, osfpal_calpal_opcdec
3683
3684 CALL_PAL_PRIV(0x0016)
3685CallPal_OpcDec16:
3686 br r31, osfpal_calpal_opcdec
3687
3688 CALL_PAL_PRIV(0x0017)
3689CallPal_OpcDec17:
3690 br r31, osfpal_calpal_opcdec
3691
3692 CALL_PAL_PRIV(0x0018)
3693CallPal_OpcDec18:
3694 br r31, osfpal_calpal_opcdec
3695
3696 CALL_PAL_PRIV(0x0019)
3697CallPal_OpcDec19:
3698 br r31, osfpal_calpal_opcdec
3699
3700 CALL_PAL_PRIV(0x001A)
3701CallPal_OpcDec1A:
3702 br r31, osfpal_calpal_opcdec
3703
3704 CALL_PAL_PRIV(0x001B)
3705CallPal_OpcDec1B:
3706 br r31, osfpal_calpal_opcdec
3707
3708 CALL_PAL_PRIV(0x001C)
3709CallPal_OpcDec1C:
3710 br r31, osfpal_calpal_opcdec
3711
3712 CALL_PAL_PRIV(0x001D)
3713CallPal_OpcDec1D:
3714 br r31, osfpal_calpal_opcdec
3715
3716 CALL_PAL_PRIV(0x001E)
3717CallPal_OpcDec1E:
3718 br r31, osfpal_calpal_opcdec
3719
3720 CALL_PAL_PRIV(0x001F)
3721CallPal_OpcDec1F:
3722 br r31, osfpal_calpal_opcdec
3723
3724 CALL_PAL_PRIV(0x0020)
3725CallPal_OpcDec20:
3726 br r31, osfpal_calpal_opcdec
3727
3728 CALL_PAL_PRIV(0x0021)
3729CallPal_OpcDec21:
3730 br r31, osfpal_calpal_opcdec
3731
3732 CALL_PAL_PRIV(0x0022)
3733CallPal_OpcDec22:
3734 br r31, osfpal_calpal_opcdec
3735
3736 CALL_PAL_PRIV(0x0023)
3737CallPal_OpcDec23:
3738 br r31, osfpal_calpal_opcdec
3739
3740 CALL_PAL_PRIV(0x0024)
3741CallPal_OpcDec24:
3742 br r31, osfpal_calpal_opcdec
3743
3744 CALL_PAL_PRIV(0x0025)
3745CallPal_OpcDec25:
3746 br r31, osfpal_calpal_opcdec
3747
3748 CALL_PAL_PRIV(0x0026)
3749CallPal_OpcDec26:
3750 br r31, osfpal_calpal_opcdec
3751
3752 CALL_PAL_PRIV(0x0027)
3753CallPal_OpcDec27:
3754 br r31, osfpal_calpal_opcdec
3755
3756 CALL_PAL_PRIV(0x0028)
3757CallPal_OpcDec28:
3758 br r31, osfpal_calpal_opcdec
3759
3760 CALL_PAL_PRIV(0x0029)
3761CallPal_OpcDec29:
3762 br r31, osfpal_calpal_opcdec
3763
3764 CALL_PAL_PRIV(0x002A)
3765CallPal_OpcDec2A:
3766 br r31, osfpal_calpal_opcdec
3767
3768// .sbttl "wrfen - PALcode for wrfen instruction"
3769
3770//+
3771//
3772// Entry:
3773// Vectored into via hardware PALcode instruction dispatch.
3774//
3775// Function:
3776// a0<0> -> ICSR<FPE>
3777// Store new FEN in PCB
3778// Final value of t0 (r1), t8..t10 (r22..r24) and a0 (r16) are UNPREDICTABLE
3779//
3780// Issue: What about pending FP loads when FEN goes from on->off????
3781//-
3782
3783 CALL_PAL_PRIV(PAL_WRFEN_ENTRY)
3784Call_Pal_Wrfen:
3785 or r31, 1, r13 // Get a one
3786 mfpr r1, ev5__icsr // Get current FPE
3787
3788 sll r13, icsr_v_fpe, r13 // shift 1 to icsr<fpe> spot, e0
3789 and r16, 1, r16 // clean new fen
3790
3791 sll r16, icsr_v_fpe, r12 // shift new fen to correct bit position
3792 bic r1, r13, r1 // zero icsr<fpe>
3793
3794 or r1, r12, r1 // Or new FEN into ICSR
3795 mfpr r12, pt_pcbb // Get PCBB - E1
3796
3797 mtpr r1, ev5__icsr // write new ICSR. 3 Bubble cycles to HW_REI
3798 stlp r16, osfpcb_q_fen(r12) // Store FEN in PCB.
3799
3800 mfpr r31, pt0 // Pad ICSR<FPE> write.
3801 mfpr r31, pt0
3802
3803 mfpr r31, pt0
3804// pvc_violate 225 // cuz PVC can't distinguish which bits changed
3805 hw_rei
3806
3807
3808 CALL_PAL_PRIV(0x002C)
3809CallPal_OpcDec2C:
3810 br r31, osfpal_calpal_opcdec
3811
3812// .sbttl "wrvptpr - PALcode for wrvptpr instruction"
3813//+
3814//
3815// Entry:
3816// Vectored into via hardware PALcode instruction dispatch.
3817//
3818// Function:
3819// vptptr <- a0 (r16)
3820//-
3821
3822 CALL_PAL_PRIV(PAL_WRVPTPTR_ENTRY)
3823Call_Pal_Wrvptptr:
3824 mtpr r16, ev5__mvptbr // Load Mbox copy
3825 mtpr r16, ev5__ivptbr // Load Ibox copy
3826 nop // Pad IPR write
3827 nop
3828 hw_rei
3829
3830 CALL_PAL_PRIV(0x002E)
3831CallPal_OpcDec2E:
3832 br r31, osfpal_calpal_opcdec
3833
3834 CALL_PAL_PRIV(0x002F)
3835CallPal_OpcDec2F:
3836 br r31, osfpal_calpal_opcdec
3837
3838// .sbttl "swpctx- PALcode for swpctx instruction"
3839
3840//+
3841//
3842// Entry:
3843// hardware dispatch via callPal instruction
3844// R16 -> new pcb
3845//
3846// Function:
3847// dynamic state moved to old pcb
3848// new state loaded from new pcb
3849// pcbb pointer set
3850// old pcbb returned in R0
3851//
3852// Note: need to add perf monitor stuff
3853//-
3854
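// Timing note (from the code below): rpcc returns the cycle counter in the
// low longword and a software offset in the high longword; the two halves
// are added (addl) to form the process time saved in the old PCB.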
3855 CALL_PAL_PRIV(PAL_SWPCTX_ENTRY)
3856Call_Pal_Swpctx:
3857 rpcc r13 // get cyccounter
3858 mfpr r0, pt_pcbb // get pcbb
3859
3860 ldqp r22, osfpcb_q_fen(r16) // get new fen/pme
3861 ldqp r23, osfpcb_l_cc(r16) // get new asn
3862
3863 srl r13, 32, r25 // move offset
3864 mfpr r24, pt_usp // get usp
3865
3866 stqp r30, osfpcb_q_ksp(r0) // store old ksp
3867// pvc_violate 379 // stqp can't trap except replay. only problem if mf same ipr in same shadow.
3868 mtpr r16, pt_pcbb // set new pcbb
3869
3870 stqp r24, osfpcb_q_usp(r0) // store usp
3871 addl r13, r25, r25 // merge for new time
3872
3873 stlp r25, osfpcb_l_cc(r0) // save time
3874 ldah r24, (1<<(icsr_v_fpe-16))(r31)
3875
3876 and r22, 1, r12 // isolate fen
3877 mfpr r25, icsr // get current icsr
3878
3879 ev5_pass2 lda r24, (1<<icsr_v_pmp)(r24)
3880 br r31, swpctx_cont
3881
3882// .sbttl "wrval - PALcode for wrval instruction"
3883//+
3884//
3885// Entry:
3886// Vectored into via hardware PALcode instruction dispatch.
3887//
3888// Function:
3889// sysvalue <- a0 (r16)
3890//-
3891
3892 CALL_PAL_PRIV(PAL_WRVAL_ENTRY)
3893Call_Pal_Wrval:
3894 nop
3895 mtpr r16, pt_sysval // Pad paltemp write
3896 nop
3897 nop
3898 hw_rei
3899
3900
3901// .sbttl "rdval - PALcode for rdval instruction"
3902
3903//+
3904//
3905// Entry:
3906// Vectored into via hardware PALcode instruction dispatch.
3907//
3908// Function:
3909// v0 (r0) <- sysvalue
3910//-
3911
3912 CALL_PAL_PRIV(PAL_RDVAL_ENTRY)
3913Call_Pal_Rdval:
3914 nop
3915 mfpr r0, pt_sysval
3916 nop
3917 hw_rei
3918
3919// .sbttl "tbi - PALcode for tbi instruction"
3920//+
3921//
3922// Entry:
3923// Vectored into via hardware PALcode instruction dispatch.
3924//
3925// Function:
3926// TB invalidate
3927// r16/a0 = TBI type
3928// r17/a1 = Va for TBISx instructions
3929//-
3930
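// Dispatch sketch (from the code below): the TBI type in a0 is assumed to
// span -2..3; adding 2 maps it to a table index 0..5, and each tbi_tbl
// entry is 16 bytes (four instructions), so the handler is reached at
// tbi_tbl + (a0 + 2) * 16. Out-of-range values simply hw_rei.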
3931 CALL_PAL_PRIV(PAL_TBI_ENTRY)
3932Call_Pal_Tbi:
3933 	addq	r16, 2, r16		// bias type so the valid range becomes 0-5
3934 br r23, CALL_PAL_tbi_10_ // get our address
3935
3936CALL_PAL_tbi_10_: cmpult r16, 6, r22 // see if in range
3937 lda r23, tbi_tbl-CALL_PAL_tbi_10_(r23) // set base to start of table
3938 sll r16, 4, r16 // * 16
3939 blbc r22, CALL_PAL_tbi_30_ // go rei, if not
3940
3941 addq r23, r16, r23 // addr of our code
3942//orig pvc_jsr tbi
3943 jmp r31, (r23) // and go do it
3944
3945CALL_PAL_tbi_30_:
3946 hw_rei
3947 nop
3948
3949// .sbttl "wrent - PALcode for wrent instruction"
3950//+
3951//
3952// Entry:
3953// Vectored into via hardware PALcode instruction dispatch.
3954//
3955// Function:
3956// Update ent* in paltemps
3957// r16/a0 = Address of entry routine
3958// r17/a1 = Entry Number 0..5
3959//
3960// r22, r23 trashed
3961//-
3962
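// Worked example (from the code below): each wrent_tbl entry is 16 bytes,
// so a1 = 3 selects the slot at wrent_tbl + 48; out-of-range entry numbers
// return via hw_rei without updating anything.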
3963 CALL_PAL_PRIV(PAL_WRENT_ENTRY)
3964Call_Pal_Wrent:
3965 cmpult r17, 6, r22 // see if in range
3966 br r23, CALL_PAL_wrent_10_ // get our address
3967
3968CALL_PAL_wrent_10_: bic r16, 3, r16 // clean pc
3969 blbc r22, CALL_PAL_wrent_30_ // go rei, if not in range
3970
3971 lda r23, wrent_tbl-CALL_PAL_wrent_10_(r23) // set base to start of table
3972 sll r17, 4, r17 // *16
3973
3974 addq r17, r23, r23 // Get address in table
3975//orig pvc_jsr wrent
3976 jmp r31, (r23) // and go do it
3977
3978CALL_PAL_wrent_30_:
3979 hw_rei // out of range, just return
3980
3981// .sbttl "swpipl - PALcode for swpipl instruction"
3982//+
3983//
3984// Entry:
3985// Vectored into via hardware PALcode instruction dispatch.
3986//
3987// Function:
3988// v0 (r0) <- PS<IPL>
3989// PS<IPL> <- a0<2:0> (r16)
3990//
3991// t8 (r22) is scratch
3992//-
3993
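// Note on the lookup below: pt_intmask is assumed to hold one 8-bit
// interrupt-enable mask per IPL, so extbl with the new IPL as the byte
// selector yields the mask that is written to the Ibox IPL register.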
3994 CALL_PAL_PRIV(PAL_SWPIPL_ENTRY)
3995Call_Pal_Swpipl:
3996 and r16, osfps_m_ipl, r16 // clean New ipl
3997 mfpr r22, pt_intmask // get int mask
3998
3999 extbl r22, r16, r22 // get mask for this ipl
4000 bis r11, r31, r0 // return old ipl
4001
4002 bis r16, r31, r11 // set new ps
4003 mtpr r22, ev5__ipl // set new mask
4004
4005 mfpr r31, pt0 // pad ipl write
4006 mfpr r31, pt0 // pad ipl write
4007
4008 hw_rei // back
4009
4010// .sbttl "rdps - PALcode for rdps instruction"
4011//+
4012//
4013// Entry:
4014// Vectored into via hardware PALcode instruction dispatch.
4015//
4016// Function:
4017// v0 (r0) <- ps
4018//-
4019
4020 CALL_PAL_PRIV(PAL_RDPS_ENTRY)
4021Call_Pal_Rdps:
4022 bis r11, r31, r0 // Fetch PALshadow PS
4023 nop // Must be 2 cycles long
4024 hw_rei
4025
4026// .sbttl "wrkgp - PALcode for wrkgp instruction"
4027//+
4028//
4029// Entry:
4030// Vectored into via hardware PALcode instruction dispatch.
4031//
4032// Function:
4033// kgp <- a0 (r16)
4034//-
4035
4036 CALL_PAL_PRIV(PAL_WRKGP_ENTRY)
4037Call_Pal_Wrkgp:
4038 nop
4039 mtpr r16, pt_kgp
4040 nop // Pad for pt write->read restriction
4041 nop
4042 hw_rei
4043
4044// .sbttl "wrusp - PALcode for wrusp instruction"
4045//+
4046//
4047// Entry:
4048// Vectored into via hardware PALcode instruction dispatch.
4049//
4050// Function:
4051// usp <- a0 (r16)
4052//-
4053
4054 CALL_PAL_PRIV(PAL_WRUSP_ENTRY)
4055Call_Pal_Wrusp:
4056 nop
4057 mtpr r16, pt_usp
4058 nop // Pad possible pt write->read restriction
4059 nop
4060 hw_rei
4061
4062// .sbttl "wrperfmon - PALcode for wrperfmon instruction"
4063//+
4064//
4065// Entry:
4066// Vectored into via hardware PALcode instruction dispatch.
4067//
4068//
4069// Function:
4070// Various control functions for the onchip performance counters
4071//
4072// option selector in r16
4073// option argument in r17
4074// returned status in r0
4075//
4076//
4077// r16 = 0 Disable performance monitoring for one or more cpu's
4078// r17 = 0 disable no counters
4079// r17 = bitmask disable counters specified in bit mask (1=disable)
4080//
4081// r16 = 1 Enable performance monitoring for one or more cpu's
4082// r17 = 0 enable no counters
4083// r17 = bitmask enable counters specified in bit mask (1=enable)
4084//
4085// r16 = 2 Mux select for one or more cpu's
4086// r17 = Mux selection (cpu specific)
4087// <24:19> bc_ctl<pm_mux_sel> field (see spec)
4088// <31>,<7:4>,<3:0> pmctr <sel0>,<sel1>,<sel2> fields (see spec)
4089//
4090// r16 = 3 Options
4091// r17 = (cpu specific)
4092// <0> = 0 log all processes
4093// <0> = 1 log only selected processes
4094// <30,9,8> mode select - ku,kp,kk
4095//
4096// r16 = 4 Interrupt frequency select
4097// r17 = (cpu specific) indicates interrupt frequencies desired for each
4098// counter, with "zero interrupts" being an option
4099// frequency info in r17 bits as defined by PMCTR_CTL<FRQx> below
4100//
4101// r16 = 5 Read Counters
4102// r17 = na
4103// r0 = value (same format as ev5 pmctr)
4104// <0> = 0 Read failed
4105// <0> = 1 Read succeeded
4106//
4107// r16 = 6 Write Counters
4108// r17 = value (same format as ev5 pmctr; all counters written simultaneously)
4109//
4110// r16 = 7 Enable performance monitoring for one or more cpu's and reset counter to 0
4111// r17 = 0 enable no counters
4112// r17 = bitmask enable & clear counters specified in bit mask (1=enable & clear)
4113//
4114//=============================================================================
4115//Assumptions:
4116//PMCTR_CTL:
4117//
4118// <15:14> CTL0 -- encoded frequency select and enable - CTR0
4119// <13:12> CTL1 -- " - CTR1
4120// <11:10> CTL2 -- " - CTR2
4121//
4122// <9:8> FRQ0 -- frequency select for CTR0 (no enable info)
4123// <7:6> FRQ1 -- frequency select for CTR1
4124// <5:4> FRQ2 -- frequency select for CTR2
4125//
4126// <0> all vs. select processes (0=all,1=select)
4127//
4128// where
4129// FRQx<1:0>
4130// 0 1 disable interrupt
4131// 1 0 frequency = 65536 (16384 for ctr2)
4132// 1 1 frequency = 256
4133// note: FRQx<1:0> = 00 will keep counters from ever being enabled.
4134//
4135//=============================================================================
4136//
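// Illustrative caller sequence (kernel side, not part of this PALcode),
// assuming the option encodings above; the exact bitmask assignment is
// cpu specific:
//	lda	a0, 7(zero)		// r16 = 7: enable and clear counters
//	lda	a1, 7(zero)		// r17 = mask selecting all three counters
//	call_pal 0x39			// wrperfmon
//					// status, if any, returns in v0 (r0)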
4137 CALL_PAL_PRIV(0x0039)
4138// unsupported in Hudson code .. pboyle Nov/95
4139CALL_PAL_Wrperfmon:
4140#if perfmon_debug == 0
4141 // "real" performance monitoring code
4142 cmpeq r16, 1, r0 // check for enable
4143 bne r0, perfmon_en // br if requested to enable
4144
4145 cmpeq r16, 2, r0 // check for mux ctl
4146 bne r0, perfmon_muxctl // br if request to set mux controls
4147
4148 cmpeq r16, 3, r0 // check for options
4149 bne r0, perfmon_ctl // br if request to set options
4150
4151 cmpeq r16, 4, r0 // check for interrupt frequency select
4152 bne r0, perfmon_freq // br if request to change frequency select
4153
4154 cmpeq r16, 5, r0 // check for counter read request
4155 bne r0, perfmon_rd // br if request to read counters
4156
4157 cmpeq r16, 6, r0 // check for counter write request
4158 bne r0, perfmon_wr // br if request to write counters
4159
4160 cmpeq r16, 7, r0 // check for counter clear/enable request
4161 bne r0, perfmon_enclr // br if request to clear/enable counters
4162
4163 beq r16, perfmon_dis // br if requested to disable (r16=0)
4164 br r31, perfmon_unknown // br if unknown request
4165#else
4166
4167 br r31, pal_perfmon_debug
4168#endif
4169
4170// .sbttl "rdusp - PALcode for rdusp instruction"
4171//+
4172//
4173// Entry:
4174// Vectored into via hardware PALcode instruction dispatch.
4175//
4176// Function:
4177// v0 (r0) <- usp
4178//-
4179
4180 CALL_PAL_PRIV(PAL_RDUSP_ENTRY)
4181Call_Pal_Rdusp:
4182 nop
4183 mfpr r0, pt_usp
4184 hw_rei
4185
4186
4187 CALL_PAL_PRIV(0x003B)
4188CallPal_OpcDec3B:
4189 br r31, osfpal_calpal_opcdec
4190
4191// .sbttl "whami - PALcode for whami instruction"
4192//+
4193//
4194// Entry:
4195// Vectored into via hardware PALcode instruction dispatch.
4196//
4197// Function:
4198// v0 (r0) <- whami
4199//-
4200 CALL_PAL_PRIV(PAL_WHAMI_ENTRY)
4201Call_Pal_Whami:
4202 nop
4203 mfpr r0, pt_whami // Get Whami
4204 extbl r0, 1, r0 // Isolate just whami bits
4205 hw_rei
4206
4207// .sbttl "retsys - PALcode for retsys instruction"
4208//
4209// Entry:
4210// Vectored into via hardware PALcode instruction dispatch.
4211// 00(sp) contains return pc
4212// 08(sp) contains r29
4213//
4214// Function:
4215// Return from system call.
4216// mode switched from kern to user.
4217// stacks swapped, ugp, upc restored.
4218// r23, r25 junked
4219//-
4220
4221 CALL_PAL_PRIV(PAL_RETSYS_ENTRY)
4222Call_Pal_Retsys:
4223 lda r25, osfsf_c_size(sp) // pop stack
4224 bis r25, r31, r14 // touch r25 & r14 to stall mf exc_addr
4225
4226 mfpr r14, exc_addr // save exc_addr in case of fault
4227 ldq r23, osfsf_pc(sp) // get pc
4228
4229 ldq r29, osfsf_gp(sp) // get gp
4230 stl_c r31, -4(sp) // clear lock_flag
4231
4232 lda r11, 1<<osfps_v_mode(r31)// new PS:mode=user
4233 mfpr r30, pt_usp // get users stack
4234
4235 bic r23, 3, r23 // clean return pc
4236 mtpr r31, ev5__ipl // zero ibox IPL - 2 bubbles to hw_rei
4237
4238 mtpr r11, ev5__dtb_cm // set Mbox current mode - no virt ref for 2 cycles
4239 mtpr r11, ev5__ps // set Ibox current mode - 2 bubble to hw_rei
4240
4241 mtpr r23, exc_addr // set return address - 1 bubble to hw_rei
4242 mtpr r25, pt_ksp // save kern stack
4243
4244 rc r31 // clear inter_flag
4245// pvc_violate 248 // possible hidden mt->mf pt violation ok in callpal
4246 hw_rei_spe // and back
4247
4248
4249 CALL_PAL_PRIV(0x003E)
4250CallPal_OpcDec3E:
4251 br r31, osfpal_calpal_opcdec
4252
4253// .sbttl "rti - PALcode for rti instruction"
4254//+
4255//
4256// Entry:
4257// Vectored into via hardware PALcode instruction dispatch.
4258//
4259// Function:
4260// 00(sp) -> ps
4261// 08(sp) -> pc
4262// 16(sp) -> r29 (gp)
4263// 24(sp) -> r16 (a0)
4264// 32(sp) -> r17 (a1)
4265 //	40(sp) -> r18 (a2)
4266//-
4267
4268 CALL_PAL_PRIV(PAL_RTI_ENTRY)
4269#ifdef SIMOS
4270 /* called once by platform_tlaser */
4271 .globl Call_Pal_Rti
4272#endif
4273Call_Pal_Rti:
4274 lda r25, osfsf_c_size(sp) // get updated sp
4275 bis r25, r31, r14 // touch r14,r25 to stall mf exc_addr
4276
4277 mfpr r14, exc_addr // save PC in case of fault
4278 rc r31 // clear intr_flag
4279
4280 ldq r12, -6*8(r25) // get ps
4281 ldq r13, -5*8(r25) // pc
4282
4283 ldq r18, -1*8(r25) // a2
4284 ldq r17, -2*8(r25) // a1
4285
4286 ldq r16, -3*8(r25) // a0
4287 ldq r29, -4*8(r25) // gp
4288
4289 bic r13, 3, r13 // clean return pc
4290 stl_c r31, -4(r25) // clear lock_flag
4291
4292 and r12, osfps_m_mode, r11 // get mode
4293 mtpr r13, exc_addr // set return address
4294
4295 beq r11, rti_to_kern // br if rti to Kern
4296 br r31, rti_to_user // out of call_pal space
4297
4298
4299// .sbttl "Start the Unprivileged CALL_PAL Entry Points"
4300// .sbttl "bpt- PALcode for bpt instruction"
4301//+
4302//
4303// Entry:
4304// Vectored into via hardware PALcode instruction dispatch.
4305//
4306// Function:
4307// Build stack frame
4308// a0 <- code
4309// a1 <- unpred
4310// a2 <- unpred
4311// vector via entIF
4312//
4313//-
4314//
4315 .text 1
4316// . = 0x3000
4317 CALL_PAL_UNPRIV(PAL_BPT_ENTRY)
4318Call_Pal_Bpt:
4319 sll r11, 63-osfps_v_mode, r25 // Shift mode up to MS bit
4320 mtpr r31, ev5__ps // Set Ibox current mode to kernel
4321
4322 bis r11, r31, r12 // Save PS for stack write
4323 bge r25, CALL_PAL_bpt_10_ // no stack swap needed if cm=kern
4324
4325 mtpr r31, ev5__dtb_cm // Set Mbox current mode to kernel -
4326 // no virt ref for next 2 cycles
4327 mtpr r30, pt_usp // save user stack
4328
4329 bis r31, r31, r11 // Set new PS
4330 mfpr r30, pt_ksp
4331
4332CALL_PAL_bpt_10_:
4333 lda sp, 0-osfsf_c_size(sp)// allocate stack space
4334 mfpr r14, exc_addr // get pc
4335
4336 stq r16, osfsf_a0(sp) // save regs
4337 bis r31, osf_a0_bpt, r16 // set a0
4338
4339 stq r17, osfsf_a1(sp) // a1
4340 br r31, bpt_bchk_common // out of call_pal space
4341
4342
4343// .sbttl "bugchk- PALcode for bugchk instruction"
4344//+
4345//
4346// Entry:
4347// Vectored into via hardware PALcode instruction dispatch.
4348//
4349// Function:
4350// Build stack frame
4351// a0 <- code
4352// a1 <- unpred
4353// a2 <- unpred
4354// vector via entIF
4355//
4356//-
4357//
4358 CALL_PAL_UNPRIV(PAL_BUGCHK_ENTRY)
4359Call_Pal_Bugchk:
4360 sll r11, 63-osfps_v_mode, r25 // Shift mode up to MS bit
4361 mtpr r31, ev5__ps // Set Ibox current mode to kernel
4362
4363 bis r11, r31, r12 // Save PS for stack write
4364 bge r25, CALL_PAL_bugchk_10_ // no stack swap needed if cm=kern
4365
4366 mtpr r31, ev5__dtb_cm // Set Mbox current mode to kernel -
4367 // no virt ref for next 2 cycles
4368 mtpr r30, pt_usp // save user stack
4369
4370 bis r31, r31, r11 // Set new PS
4371 mfpr r30, pt_ksp
4372
4373CALL_PAL_bugchk_10_:
4374 lda sp, 0-osfsf_c_size(sp)// allocate stack space
4375 mfpr r14, exc_addr // get pc
4376
4377 stq r16, osfsf_a0(sp) // save regs
4378 bis r31, osf_a0_bugchk, r16 // set a0
4379
4380 stq r17, osfsf_a1(sp) // a1
4381 br r31, bpt_bchk_common // out of call_pal space
4382
4383
4384 CALL_PAL_UNPRIV(0x0082)
4385CallPal_OpcDec82:
4386 br r31, osfpal_calpal_opcdec
4387
4388// .sbttl "callsys - PALcode for callsys instruction"
4389//+
4390//
4391// Entry:
4392// Vectored into via hardware PALcode instruction dispatch.
4393//
4394// Function:
4395// Switch mode to kernel and build a callsys stack frame.
4396// sp = ksp
4397// gp = kgp
4398// t8 - t10 (r22-r24) trashed
4399//
4400//-
4401//
4402 CALL_PAL_UNPRIV(PAL_CALLSYS_ENTRY)
4403Call_Pal_Callsys:
4404
4405 and r11, osfps_m_mode, r24 // get mode
4406 mfpr r22, pt_ksp // get ksp
4407
4408 beq r24, sys_from_kern // sysCall from kern is not allowed
4409 mfpr r12, pt_entsys // get address of callSys routine
4410
4411//+
4412// from here on we know we are in user going to Kern
4413//-
4414 mtpr r31, ev5__dtb_cm // set Mbox current mode - no virt ref for 2 cycles
4415 mtpr r31, ev5__ps // set Ibox current mode - 2 bubble to hw_rei
4416
4417 bis r31, r31, r11 // PS=0 (mode=kern)
4418 mfpr r23, exc_addr // get pc
4419
4420 mtpr r30, pt_usp // save usp
4421 lda sp, 0-osfsf_c_size(r22)// set new sp
4422
4423 stq r29, osfsf_gp(sp) // save user gp/r29
4424 stq r24, osfsf_ps(sp) // save ps
4425
4426 stq r23, osfsf_pc(sp) // save pc
4427 mtpr r12, exc_addr // set address
4428 // 1 cycle to hw_rei
4429
4430 mfpr r29, pt_kgp // get the kern gp/r29
4431
4432 hw_rei_spe // and off we go!
4433
4434
4435 CALL_PAL_UNPRIV(0x0084)
4436CallPal_OpcDec84:
4437 br r31, osfpal_calpal_opcdec
4438
4439 CALL_PAL_UNPRIV(0x0085)
4440CallPal_OpcDec85:
4441 br r31, osfpal_calpal_opcdec
4442
4443// .sbttl "imb - PALcode for imb instruction"
4444//+
4445//
4446// Entry:
4447// Vectored into via hardware PALcode instruction dispatch.
4448//
4449// Function:
4450// Flush the writebuffer and flush the Icache
4451//
4452//-
4453//
4454 CALL_PAL_UNPRIV(PAL_IMB_ENTRY)
4455Call_Pal_Imb:
4456 mb // Clear the writebuffer
4457 mfpr r31, ev5__mcsr // Sync with clear
4458 nop
4459 nop
4460 br r31, pal_ic_flush // Flush Icache
4461
4462
4463// .sbttl "CALL_PAL OPCDECs"
4464
4465 CALL_PAL_UNPRIV(0x0087)
4466CallPal_OpcDec87:
4467 br r31, osfpal_calpal_opcdec
4468
4469 CALL_PAL_UNPRIV(0x0088)
4470CallPal_OpcDec88:
4471 br r31, osfpal_calpal_opcdec
4472
4473 CALL_PAL_UNPRIV(0x0089)
4474CallPal_OpcDec89:
4475 br r31, osfpal_calpal_opcdec
4476
4477 CALL_PAL_UNPRIV(0x008A)
4478CallPal_OpcDec8A:
4479 br r31, osfpal_calpal_opcdec
4480
4481 CALL_PAL_UNPRIV(0x008B)
4482CallPal_OpcDec8B:
4483 br r31, osfpal_calpal_opcdec
4484
4485 CALL_PAL_UNPRIV(0x008C)
4486CallPal_OpcDec8C:
4487 br r31, osfpal_calpal_opcdec
4488
4489 CALL_PAL_UNPRIV(0x008D)
4490CallPal_OpcDec8D:
4491 br r31, osfpal_calpal_opcdec
4492
4493 CALL_PAL_UNPRIV(0x008E)
4494CallPal_OpcDec8E:
4495 br r31, osfpal_calpal_opcdec
4496
4497 CALL_PAL_UNPRIV(0x008F)
4498CallPal_OpcDec8F:
4499 br r31, osfpal_calpal_opcdec
4500
4501 CALL_PAL_UNPRIV(0x0090)
4502CallPal_OpcDec90:
4503 br r31, osfpal_calpal_opcdec
4504
4505 CALL_PAL_UNPRIV(0x0091)
4506CallPal_OpcDec91:
4507 br r31, osfpal_calpal_opcdec
4508
4509 CALL_PAL_UNPRIV(0x0092)
4510CallPal_OpcDec92:
4511 br r31, osfpal_calpal_opcdec
4512
4513 CALL_PAL_UNPRIV(0x0093)
4514CallPal_OpcDec93:
4515 br r31, osfpal_calpal_opcdec
4516
4517 CALL_PAL_UNPRIV(0x0094)
4518CallPal_OpcDec94:
4519 br r31, osfpal_calpal_opcdec
4520
4521 CALL_PAL_UNPRIV(0x0095)
4522CallPal_OpcDec95:
4523 br r31, osfpal_calpal_opcdec
4524
4525 CALL_PAL_UNPRIV(0x0096)
4526CallPal_OpcDec96:
4527 br r31, osfpal_calpal_opcdec
4528
4529 CALL_PAL_UNPRIV(0x0097)
4530CallPal_OpcDec97:
4531 br r31, osfpal_calpal_opcdec
4532
4533 CALL_PAL_UNPRIV(0x0098)
4534CallPal_OpcDec98:
4535 br r31, osfpal_calpal_opcdec
4536
4537 CALL_PAL_UNPRIV(0x0099)
4538CallPal_OpcDec99:
4539 br r31, osfpal_calpal_opcdec
4540
4541 CALL_PAL_UNPRIV(0x009A)
4542CallPal_OpcDec9A:
4543 br r31, osfpal_calpal_opcdec
4544
4545 CALL_PAL_UNPRIV(0x009B)
4546CallPal_OpcDec9B:
4547 br r31, osfpal_calpal_opcdec
4548
4549 CALL_PAL_UNPRIV(0x009C)
4550CallPal_OpcDec9C:
4551 br r31, osfpal_calpal_opcdec
4552
4553 CALL_PAL_UNPRIV(0x009D)
4554CallPal_OpcDec9D:
4555 br r31, osfpal_calpal_opcdec
4556
4557// .sbttl "rdunique - PALcode for rdunique instruction"
4558//+
4559//
4560// Entry:
4561// Vectored into via hardware PALcode instruction dispatch.
4562//
4563// Function:
4564// v0 (r0) <- unique
4565//
4566//-
4567//
4568 CALL_PAL_UNPRIV(PAL_RDUNIQUE_ENTRY)
4569CALL_PALrdunique_:
4570 mfpr r0, pt_pcbb // get pcb pointer
4571 ldqp r0, osfpcb_q_unique(r0) // get new value
4572
4573 hw_rei
4574
4575// .sbttl "wrunique - PALcode for wrunique instruction"
4576//+
4577//
4578// Entry:
4579// Vectored into via hardware PALcode instruction dispatch.
4580//
4581// Function:
4582// unique <- a0 (r16)
4583//
4584//-
4585//
4586CALL_PAL_UNPRIV(PAL_WRUNIQUE_ENTRY)
4587CALL_PAL_Wrunique:
4588 nop
4589 mfpr r12, pt_pcbb // get pcb pointer
4590 stqp r16, osfpcb_q_unique(r12)// get new value
4591 nop // Pad palshadow write
4592 hw_rei // back
4593
4594// .sbttl "CALL_PAL OPCDECs"
4595
4596 CALL_PAL_UNPRIV(0x00A0)
4597CallPal_OpcDecA0:
4598 br r31, osfpal_calpal_opcdec
4599
4600 CALL_PAL_UNPRIV(0x00A1)
4601CallPal_OpcDecA1:
4602 br r31, osfpal_calpal_opcdec
4603
4604 CALL_PAL_UNPRIV(0x00A2)
4605CallPal_OpcDecA2:
4606 br r31, osfpal_calpal_opcdec
4607
4608 CALL_PAL_UNPRIV(0x00A3)
4609CallPal_OpcDecA3:
4610 br r31, osfpal_calpal_opcdec
4611
4612 CALL_PAL_UNPRIV(0x00A4)
4613CallPal_OpcDecA4:
4614 br r31, osfpal_calpal_opcdec
4615
4616 CALL_PAL_UNPRIV(0x00A5)
4617CallPal_OpcDecA5:
4618 br r31, osfpal_calpal_opcdec
4619
4620 CALL_PAL_UNPRIV(0x00A6)
4621CallPal_OpcDecA6:
4622 br r31, osfpal_calpal_opcdec
4623
4624 CALL_PAL_UNPRIV(0x00A7)
4625CallPal_OpcDecA7:
4626 br r31, osfpal_calpal_opcdec
4627
4628 CALL_PAL_UNPRIV(0x00A8)
4629CallPal_OpcDecA8:
4630 br r31, osfpal_calpal_opcdec
4631
4632 CALL_PAL_UNPRIV(0x00A9)
4633CallPal_OpcDecA9:
4634 br r31, osfpal_calpal_opcdec
4635
4636
4637// .sbttl "gentrap - PALcode for gentrap instruction"
4638//+
4639// CALL_PAL_gentrap:
4640// Entry:
4641// Vectored into via hardware PALcode instruction dispatch.
4642//
4643// Function:
4644// Build stack frame
4645// a0 <- code
4646// a1 <- unpred
4647// a2 <- unpred
4648// vector via entIF
4649//
4650//-
4651
4652 CALL_PAL_UNPRIV(0x00AA)
4653// unsupported in Hudson code .. pboyle Nov/95
4654CALL_PAL_gentrap:
4655 sll r11, 63-osfps_v_mode, r25 // Shift mode up to MS bit
4656 mtpr r31, ev5__ps // Set Ibox current mode to kernel
4657
4658 bis r11, r31, r12 // Save PS for stack write
4659 bge r25, CALL_PAL_gentrap_10_ // no stack swap needed if cm=kern
4660
4661 mtpr r31, ev5__dtb_cm // Set Mbox current mode to kernel -
4662 // no virt ref for next 2 cycles
4663 mtpr r30, pt_usp // save user stack
4664
4665 bis r31, r31, r11 // Set new PS
4666 mfpr r30, pt_ksp
4667
4668CALL_PAL_gentrap_10_:
4669 lda sp, 0-osfsf_c_size(sp)// allocate stack space
4670 mfpr r14, exc_addr // get pc
4671
4672 stq r16, osfsf_a0(sp) // save regs
4673 bis r31, osf_a0_gentrap, r16// set a0
4674
4675 stq r17, osfsf_a1(sp) // a1
4676 br r31, bpt_bchk_common // out of call_pal space
4677
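// What happens here (the remainder lives in bpt_bchk_common, which by its
// name is shared with the bpt/bugchk paths, because this entry runs out of
// CALL_PAL dispatch space): set the Ibox current mode to kernel; if the
// caller was in user mode, also switch the Mbox to kernel, save the user SP
// in pt_usp and load the kernel SP; open an osfsf_c_size exception frame,
// save the caller's a0/a1 into it, and set a0 to osf_a0_gentrap so the OS
// entIF handler can distinguish gentrap from the other instruction-fault
// style entries.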
4678
4679// .sbttl "CALL_PAL OPCDECs"
4680
4681 CALL_PAL_UNPRIV(0x00AB)
4682CallPal_OpcDecAB:
4683 br r31, osfpal_calpal_opcdec
4684
4685 CALL_PAL_UNPRIV(0x00AC)
4686CallPal_OpcDecAC:
4687 br r31, osfpal_calpal_opcdec
4688
4689 CALL_PAL_UNPRIV(0x00AD)
4690CallPal_OpcDecAD:
4691 br r31, osfpal_calpal_opcdec
4692
4693 CALL_PAL_UNPRIV(0x00AE)
4694CallPal_OpcDecAE:
4695 br r31, osfpal_calpal_opcdec
4696
4697 CALL_PAL_UNPRIV(0x00AF)
4698CallPal_OpcDecAF:
4699 br r31, osfpal_calpal_opcdec
4700
4701 CALL_PAL_UNPRIV(0x00B0)
4702CallPal_OpcDecB0:
4703 br r31, osfpal_calpal_opcdec
4704
4705 CALL_PAL_UNPRIV(0x00B1)
4706CallPal_OpcDecB1:
4707 br r31, osfpal_calpal_opcdec
4708
4709 CALL_PAL_UNPRIV(0x00B2)
4710CallPal_OpcDecB2:
4711 br r31, osfpal_calpal_opcdec
4712
4713 CALL_PAL_UNPRIV(0x00B3)
4714CallPal_OpcDecB3:
4715 br r31, osfpal_calpal_opcdec
4716
4717 CALL_PAL_UNPRIV(0x00B4)
4718CallPal_OpcDecB4:
4719 br r31, osfpal_calpal_opcdec
4720
4721 CALL_PAL_UNPRIV(0x00B5)
4722CallPal_OpcDecB5:
4723 br r31, osfpal_calpal_opcdec
4724
4725 CALL_PAL_UNPRIV(0x00B6)
4726CallPal_OpcDecB6:
4727 br r31, osfpal_calpal_opcdec
4728
4729 CALL_PAL_UNPRIV(0x00B7)
4730CallPal_OpcDecB7:
4731 br r31, osfpal_calpal_opcdec
4732
4733 CALL_PAL_UNPRIV(0x00B8)
4734CallPal_OpcDecB8:
4735 br r31, osfpal_calpal_opcdec
4736
4737 CALL_PAL_UNPRIV(0x00B9)
4738CallPal_OpcDecB9:
4739 br r31, osfpal_calpal_opcdec
4740
4741 CALL_PAL_UNPRIV(0x00BA)
4742CallPal_OpcDecBA:
4743 br r31, osfpal_calpal_opcdec
4744
4745 CALL_PAL_UNPRIV(0x00BB)
4746CallPal_OpcDecBB:
4747 br r31, osfpal_calpal_opcdec
4748
4749 CALL_PAL_UNPRIV(0x00BC)
4750CallPal_OpcDecBC:
4751 br r31, osfpal_calpal_opcdec
4752
4753 CALL_PAL_UNPRIV(0x00BD)
4754CallPal_OpcDecBD:
4755 br r31, osfpal_calpal_opcdec
4756
4757 CALL_PAL_UNPRIV(0x00BE)
4758CallPal_OpcDecBE:
4759 br r31, osfpal_calpal_opcdec
4760
4761 CALL_PAL_UNPRIV(0x00BF)
4762CallPal_OpcDecBF:
4763 // MODIFIED BY EGH 2/25/04
4764 br r31, copypal_impl
4765
4766
4767/*======================================================================*/
4768/* OSF/1 CALL_PAL CONTINUATION AREA */
4769/*======================================================================*/
4770
4771 .text 2
4772
4773 . = 0x4000
4774
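// Everything from here on is reached by branching out of the fixed
// CALL_PAL dispatch slots above (what the comments call running "out of
// call_pal space"). Placing it at offset 0x4000 keeps it clear of the
// last unprivileged CALL_PAL slot (0x00BF) handled above.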
4775
4776// .sbttl "Continuation of MTPR_PERFMON"
4777 ALIGN_BLOCK
4778#if perfmon_debug == 0
4779 // "real" performance monitoring code
4780// mux ctl
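// r17 carries the new event selects already positioned as in PMCTR; the
// mask built below covers one SEL0 bit plus four bits each of SEL1/SEL2.
// The on-chip PMCTR is read-modify-written with those bits, then the
// off-chip (Cbox) selects are updated the same way in BC_CTL, working from
// the shadow copy kept in the impure area (CNS_Q_BC_CTL) rather than
// reading the Cbox register back.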
4781perfmon_muxctl:
4782 lda r8, 1(r31) // get a 1
4783 sll r8, pmctr_v_sel0, r8 // move to sel0 position
4784 or r8, ((0xf<<pmctr_v_sel1) | (0xf<<pmctr_v_sel2)), r8 // build mux select mask
4785 and r17, r8, r25 // isolate pmctr mux select bits
4786 mfpr r0, ev5__pmctr
4787 bic r0, r8, r0 // clear old mux select bits
4788 or r0,r25, r25 // or in new mux select bits
4789 mtpr r25, ev5__pmctr
4790
4791 // ok, now tackle cbox mux selects
4792 ldah r14, 0xfff0(r31)
4793 zap r14, 0xE0, r14 // Get Cbox IPR base
4794//orig get_bc_ctl_shadow r16 // bc_ctl returned in lower longword
4795// adapted from ev5_pal_macros.mar
4796 mfpr r16, pt_impure
4797 lda r16, CNS_Q_IPR(r16)
4798 RESTORE_SHADOW(r16,CNS_Q_BC_CTL,r16);
4799
4800 lda r8, 0x3F(r31) // build mux select mask
4801 sll r8, bc_ctl_v_pm_mux_sel, r8
4802
4803 and r17, r8, r25 // isolate bc_ctl mux select bits
4804 bic r16, r8, r16 // isolate old mux select bits
4805 or r16, r25, r25 // create new bc_ctl
4806 mb // clear out cbox for future ipr write
4807 stqp r25, ev5__bc_ctl(r14) // store to cbox ipr
4808 mb // clear out cbox for future ipr write
4809
4810//orig update_bc_ctl_shadow r25, r16 // r25=value, r16-overwritten with adjusted impure ptr
4811// adapted from ev5_pal_macros.mar
4812 mfpr r16, pt_impure
4813 lda r16, CNS_Q_IPR(r16)
4814 SAVE_SHADOW(r25,CNS_Q_BC_CTL,r16);
4815
4816 br r31, perfmon_success
4817
4818
4819// requested to disable perf monitoring
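// r17 is a bit vector: bits 0, 1 and 2 request disabling of counters 0, 1
// and 2. For each requested counter the two CTL bits in PMCTR are cleared
// (mask 3 << pmctr_v_ctlN), and the CTL fields of the PM_CTL shadow in the
// impure area are then brought in line with the hardware (only needed on
// pass1 parts, per the note below, but harmless on later ones).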
4820perfmon_dis:
4821 mfpr r14, ev5__pmctr // read ibox pmctr ipr
4822perfmon_dis_ctr0: // and begin with ctr0
4823 blbc r17, perfmon_dis_ctr1 // do not disable ctr0
4824 lda r8, 3(r31)
4825 sll r8, pmctr_v_ctl0, r8
4826 bic r14, r8, r14 // disable ctr0
4827perfmon_dis_ctr1:
4828 srl r17, 1, r17
4829 blbc r17, perfmon_dis_ctr2 // do not disable ctr1
4830 lda r8, 3(r31)
4831 sll r8, pmctr_v_ctl1, r8
4832 bic r14, r8, r14 // disable ctr1
4833perfmon_dis_ctr2:
4834 srl r17, 1, r17
4835 blbc r17, perfmon_dis_update // do not disable ctr2
4836 lda r8, 3(r31)
4837 sll r8, pmctr_v_ctl2, r8
4838 bic r14, r8, r14 // disable ctr2
4839perfmon_dis_update:
4840 mtpr r14, ev5__pmctr // update pmctr ipr
4841//;the following code is not needed for ev5 pass2 and later, but doesn't hurt anything to leave in
4842// adapted from ev5_pal_macros.mar
4843//orig get_pmctr_ctl r8, r25 // pmctr_ctl bit in r8. adjusted impure pointer in r25
4844 mfpr r25, pt_impure
4845 lda r25, CNS_Q_IPR(r25)
4846 RESTORE_SHADOW(r8,CNS_Q_PM_CTL,r25);
4847
4848 lda r17, 0x3F(r31) // build mask
4849 sll r17, pmctr_v_ctl2, r17 // shift mask to correct position
4850 and r14, r17, r14 // isolate ctl bits
4851 bic r8, r17, r8 // clear out old ctl bits
4852 or r14, r8, r14 // create shadow ctl bits
4853//orig store_reg1 pmctr_ctl, r14, r25, ipr=1 // update pmctr_ctl register
4854//adjusted impure pointer still in r25
4855 SAVE_SHADOW(r14,CNS_Q_PM_CTL,r25);
4856
4857 br r31, perfmon_success
4858
4859
4860// requested to enable perf monitoring
4861//;the following code can be greatly simplified for pass2, but should work fine as is.
4862
4863
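// Enable (perfmon_en) and enable-with-clear (perfmon_enclr) share the code
// below; r9 only records which flavor was requested, and when it is set the
// CTR0/CTR1 (16-bit) and CTR2 (14-bit) count fields are zeroed first. The
// per-counter enable requests again arrive as bits 0..2 of r17, the
// frequency selects come from the PM_CTL shadow (shifted left 6 into the
// PMCTR CTL positions), and on pass2 chips the PCB's PME bit is folded into
// ICSR<PMP> so the master enable follows the process.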
4864perfmon_enclr:
4865 lda r9, 1(r31) // set enclr flag
4866 br perfmon_en_cont
4867
4868perfmon_en:
4869 bis r31, r31, r9 // clear enclr flag
4870
4871perfmon_en_cont:
4872 mfpr r8, pt_pcbb // get PCB base
4873//orig get_pmctr_ctl r25, r25
4874 mfpr r25, pt_impure
4875 lda r25, CNS_Q_IPR(r25)
4876 RESTORE_SHADOW(r25,CNS_Q_PM_CTL,r25);
4877
4878 ldqp r16, osfpcb_q_fen(r8) // read DAT/PME/FEN quadword
4879 mfpr r14, ev5__pmctr // read ibox pmctr ipr
4880 srl r16, osfpcb_v_pme, r16 // get pme bit
4881 mfpr r13, icsr
4882 and r16, 1, r16 // isolate pme bit
4883
4884 // this code only needed in pass2 and later
4885//orig sget_addr r12, 1<<icsr_v_pmp, r31
4886 lda r12, 1<<icsr_v_pmp(r31) // pb
4887 bic r13, r12, r13 // clear pmp bit
4888 sll r16, icsr_v_pmp, r12 // move pme bit to icsr<pmp> position
4889 or r12, r13, r13 // new icsr with icsr<pmp> bit set/clear
4890 ev5_pass2 mtpr r13, icsr // update icsr
4891
4892#if ev5_p1 != 0
4893 lda r12, 1(r31)
4894 cmovlbc r25, r12, r16 // r16<0> set if either pme=1 or sprocess=0 (sprocess in bit 0 of r25)
4895#else
4896 bis r31, 1, r16 // set r16<0> on pass2 to update pmctr always (icsr provides real enable)
4897#endif
4898
4899 sll r25, 6, r25 // shift frequency bits into pmctr_v_ctl positions
4900 bis r14, r31, r13 // copy pmctr
4901
4902perfmon_en_ctr0: // and begin with ctr0
4903 blbc r17, perfmon_en_ctr1 // do not enable ctr0
4904
4905	blbc	r9, perfmon_en_noclr0	// if enclr flag set, clear ctr0 field
4906 lda r8, 0xffff(r31)
4907 zapnot r8, 3, r8 // ctr0<15:0> mask
4908 sll r8, pmctr_v_ctr0, r8
4909 bic r14, r8, r14 // clear ctr bits
4910 bic r13, r8, r13 // clear ctr bits
4911
4912perfmon_en_noclr0:
4913//orig get_addr r8, 3<<pmctr_v_ctl0, r31
4914 LDLI(r8, (3<<pmctr_v_ctl0))
4915 and r25, r8, r12 //isolate frequency select bits for ctr0
4916 bic r14, r8, r14 // clear ctl0 bits in preparation for enabling
4917 or r14,r12,r14 // or in new ctl0 bits
4918
4919perfmon_en_ctr1: // enable ctr1
4920 srl r17, 1, r17 // get ctr1 enable
4921 blbc r17, perfmon_en_ctr2 // do not enable ctr1
4922
4923 blbc r9, perfmon_en_noclr1 // if enclr flag set, clear ctr1 field
4924 lda r8, 0xffff(r31)
4925 zapnot r8, 3, r8 // ctr1<15:0> mask
4926 sll r8, pmctr_v_ctr1, r8
4927 bic r14, r8, r14 // clear ctr bits
4928 bic r13, r8, r13 // clear ctr bits
4929
4930perfmon_en_noclr1:
4931//orig get_addr r8, 3<<pmctr_v_ctl1, r31
4932 LDLI(r8, (3<<pmctr_v_ctl1))
4933 and r25, r8, r12 //isolate frequency select bits for ctr1
4934 bic r14, r8, r14 // clear ctl1 bits in preparation for enabling
4935 or r14,r12,r14 // or in new ctl1 bits
4936
4937perfmon_en_ctr2: // enable ctr2
4938 srl r17, 1, r17 // get ctr2 enable
4939 blbc r17, perfmon_en_return // do not enable ctr2 - return
4940
4941 blbc r9, perfmon_en_noclr2 // if enclr flag set, clear ctr2 field
4942 lda r8, 0x3FFF(r31) // ctr2<13:0> mask
4943 sll r8, pmctr_v_ctr2, r8
4944 bic r14, r8, r14 // clear ctr bits
4945 bic r13, r8, r13 // clear ctr bits
4946
4947perfmon_en_noclr2:
4948//orig get_addr r8, 3<<pmctr_v_ctl2, r31
4949 LDLI(r8, (3<<pmctr_v_ctl2))
4950 and r25, r8, r12 //isolate frequency select bits for ctr2
4951 bic r14, r8, r14 // clear ctl2 bits in preparation for enabling
4952 or r14,r12,r14 // or in new ctl2 bits
4953
4954perfmon_en_return:
4955 cmovlbs r16, r14, r13 // if pme enabled, move enables into pmctr
4956 // else only do the counter clears
4957 mtpr r13, ev5__pmctr // update pmctr ipr
4958
4959//;this code not needed for pass2 and later, but does not hurt to leave it in
4960 lda r8, 0x3F(r31)
4961//orig get_pmctr_ctl r25, r12 // read pmctr ctl; r12=adjusted impure pointer
4962 mfpr r12, pt_impure
4963 lda r12, CNS_Q_IPR(r12)
4964 RESTORE_SHADOW(r25,CNS_Q_PM_CTL,r12);
4965
4966 sll r8, pmctr_v_ctl2, r8 // build ctl mask
4967 and r8, r14, r14 // isolate new ctl bits
4968 bic r25, r8, r25 // clear out old ctl value
4969 or r25, r14, r14 // create new pmctr_ctl
4970//orig store_reg1 pmctr_ctl, r14, r12, ipr=1
4971 SAVE_SHADOW(r14,CNS_Q_PM_CTL,r12); // r12 still has the adjusted impure ptr
4972
4973 br r31, perfmon_success
4974
4975
4976// options...
4977perfmon_ctl:
4978
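// Mode control: r17 supplies the PMCTR KILLU/KILLP/KILLK bits (already in
// PMCTR positions), selecting which modes are counted. r17<0> chooses
// between counting all processes (0) and selected processes only (1); the
// latter clears ICSR<PMA> on pass2 and sets bit 0 of the PM_CTL shadow for
// the pass1 path.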
4979// set mode
4980//orig get_pmctr_ctl r14, r12 // read shadow pmctr ctl; r12=adjusted impure pointer
4981 mfpr r12, pt_impure
4982 lda r12, CNS_Q_IPR(r12)
4983 RESTORE_SHADOW(r14,CNS_Q_PM_CTL,r12);
4984
4985//orig get_addr r8, (1<<pmctr_v_killu) | (1<<pmctr_v_killp) | (1<<pmctr_v_killk), r31 // build mode mask for pmctr register
4986 LDLI(r8, ((1<<pmctr_v_killu) | (1<<pmctr_v_killp) | (1<<pmctr_v_killk)))
4987 mfpr r0, ev5__pmctr
4988 and r17, r8, r25 // isolate pmctr mode bits
4989 bic r0, r8, r0 // clear old mode bits
4990 or r0, r25, r25 // or in new mode bits
4991 mtpr r25, ev5__pmctr
4992
4993//;the following code will only be used in pass2, but should not hurt anything if run in pass1.
4994 mfpr r8, icsr
4995 lda r25, 1<<icsr_v_pma(r31) // set icsr<pma> if r17<0>=0
4996 bic r8, r25, r8 // clear old pma bit
4997 cmovlbs r17, r31, r25 // and clear icsr<pma> if r17<0>=1
4998 or r8, r25, r8
4999 ev5_pass2 mtpr r8, icsr // 4 bubbles to hw_rei
5000 mfpr r31, pt0 // pad icsr write
5001 mfpr r31, pt0 // pad icsr write
5002
5003//;the following code not needed for pass2 and later, but should work anyway.
5004 bis r14, 1, r14 // set for select processes
5005 blbs r17, perfmon_sp // branch if select processes
5006 bic r14, 1, r14 // all processes
5007perfmon_sp:
5008//orig store_reg1 pmctr_ctl, r14, r12, ipr=1 // update pmctr_ctl register
5009 SAVE_SHADOW(r14,CNS_Q_PM_CTL,r12); // r12 still has the adjusted impure ptr
5010 br r31, perfmon_success
5011
5012// counter frequency select
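// The new frequency select bits are merged into the PM_CTL shadow only;
// the hardware CTL fields pick them up the next time counting is enabled,
// since perfmon_en above re-reads the shadow and shifts the selects into
// the PMCTR CTL positions.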
5013perfmon_freq:
5014//orig get_pmctr_ctl r14, r12 // read shadow pmctr ctl; r12=adjusted impure pointer
5015 mfpr r12, pt_impure
5016 lda r12, CNS_Q_IPR(r12)
5017 RESTORE_SHADOW(r14,CNS_Q_PM_CTL,r12);
5018
5019 lda r8, 0x3F(r31)
5020//orig sll r8, pmctr_ctl_v_frq2, r8 // build mask for frequency select field
5021// I guess this should be a shift of 4 bits from the above control register structure .. pb
5022#define pmctr_ctl_v_frq2_SHIFT 4
5023 sll r8, pmctr_ctl_v_frq2_SHIFT, r8 // build mask for frequency select field
5024
5025 and r8, r17, r17
5026 bic r14, r8, r14 // clear out old frequency select bits
5027
5028 or r17, r14, r14 // or in new frequency select info
5029//orig store_reg1 pmctr_ctl, r14, r12, ipr=1 // update pmctr_ctl register
5030 SAVE_SHADOW(r14,CNS_Q_PM_CTL,r12); // r12 still has the adjusted impure ptr
5031
5032 br r31, perfmon_success
5033
5034// read counters
5035perfmon_rd:
5036 mfpr r0, ev5__pmctr
5037 or r0, 1, r0 // or in return status
5038 hw_rei // back to user
5039
5040// write counters
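// r17 supplies new counter values already positioned as in PMCTR. The mask
// built below covers CTR2<13:0> plus the 32 bits holding CTR1 and CTR0, so
// only the count fields are replaced and the control/select bits are
// preserved.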
5041perfmon_wr:
5042 mfpr r14, ev5__pmctr
5043 lda r8, 0x3FFF(r31) // ctr2<13:0> mask
5044 sll r8, pmctr_v_ctr2, r8
5045
5046//orig get_addr r9, 0xFFFFFFFF, r31, verify=0 // ctr2<15:0>,ctr1<15:0> mask
5047 LDLI(r9, (0xFFFFFFFF))
5048 sll r9, pmctr_v_ctr1, r9
5049 or r8, r9, r8 // or ctr2, ctr1, ctr0 mask
5050 bic r14, r8, r14 // clear ctr fields
5051 and r17, r8, r25 // clear all but ctr fields
5052 or r25, r14, r14 // write ctr fields
5053 mtpr r14, ev5__pmctr // update pmctr ipr
5054
5055 mfpr r31, pt0 // pad pmctr write (needed only to keep PVC happy)
5056
5057perfmon_success:
5058 or r31, 1, r0 // set success
5059 hw_rei // back to user
5060
5061perfmon_unknown:
5062 or r31, r31, r0 // set fail
5063 hw_rei // back to user
5064
5065#else
5066
5067// end of "real code", start of debug code
5068
5069//+
5070// Debug environment:
5071// (in pass2, always set icsr<pma> to ensure master counter enable is on)
5072// R16 = 0 Write to on-chip performance monitor ipr
5073// r17 = on-chip ipr
5074// r0 = return value of read of on-chip performance monitor ipr
5075// R16 = 1 Setup Cbox mux selects
5076// r17 = Cbox mux selects in same position as in bc_ctl ipr.
5077// r0 = return value of read of on-chip performance monitor ipr
5078//
5079//-
5080pal_perfmon_debug:
5081 mfpr r8, icsr
5082 lda r9, 1<<icsr_v_pma(r31)
5083 bis r8, r9, r8
5084 mtpr r8, icsr
5085
5086 mfpr r0, ev5__pmctr // read old value
5087 bne r16, cbox_mux_sel
5088
5089 mtpr r17, ev5__pmctr // update pmctr ipr
5090 br r31, end_pm
5091
5092cbox_mux_sel:
5093 // ok, now tackle cbox mux selects
5094 ldah r14, 0xfff0(r31)
5095 zap r14, 0xE0, r14 // Get Cbox IPR base
5096//orig get_bc_ctl_shadow r16 // bc_ctl returned
5097 mfpr r16, pt_impure
5098 lda r16, CNS_Q_IPR(r16)
5099 RESTORE_SHADOW(r16,CNS_Q_BC_CTL,r16);
5100
5101 lda r8, 0x3F(r31) // build mux select mask
5102 sll r8, BC_CTL_V_PM_MUX_SEL, r8
5103
5104 and r17, r8, r25 // isolate bc_ctl mux select bits
5105 bic r16, r8, r16 // isolate old mux select bits
5106 or r16, r25, r25 // create new bc_ctl
5107 mb // clear out cbox for future ipr write
5108 stqp r25, ev5__bc_ctl(r14) // store to cbox ipr
5109 mb // clear out cbox for future ipr write
5110//orig update_bc_ctl_shadow r25, r16 // r25=value, r16-overwritten with adjusted impure ptr
5111 mfpr r16, pt_impure
5112 lda r16, CNS_Q_IPR(r16)
5113 SAVE_SHADOW(r25,CNS_Q_BC_CTL,r16);
5114
5115end_pm: hw_rei
5116
5117#endif
5118
5119
5120//;The following code is a workaround for a cpu bug where Istream prefetches to
5121//;super-page address space in user mode may escape off-chip.
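//;The fix below turns Istream super-page mapping (the ICSR<SPE> bit handled
//;here) off while returning to user mode and back on when returning to
//;kernel mode. pt_misc remembers which mode the last update was for, so the
//;ICSR write and the mandatory Icache flush happen only when the mode
//;actually changes across a hw_rei.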
5122#if spe_fix != 0
5123
5124 ALIGN_BLOCK
5125hw_rei_update_spe:
5126 mfpr r12, pt_misc // get previous mode
5127 srl r11, osfps_v_mode, r10 // isolate current mode bit
5128 and r10, 1, r10
5129 extbl r12, 7, r8 // get previous mode field
5130 and r8, 1, r8 // isolate previous mode bit
5131 cmpeq r10, r8, r8 // compare previous and current modes
5132 beq r8, hw_rei_update_spe_5_
5133 hw_rei // if same, just return
5134
5135hw_rei_update_spe_5_:
5136
5137#if fill_err_hack != 0
5138
5139 fill_error_hack
5140#endif
5141
5142 mfpr r8, icsr // get current icsr value
5143 ldah r9, (2<<(icsr_v_spe-16))(r31) // get spe bit mask
5144 bic r8, r9, r8 // disable spe
5145 xor r10, 1, r9 // flip mode for new spe bit
5146 sll r9, icsr_v_spe+1, r9 // shift into position
5147 bis r8, r9, r8 // enable/disable spe
5148 lda r9, 1(r31) // now update our flag
5149 sll r9, pt_misc_v_cm, r9 // previous mode saved bit mask
5150 bic r12, r9, r12 // clear saved previous mode
5151 sll r10, pt_misc_v_cm, r9 // current mode saved bit mask
5152 bis r12, r9, r12 // set saved current mode
5153 mtpr r12, pt_misc // update pt_misc
5154 mtpr r8, icsr // update icsr
5155
5156#if osf_chm_fix != 0
5157
5158
5159 blbc r10, hw_rei_update_spe_10_ // branch if not user mode
5160
5161 mb // ensure no outstanding fills
5162 lda r12, 1<<dc_mode_v_dc_ena(r31) // User mode
5163 mtpr r12, dc_mode // Turn on dcache
5164 mtpr r31, dc_flush // and flush it
5165 br r31, pal_ic_flush
5166
5167hw_rei_update_spe_10_: mfpr r9, pt_pcbb // Kernel mode
5168 ldqp r9, osfpcb_q_Fen(r9) // get FEN
5169 blbc r9, pal_ic_flush // return if FP disabled
5170 mb // ensure no outstanding fills
5171 mtpr r31, dc_mode // turn off dcache
5172#endif
5173
5174
5175 br r31, pal_ic_flush // Pal restriction - must flush Icache if changing ICSR<SPE>
5176#endif
5177
5178
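//+
// copypal_impl - PAL-mode bcopy reached from the CALL_PAL 0x00BF entry above.
//
// Register usage (as used by the code below):
//      r16 = destination, r17 = source, r18 = byte count
//      r0  = original destination (returned)
//
// Roughly equivalent C (sketch only):
//      void *copypal(void *dst, const void *src, long len) {
//          if (len > 0) memcpy(dst, src, len);
//          return dst;
//      }
//
// The copy is done with quadword ldq_u/stq_u accesses: an aligned main
// loop, a short (<= 8 byte) tail, and a general misaligned path that
// merges source quadwords with extql/extqh.
//-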
5179copypal_impl:
5180 mov r16, r0
5181	ble	r18, finished		// if len <= 0 we are finished
5182 ldq_u r8, 0(r17)
5183 xor r17, r16, r9
5184 and r9, 7, r9
5185 and r16, 7, r10
5186 bne r9, unaligned
5187 beq r10, aligned
5188 ldq_u r9, 0(r16)
5189 addq r18, r10, r18
5190 mskqh r8, r17, r8
5191 mskql r9, r17, r9
5192 bis r8, r9, r8
5193aligned:
5194 subq r18, 1, r10
5195 bic r10, 7, r10
5196 and r18, 7, r18
5197 beq r10, aligned_done
5198loop:
5199 stq_u r8, 0(r16)
5200 ldq_u r8, 8(r17)
5201 subq r10, 8, r10
5202 lda r16,8(r16)
5203 lda r17,8(r17)
5204 bne r10, loop
5205aligned_done:
5206 bne r18, few_left
5207 stq_u r8, 0(r16)
5208 br r31, finished
5209few_left:
5210 mskql r8, r18, r10
5211 ldq_u r9, 0(r16)
5212 mskqh r9, r18, r9
5213 bis r10, r9, r10
5214 stq_u r10, 0(r16)
5215 br r31, finished
5216unaligned:
5217 addq r17, r18, r25
5218 cmpule r18, 8, r9
5219 bne r9, unaligned_few_left
5220 beq r10, unaligned_dest_aligned
5221 and r16, 7, r10
5222 subq r31, r10, r10
5223 addq r10, 8, r10
5224 ldq_u r9, 7(r17)
5225 extql r8, r17, r8
5226 extqh r9, r17, r9
5227 bis r8, r9, r12
5228 insql r12, r16, r12
5229 ldq_u r13, 0(r16)
5230 mskql r13, r16, r13
5231 bis r12, r13, r12
5232 stq_u r12, 0(r16)
5233 addq r16, r10, r16
5234 addq r17, r10, r17
5235 subq r18, r10, r18
5236 ldq_u r8, 0(r17)
5237unaligned_dest_aligned:
5238 subq r18, 1, r10
5239 bic r10, 7, r10
5240 and r18, 7, r18
5241 beq r10, unaligned_partial_left
5242unaligned_loop:
5243 ldq_u r9, 7(r17)
5244 lda r17, 8(r17)
5245 extql r8, r17, r12
5246 extqh r9, r17, r13
5247 subq r10, 8, r10
5248 bis r12, r13, r13
5249 stq r13, 0(r16)
5250 lda r16, 8(r16)
5251 beq r10, unaligned_second_partial_left
5252 ldq_u r8, 7(r17)
5253 lda r17, 8(r17)
5254 extql r9, r17, r12
5255 extqh r8, r17, r13
5256 bis r12, r13, r13
5257 subq r10, 8, r10
5258 stq r13, 0(r16)
5259 lda r16, 8(r16)
5260 bne r10, unaligned_loop
5261unaligned_partial_left:
5262 mov r8, r9
5263unaligned_second_partial_left:
5264 ldq_u r8, -1(r25)
5265 extql r9, r17, r9
5266 extqh r8, r17, r8
5267 bis r8, r9, r8
5268 bne r18, few_left
5269 stq_u r8, 0(r16)
5270 br r31, finished
5271unaligned_few_left:
5272 ldq_u r9, -1(r25)
5273 extql r8, r17, r8
5274 extqh r9, r17, r9
5275 bis r8, r9, r8
5276 insqh r8, r16, r9
5277 insql r8, r16, r8
5278 lda r12, -1(r31)
5279 mskql r12, r18, r13
5280 cmovne r13, r13, r12
5281 insqh r12, r16, r13
5282 insql r12, r16, r12
5283 addq r16, r18, r10
5284 ldq_u r14, 0(r16)
5285 ldq_u r25, -1(r10)
5286 bic r14, r12, r14
5287 bic r25, r13, r25
5288 and r8, r12, r8
5289 and r9, r13, r9
5290 bis r8, r14, r8
5291 bis r9, r25, r9
5292 stq_u r9, -1(r10)
5293 stq_u r8, 0(r16)
5294finished:
5295 hw_rei