1/*
2 * Copyright (c) 2003, 2004
3 * The Regents of The University of Michigan
4 * All Rights Reserved
5 *
6 * This code is part of the M5 simulator, developed by Nathan Binkert,
7 * Erik Hallnor, Steve Raasch, and Steve Reinhardt, with contributions
8 * from Ron Dreslinski, Dave Greene, Lisa Hsu, Ali Saidi, and Andrew
9 * Schultz.
10 *
11 * Permission is granted to use, copy, create derivative works and
12 * redistribute this software and such derivative works for any
13 * purpose, so long as the copyright notice above, this grant of
14 * permission, and the disclaimer below appear in all copies made; and
15 * so long as the name of The University of Michigan is not used in
16 * any advertising or publicity pertaining to the use or distribution
17 * of this software without specific, written prior authorization.
18 *
19 * THIS SOFTWARE IS PROVIDED AS IS, WITHOUT REPRESENTATION FROM THE
20 * UNIVERSITY OF MICHIGAN AS TO ITS FITNESS FOR ANY PURPOSE, AND
21 * WITHOUT WARRANTY BY THE UNIVERSITY OF MICHIGAN OF ANY KIND, EITHER
22 * EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION THE IMPLIED
23 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
24 * PURPOSE. THE REGENTS OF THE UNIVERSITY OF MICHIGAN SHALL NOT BE
25 * LIABLE FOR ANY DAMAGES, INCLUDING DIRECT, SPECIAL, INDIRECT,
26 * INCIDENTAL, OR CONSEQUENTIAL DAMAGES, WITH RESPECT TO ANY CLAIM
27 * ARISING OUT OF OR IN CONNECTION WITH THE USE OF THE SOFTWARE, EVEN
28 * IF IT HAS BEEN OR IS HEREAFTER ADVISED OF THE POSSIBILITY OF SUCH
29 * DAMAGES.
30 */
31
32/*
33Copyright 1992, 1993, 1994, 1995 Hewlett-Packard Development Company, L.P.
34
35Permission is hereby granted, free of charge, to any person obtaining a copy of
36this software and associated documentation files (the "Software"), to deal in
37the Software without restriction, including without limitation the rights to
38use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
39of the Software, and to permit persons to whom the Software is furnished to do
40so, subject to the following conditions:
41
42The above copyright notice and this permission notice shall be included in all
43copies or substantial portions of the Software.
44
45THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
46IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
47FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
48AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
49LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
50OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
51SOFTWARE.
52*/
53
54// modified to use the Hudson style "impure.h" instead of ev5_impure.sdl
55// since we don't have a mechanism to expand the data structures.... pb Nov/95
56
 57// build_fixed_image: not sure what this means
 58// real_mm to be replaced during rewrite
 59// remove_save_state and remove_restore_state can be removed to save space??
60
61
62#include "ev5_defs.h"
63#include "ev5_impure.h"
64#include "ev5_alpha_defs.h"
65#include "ev5_paldef.h"
66#include "ev5_osfalpha_defs.h"
67#include "fromHudsonMacros.h"
68#include "fromHudsonOsf.h"
69#include "dc21164FromGasSources.h"
70
71#ifdef SIMOS
72#define DEBUGSTORE(c) nop
73#else
74#define DEBUGSTORE(c) \
75 lda r13, c(zero) ; \
76 bsr r25, debugstore
77#endif
78
79#define DEBUG_EXC_ADDR()\
80 bsr r25, put_exc_addr; \
81 DEBUGSTORE(13) ; \
82 DEBUGSTORE(10)
83
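// Usage note (editor's addition, derived from the #defines above): in the
// non-SIMOS build, DEBUGSTORE(0x41) expands to
//	lda	r13, 0x41(zero)
//	bsr	r25, debugstore
// i.e. the byte to log is passed in r13 and debugstore returns through r25.
// Under SIMOS the macro is just a nop, so all of the debug stores vanish.
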
84#define egore 0
85#define acore 0
86#define beh_model 0
87#define ev5_p2 1
88#define ev5_p1 0
89#define ldvpte_bug_fix 1
90#define osf_chm_fix 0
91
92// Do we want to do this?? pb
93#define spe_fix 0
94// Do we want to do this?? pb
95#define build_fixed_image 0
96
97#define ev5_pass2
98#define enable_p4_fixups 0
99#define osf_svmin 1
100#define enable_physical_console 0
101#define fill_err_hack 0
102#define icflush_on_tbix 0
103#define max_cpuid 1
104#define perfmon_debug 0
105#define rawhide_system 0
106#define rax_mode 0
107
108
109// This is the fix for the user-mode super page references causing the machine to crash.
110#if (spe_fix == 1) && (build_fixed_image==1)
111#define hw_rei_spe br r31, hw_rei_update_spe
112#else
113#define hw_rei_spe hw_rei
114#endif
115
116
117// redefine a few of the distribution-code names to match the Hudson gas names.
118// opcodes
119#define ldqp ldq_p
120#define stqp stq_p
121#define ldlp ldl_p
122#define stlp stl_p
123
124#define r0 $0
125#define r1 $1
126#define r2 $2
127#define r3 $3
128#define r4 $4
129#define r5 $5
130#define r6 $6
131#define r7 $7
132#define r8 $8
133#define r9 $9
134#define r10 $10
135#define r11 $11
136#define r12 $12
137#define r13 $13
138#define r14 $14
139#define r15 $15
140#define r16 $16
141#define r17 $17
142#define r18 $18
143#define r19 $19
144#define r20 $20
145#define r21 $21
146#define r22 $22
147#define r23 $23
148#define r24 $24
149#define r25 $25
150#define r26 $26
151#define r27 $27
152#define r28 $28
153#define r29 $29
154#define r30 $30
155#define r31 $31
156
157// .title "EV5 OSF PAL"
158// .ident "V1.18"
159//
160//****************************************************************************
161//* *
162//* Copyright (c) 1992, 1993, 1994, 1995 *
163//* by DIGITAL Equipment Corporation, Maynard, Mass. *
164//* *
165//* This software is furnished under a license and may be used and copied *
166//* only in accordance with the terms of such license and with the *
167//* inclusion of the above copyright notice. This software or any other *
168//* copies thereof may not be provided or otherwise made available to any *
169//* other person. No title to and ownership of the software is hereby *
170//* transferred. *
171//* *
172//* The information in this software is subject to change without notice *
173//* and should not be construed as a commitment by DIGITAL Equipment *
174//* Corporation. *
175//* *
176//* DIGITAL assumes no responsibility for the use or reliability of its *
177//* software on equipment which is not supplied by DIGITAL. *
178//* *
179//****************************************************************************
180
181// .sbttl "Edit History"
182//+
183// Who Rev When What
184// ------------ --- ----------- --------------------------------
185// DB 0.0 03-Nov-1992 Start
186// DB 0.1 28-Dec-1992 add swpctx
187// DB 0.2 05-Jan-1993 Bug: PVC found mtpr dtb_CM -> virt ref bug
188// DB 0.3 11-Jan-1993 rearrange trap entry points
189// DB 0.4 01-Feb-1993 add tbi
190// DB 0.5 04-Feb-1993 real MM, kludge reset flow, kludge swppal
191// DB 0.6 09-Feb-1993 Bug: several stack pushers used r16 for pc (should be r14)
192// DB 0.7 10-Feb-1993 Bug: pushed wrong PC (+8) on CALL_PAL OPCDEC
193// Bug: typo on register number for store in wrunique
194// Bug: rti to kern uses r16 as scratch
195// Bug: callsys saving wrong value in pt_usp
196// DB 0.8 16-Feb-1993 PVC: fix possible pt write->read bug in wrkgp, wrusp
197// DB 0.9 18-Feb-1993 Bug: invalid_dpte_handler shifted pte twice
198// Bug: rti stl_c could corrupt the stack
199// Bug: unaligned returning wrong value in r17 (or should be and)
200// DB 0.10 19-Feb-1993 Add draina, rd/wrmces, cflush, cserve, interrupt
201// DB 0.11 23-Feb-1993 Turn caches on in reset flow
202// DB 0.12 10-Mar-1993 Bug: wrong value for icsr for FEN in kern mode flow
203// DB 0.13 15-Mar-1993 Bug: wrong value pushed for PC in invalid_dpte_handler if stack push tbmisses
204// DB 0.14 23-Mar-1993 Add impure pointer paltemp, reshuffle some other paltemps to match VMS
205// DB 0.15 15-Apr-1993 Combine paltemps for WHAMI and MCES
206// DB 0.16 12-May-1993 Update reset
207// New restriction: no mfpr exc_addr in cycle 1 of call_pal flows
208// Bug: in wrmces, not clearing DPC, DSC
209// Update swppal
210// Add pal bugchecks, pal_save_state, pal_restore_state
211// DB 0.17 24-May-1993 Add dfault_in_pal flow; fixup stack builder to have common state for pc/ps.
212// New restriction: No hw_rei_stall in 0,1,2 after mtpr itb_asn
213// DB 0.18 26-May-1993 PVC fixes
214// JM 0.19 01-jul-1993 Bug: OSFPAL_CALPAL_OPCDEC, TRAP_OPCDEC -- move mt exc_addr after stores
215// JM 0.20 07-jul-1993 Update cns_ and mchk_ names for impure.mar conversion to .sdl
216// Bug: exc_addr was being loaded before stores that could dtb_miss in the following
217// routines: TRAP_FEN,FEN_TO_OPCDEC,CALL_PAL_CALLSYS,RTI_TO_KERN
218// JM 0.21 26-jul-1993 Bug: move exc_addr load after ALL stores in the following routines:
219// TRAP_IACCVIO::,TRAP_OPCDEC::,TRAP_ARITH::,TRAP_FEN::
220// dfault_trap_cont:,fen_to_opcdec:,invalid_dpte_handler:
221// osfpal_calpal_opcdec:,CALL_PAL_callsys::,TRAP_UNALIGN::
222// Bugs from PVC: trap_unalign - mt pt0 ->mf pt0 within 2 cycles
223// JM 0.22 28-jul-1993 Add WRIPIR instruction
224// JM 0.23 05-aug-1993 Bump version number for release
225// JM 0.24 11-aug-1993 Bug: call_pal_swpipl - palshadow write -> hw_rei violation
226// JM 0.25 09-sep-1993 Disable certain "hidden" pvc checks in call_pals;
227// New restriction: No hw_rei_stall in 0,1,2,3,4 after mtpr itb_asn - affects HALT(raxmode),
228// and SWPCTX
229// JM 0.26 07-oct-1993 Re-implement pal_version
230// JM 0.27 12-oct-1993 One more time: change pal_version format to conform to SRM
231// JM 0.28 14-oct-1993 Change ic_flush routine to pal_ic_flush
232// JM 0.29 19-oct-1993 BUG(?): dfault_in_pal: use exc_addr to check for dtbmiss,itbmiss check instead
233// of mm_stat<opcode>. mm_stat contains original opcode, not hw_ld.
234// JM 0.30 28-oct-1993 BUG: PVC violation - mf exc_addr in first cycles of call_pal in rti,retsys
235// JM 0.31 15-nov-1993 BUG: WRFEN trashing r0
236// JM 0.32 21-nov-1993 BUG: dtb_ldq,itb_ldq (used in dfault_in_pal) not defined when real_mm=0
237// JM 0.33 24-nov-1993 save/restore_state -
238// BUG: use ivptbr to restore mvptbr
 239// BUG: adjust hw_ld/st base/offsets to accommodate 10-bit offset limit
 240// CHANGE: Load 2 pages into dtb to accommodate compressed logout area/multiprocessors
241// JM 0.34 20-dec-1993 BUG: set r11<mode> to kernel for ksnv halt case
242// BUG: generate ksnv halt when tb miss on kernel stack accesses
243// save exc_addr in r14 for invalid_dpte stack builder
244// JM 0.35 30-dec-1993 BUG: PVC violation in trap_arith - mt exc_sum in shadow of store with mf exc_mask in
245// the same shadow
246// JM 0.36 6-jan-1994 BUG: fen_to_opcdec - savePC should be PC+4, need to save old PS, update new PS
 247// New palcode restriction: mt icsr<fpe,hwe> --> 3 bubbles to hw_rei --affects wrfen
248// JM 0.37 25-jan-1994 BUG: PVC violations in restore_state - mt dc_mode/maf_mode ->mbox instructions
249// Hide impure area manipulations in macros
250// BUG: PVC violation in save and restore state-- move mt icsr out of shadow of ld/st
251// Add some pvc_violate statements
252// JM 0.38 1-feb-1994 Changes to save_state: save pt1; don't save r31,f31; update comments to reflect reality;
253// Changes to restore_state: restore pt1, icsr; don't restore r31,f31; update comments
254// Add code to ensure fen bit set in icsr before ldt
255// conditionally compile rax_more_reset out.
256// move ldqp,stqp macro definitions to ev5_pal_macros.mar and add .mcall's for them here
257// move rax reset stuff to ev5_osf_system_pal.m64
258// JM 0.39 7-feb-1994 Move impure pointer to pal scratch space. Use former pt_impure for bc_ctl shadow
259// and performance monitoring bits
260// Change to save_state routine to save more iprs.
261// JM 0.40 19-feb-1994 Change algorithm in save/restore_state routines; add f31,r31 back in
262// JM 0.41 21-feb-1994 Add flags to compile out save/restore state (not needed in some systems)
263// remove_save_state,remove_restore_state;fix new pvc violation in save_state
264// JM 0.42 22-feb-1994 BUG: save_state overwriting r3
265// JM 0.43 24-feb-1994 BUG: save_state saving wrong icsr
266// JM 0.44 28-feb-1994 Remove ic_flush from wr_tbix instructions
267// JM 0.45 15-mar-1994 BUG: call_pal_tbi trashes a0 prior to range check (instruction order problem)
268// New pal restriction in pal_restore_state: icsr<fpe>->floating instr = 3 bubbles
269// Add exc_sum and exc_mask to pal_save_state (not restore)
270// JM 0.46 22-apr-1994 Move impure pointer back into paltemp; Move bc_ctl shadow and pmctr_ctl into impure
271// area.
272// Add performance counter support to swpctx and wrperfmon
273// JM 0.47 9-may-1994 Bump version # (for ev5_osf_system_pal.m64 sys_perfmon fix)
274// JM 0.48 13-jun-1994 BUG: trap_interrupt --> put new ev5 ipl at 30 for all osfipl6 interrupts
275// JM 0.49 8-jul-1994 BUG: In the unlikely (impossible?) event that the branch to pal_pal_bug_check is
276// taken in the interrupt flow, stack is pushed twice.
277// SWPPAL - update to support ECO 59 to allow 0 as a valid address
278// Add itb flush to save/restore state routines
279// Change hw_rei to hw_rei_stall in ic_flush routine. Shouldn't be necessary, but
280// conforms to itbia restriction.
281// Added enable_physical_console flag (for enter/exit console routines only)
282// JM 0.50 29-jul-1994 Add code to dfault & invalid_dpte_handler to ignore exceptions on a
283// load to r31/f31. changed dfault_fetch_err to dfault_fetch_ldr31_err and
284// nmiss_fetch_err to nmiss_fetch_ldr31_err.
285// JM 1.00 1-aug-1994 Add pass2 support (swpctx)
286// JM 1.01 2-aug-1994 swppal now passes bc_ctl/bc_config in r1/r2
287// JM 1.02 15-sep-1994 BUG: swpctx missing shift of pme bit to correct position in icsr (pass2)
288// Moved perfmon code here from system file.
289// BUG: pal_perfmon - enable function not saving correct enables when pme not set (pass1)
290// JM 1.03 3-oct-1994 Added (pass2 only) code to wrperfmon enable function to look at pme bit.
291// JM 1.04 14-oct-1994 BUG: trap_interrupt - ISR read (and saved) before INTID -- INTID can change
292// after ISR read, but we won't catch the ISR update. reverse order
293// JM 1.05 17-nov-1994 Add code to dismiss UNALIGN trap if LD r31/F31
294// JM 1.06 28-nov-1994 BUG: missing mm_stat shift for store case in trap_unalign (new bug due to "dismiss" code)
295// JM 1.07 1-dec-1994 EV5 PASS1,2,3 BUG WORKAROUND: Add flag LDVPTE_BUG_FIX. In DTBMISS_DOUBLE, branch to
296// DTBMISS_SINGLE if not in palmode.
297// JM 1.08 9-jan-1995 Bump version number for change to EV5_OSF_SYSTEM_PAL.M64 - ei_stat fix in mchk logout frame
298// JM 1.09 2-feb-1995 Add flag "spe_fix" and accompanying code to workaround pre-pass4 bug: Disable Ibox
299// superpage mode in User mode and re-enable in kernel mode.
300// EV5_OSF_SYSTEM_PAL.M64 and EV5_PALDEF.MAR (added pt_misc_v_cm) also changed to support this.
301// JM 1.10 24-feb-1995 Set ldvpte_bug_fix regardless of ev5 pass. set default to ev5_p2
302// ES 1.11 10-mar-1995 Add flag "osf_chm_fix" to enable dcache in user mode only to avoid
303// cpu bug.
304// JM 1.12 17-mar-1995 BUG FIX: Fix F0 corruption problem in pal_restore_state
305// ES 1.13 17-mar-1995 Refine osf_chm_fix
306// ES 1.14 20-mar-1995 Don't need as many stalls before hw_rei_stall in chm_fix
307// ES 1.15 21-mar-1995 Add a stall to avoid a pvc violation in pal_restore_state
308// Force pvc checking of exit_console
 309// ES 1.16 26-apr-1995 In the wrperfmon disable function, correct meaning of R17<2:0> to ctl2,ctl1,ctl0
310// ES 1.17 01-may-1995 In hw_rei_update_spe code, in the osf_chm fix, use bic and bis (self-correcting)
311// instead of xor to maintain previous mode in pt_misc
312// ES 1.18 14-jul-1995 In wrperfmon enable on pass2, update pmctr even if current process does
313// not have pme set. The bits in icsr maintain the master enable state.
314// In sys_reset, add icsr<17>=1 for ev56 byte/word eco enable
315//
316#define vmaj 1
317#define vmin 18
318#define vms_pal 1
319#define osf_pal 2
320#define pal_type osf_pal
321#define osfpal_version_l ((pal_type<<16) | (vmaj<<8) | (vmin<<0))
322//-
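//+
// Illustrative sketch (editor's addition, not part of the PALcode): how the
// osfpal_version_l longword stored at reset+8 decodes.  With pal_type=2
// (OSF), vmaj=1 and vmin=18 the value is 0x00020112.  A hypothetical
// consumer in C, assuming 8-bit fields as implied by the <x@n> notation:
//
//	#include <stdint.h>
//
//	static void decode_osfpal_version_l(uint32_t v,
//	                                    unsigned *type, unsigned *maj, unsigned *min)
//	{
//	    *type = (v >> 16) & 0xff;	/* <pal_type@16>: 2 = OSF PAL */
//	    *maj  = (v >>  8) & 0xff;	/* <vmaj@8>     : 1           */
//	    *min  =  v        & 0xff;	/* <vmin@0>     : 18 (0x12)   */
//	}
//-
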
323
324// .sbttl "PALtemp register usage"
325
326//+
327// The EV5 Ibox holds 24 PALtemp registers. This maps the OSF PAL usage
328// for these PALtemps:
329//
330// pt0 local scratch
331// pt1 local scratch
332// pt2 entUna pt_entUna
333// pt3 CPU specific impure area pointer pt_impure
334// pt4 memory management temp
335// pt5 memory management temp
336// pt6 memory management temp
337// pt7 entIF pt_entIF
338// pt8 intmask pt_intmask
339// pt9 entSys pt_entSys
340// pt10
341// pt11 entInt pt_entInt
342// pt12 entArith pt_entArith
343// pt13 reserved for system specific PAL
344// pt14 reserved for system specific PAL
345// pt15 reserved for system specific PAL
346// pt16 MISC: scratch ! WHAMI<7:0> ! 0 0 0 MCES<4:0> pt_misc, pt_whami, pt_mces
347// pt17 sysval pt_sysval
348// pt18 usp pt_usp
349// pt19 ksp pt_ksp
350// pt20 PTBR pt_ptbr
351// pt21 entMM pt_entMM
352// pt22 kgp pt_kgp
353// pt23 PCBB pt_pcbb
354//
355//-
356
357// .sbttl "PALshadow register usage"
358//
359//+
360//
361// EV5 shadows R8-R14 and R25 when in PALmode and ICSR<shadow_enable> = 1.
362// This maps the OSF PAL usage of R8 - R14 and R25:
363//
364// r8 ITBmiss/DTBmiss scratch
365// r9 ITBmiss/DTBmiss scratch
366// r10 ITBmiss/DTBmiss scratch
367// r11 PS
368// r12 local scratch
369// r13 local scratch
370// r14 local scratch
371// r25 local scratch
372//
373//
374//-
375
376// .sbttl "ALPHA symbol definitions"
377// _OSF_PSDEF GLOBAL
378// _OSF_PTEDEF GLOBAL
379// _OSF_VADEF GLOBAL
380// _OSF_PCBDEF GLOBAL
381// _OSF_SFDEF GLOBAL
382// _OSF_MMCSR_DEF GLOBAL
383// _SCBDEF GLOBAL
384// _FRMDEF GLOBAL
385// _EXSDEF GLOBAL
386// _OSF_A0_DEF GLOBAL
387// _MCESDEF GLOBAL
388
389// .sbttl "EV5 symbol definitions"
390
391// _EV5DEF
392// _PALTEMP
393// _MM_STAT_DEF
394// _EV5_MM
395// _EV5_IPLDEF
396
397// _HALT_CODES GLOBAL
398// _MCHK_CODES GLOBAL
399
400// _PAL_IMPURE
401// _PAL_LOGOUT
402
403
404
405
406// .sbttl "PALcode configuration options"
407
408// There are a number of options that may be assembled into this version of
409// PALcode. They should be adjusted in a prefix assembly file (i.e. do not edit
410// the following). The options that can be adjusted cause the resultant PALcode
411// to reflect the desired target system.
412
413
414#define osfpal 1 // This is the PALcode for OSF.
415
416#ifndef rawhide_system
417
418#define rawhide_system 0
419#endif
420
421
422#ifndef real_mm
423// Page table translation vs 1-1 mapping
424#define real_mm 1
425#endif
426
427
428#ifndef rax_mode
429
430#define rax_mode 0
431#endif
432
433#ifndef egore
434// End of reset flow starts a program at 200000(hex).
435#define egore 1
436#endif
437
438#ifndef acore
439// End of reset flow starts a program at 40000(hex).
440#define acore 0
441#endif
442
443
444// assume acore+egore+rax_mode lt 2 // Assertion checker
445
446#ifndef beh_model
447// EV5 behavioral model specific code
448#define beh_model 1
449#endif
450
451#ifndef init_cbox
452// Reset flow init of Bcache and Scache
453#define init_cbox 1
454#endif
455
456#ifndef disable_crd
 457// Decides whether the reset flow will disable
 458// correctable read interrupts via ICSR
 459#define disable_crd 0
 460#endif
 461
462#ifndef perfmon_debug
463#define perfmon_debug 0
464#endif
465
466#ifndef icflush_on_tbix
467#define icflush_on_tbix 0
468#endif
469
470#ifndef remove_restore_state
471#define remove_restore_state 0
472#endif
473
474#ifndef remove_save_state
475#define remove_save_state 0
476#endif
477
478#ifndef enable_physical_console
479#define enable_physical_console 0
480#endif
481
482#ifndef ev5_p1
483#define ev5_p1 0
484#endif
485
486#ifndef ev5_p2
487#define ev5_p2 1
488#endif
489
490// assume ev5_p1+ev5_p2 eq 1
491
492#ifndef ldvpte_bug_fix
493#define ldvpte_bug_fix 1 // If set, fix ldvpte bug in dtbmiss_double flow.
494#endif
495
496#ifndef spe_fix
 497// If set, disable super-page mode in user mode and re-enable
 498// in kernel. Workaround for cpu bug.
 499#define spe_fix 0
 500#endif
501#ifndef build_fixed_image
502#define build_fixed_image 0
503#endif
504
505
506#ifndef fill_err_hack
 507// If set, disable fill_error mode in user mode and re-enable
 508// in kernel. Workaround for cpu bug.
 509#define fill_err_hack 0
 510#endif
 511
512
513// .macro hw_rei_spe
514// .iif eq spe_fix, hw_rei
515//#if spe_fix != 0
516//
517//
518//#define hw_rei_chm_count hw_rei_chm_count + 1
519// p4_fixup_label \hw_rei_chm_count
520// .iif eq build_fixed_image, br r31, hw_rei_update_spe
521// .iif ne build_fixed_image, hw_rei
522//#endif
523//
524// .endm
525
526// Add flag "osf_chm_fix" to enable dcache in user mode only
527// to avoid cpu bug.
528
529#ifndef osf_chm_fix
 530// If set, enable D-Cache in user mode only.
 531#define osf_chm_fix 0
 532#endif
 533
 534#if osf_chm_fix != 0
 535
 536#define hw_rei_chm_count 0
 537#endif
538
539#if osf_chm_fix != 0
540
541#define hw_rei_stall_chm_count 0
542#endif
543
544#ifndef enable_p4_fixups
 545// If set, do EV5 Pass 4 fixups
 546#define enable_p4_fixups 0
 547#endif
 548
 549
 550// Only allow fixups if fix enabled
 551#if spe_fix == 0
 552#define osf_chm_fix 0
 553#endif
 554
 555#if spe_fix == 0
 556#define enable_p4_fixups 0
 557#endif
 558
 559
 560
561
562 //Turn off fill_errors and MEM_NEM in user mode
563// .macro fill_error_hack ?L10_, ?L20_, ?L30_, ?L40_
564// //save r22,r23,r24
565// stqp r22, 0x150(r31) //add
566// stqp r23, 0x158(r31) //contents
567// stqp r24, 0x160(r31) //bit mask
568//
569// lda r22, 0x82(r31)
570// ldah r22, 0x8740(r22)
571// sll r22, 8, r22
572// ldlp r23, 0x80(r22) // r23 <- contents of CIA_MASK
573// bis r23,r31,r23
574//
575// lda r24, 0x8(r31) // r24 <- MEM_NEM bit
576// beq r10, L10_ // IF user mode (r10<0> == 0) pal mode
577// bic r23, r24, r23 // set fillerr_en bit
578// br r31, L20_ // ELSE
579//L10_: bis r23, r24, r23 // clear fillerr_en bit
580//L20_: // ENDIF
581//
582// stlp r23, 0x80(r22) // write back the CIA_MASK register
583// mb
584// ldlp r23, 0x80(r22)
585// bis r23,r31,r23
586// mb
587//
588// lda r22, 1(r31) // r22 <- 87.4000.0100 ptr to CIA_CTRL
589// ldah r22, 0x8740(r22)
590// sll r22, 8, r22
591// ldlp r23, 0(r22) // r23 <- contents of CIA_CTRL
592// bis r23,r31,r23
593//
594//
595// lda r24, 0x400(r31) // r9 <- fillerr_en bit
596// beq r10, L30_ // IF user mode (r10<0> == 0) pal mode
597// bic r23, r24, r23 // set fillerr_en bit
598// br r31, L40_ // ELSE
599//L30_: bis r23, r24, r23 // clear fillerr_en bit
600//L40_: // ENDIF
601//
602// stlp r23, 0(r22) // write back the CIA_CTRL register
603// mb
604// ldlp r23, 0(r22)
605// bis r23,r31,r23
606// mb
607//
608// //restore r22,r23,r24
609// ldqp r22, 0x150(r31)
610// ldqp r23, 0x158(r31)
611// ldqp r24, 0x160(r31)
612//
613// .endm
614
615// multiprocessor support can be enabled for a max of n processors by
616// setting the following to the number of processors on the system.
617// Note that this is really the max cpuid.
618
619#ifndef max_cpuid
620#define max_cpuid 8
621#endif
622
623#ifndef osf_svmin // platform specific palcode version number
624#define osf_svmin 0
625#endif
626
627
628#define osfpal_version_h ((max_cpuid<<16) | (osf_svmin<<0))
629
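// Illustrative sketch (editor's addition): osfpal_version_h packs
// <max_cpuid@16> ! <osf_svmin@0>, so with max_cpuid=1 and osf_svmin=1 the
// longword stored at reset+12 is (1<<16)|1 = 0x00010001.  A consumer would
// unpack it the same way as osfpal_version_l above, e.g. in C (assuming
// 16-bit fields):
//	unsigned max_cpu = (v >> 16) & 0xffff;
//	unsigned svmin   =  v        & 0xffff;
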
630// .mcall ldqp // override macro64 definition with macro from library
631// .mcall stqp // override macro64 definition with macro from library
632
633
634// .psect _pal,mix
635// huh pb pal_base:
636// huh pb #define current_block_base . - pal_base
637
638// .sbttl "RESET - Reset Trap Entry Point"
639//+
640// RESET - offset 0000
641// Entry:
642// Vectored into via hardware trap on reset, or branched to
643// on swppal.
644//
645// r0 = whami
646// r1 = pal_base
647// r2 = base of scratch area
648// r3 = halt code
649//
650//
651// Function:
652//
653//-
654
655 .text 0
656 . = 0x0000
657 .globl Pal_Base
658Pal_Base:
659 HDW_VECTOR(PAL_RESET_ENTRY)
660Trap_Reset:
661 nop
662#ifdef SIMOS
663 /*
664 * store into r1
665 */
666 br r1,sys_reset
667#else
668 /* following is a srcmax change */
669
670 DEBUGSTORE(0x41)
671 /* The original code jumped using r1 as a linkage register to pass the base
672 of PALcode to the platform specific code. We use r1 to pass a parameter
673 from the SROM, so we hardcode the address of Pal_Base in platform.s
674 */
675 br r31, sys_reset
676#endif
677
678 // Specify PAL version info as a constant
679 // at a known location (reset + 8).
680
681 .long osfpal_version_l // <pal_type@16> ! <vmaj@8> ! <vmin@0>
682 .long osfpal_version_h // <max_cpuid@16> ! <osf_svmin@0>
683 .long 0
684 .long 0
685pal_impure_start:
686 .quad 0
687pal_debug_ptr:
688 .quad 0 // reserved for debug pointer ; 20
689#if beh_model == 0
690
691
692#if enable_p4_fixups != 0
693
694
695 .quad 0
696 .long p4_fixup_hw_rei_fixup_table
697#endif
698
699#else
700
701 .quad 0 //
702 .quad 0 //0x0030
703 .quad 0
704 .quad 0 //0x0040
705 .quad 0
706 .quad 0 //0x0050
707 .quad 0
708 .quad 0 //0x0060
709 .quad 0
710pal_enter_cns_address:
711 .quad 0 //0x0070 -- address to jump to from enter_console
712 .long <<sys_exit_console-pal_base>+1> //0x0078 -- offset to sys_exit_console (set palmode bit)
713#endif
714
715
716
717
718// .sbttl "IACCVIO- Istream Access Violation Trap Entry Point"
719
720//+
721// IACCVIO - offset 0080
722// Entry:
723// Vectored into via hardware trap on Istream access violation or sign check error on PC.
724//
725// Function:
726// Build stack frame
727// a0 <- Faulting VA
728// a1 <- MMCSR (1 for ACV)
729// a2 <- -1 (for ifetch fault)
730// vector via entMM
731//-
732
733 HDW_VECTOR(PAL_IACCVIO_ENTRY)
734Trap_Iaccvio:
735 DEBUGSTORE(0x42)
736 sll r11, 63-osfps_v_mode, r25 // Shift mode up to MS bit
737 mtpr r31, ev5__ps // Set Ibox current mode to kernel
738
739 bis r11, r31, r12 // Save PS
740 bge r25, TRAP_IACCVIO_10_ // no stack swap needed if cm=kern
741
742
743 mtpr r31, ev5__dtb_cm // Set Mbox current mode to kernel -
744 // no virt ref for next 2 cycles
745 mtpr r30, pt_usp // save user stack
746
747 bis r31, r31, r12 // Set new PS
748 mfpr r30, pt_ksp
749
750TRAP_IACCVIO_10_:
751 lda sp, 0-osfsf_c_size(sp)// allocate stack space
752 mfpr r14, exc_addr // get pc
753
754 stq r16, osfsf_a0(sp) // save regs
755 bic r14, 3, r16 // pass pc/va as a0
756
757 stq r17, osfsf_a1(sp) // a1
758 or r31, mmcsr_c_acv, r17 // pass mm_csr as a1
759
760 stq r18, osfsf_a2(sp) // a2
761 mfpr r13, pt_entmm // get entry point
762
763 stq r11, osfsf_ps(sp) // save old ps
764 bis r12, r31, r11 // update ps
765
766 stq r16, osfsf_pc(sp) // save pc
767 stq r29, osfsf_gp(sp) // save gp
768
769 mtpr r13, exc_addr // load exc_addr with entMM
770 // 1 cycle to hw_rei
771 mfpr r29, pt_kgp // get the kgp
772
773 subq r31, 1, r18 // pass flag of istream, as a2
774 hw_rei_spe
775
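//+
// Illustrative sketch (editor's addition, not part of the PALcode): the
// exception frame that this flow (and the other trap flows below) pushes on
// the kernel stack.  The offsets are the osfsf_* symbols from fromHudsonOsf.h;
// the ordering shown here assumes the conventional OSF/1 layout.
//
//	/* C view of the osfsf_c_size-byte frame, lowest address first */
//	struct osf_stack_frame {
//	    unsigned long ps;	/* osfsf_ps - saved processor status   */
//	    unsigned long pc;	/* osfsf_pc - saved (faulting) PC       */
//	    unsigned long gp;	/* osfsf_gp - saved global pointer      */
//	    unsigned long a0;	/* osfsf_a0 - first exception argument  */
//	    unsigned long a1;	/* osfsf_a1 - second exception argument */
//	    unsigned long a2;	/* osfsf_a2 - third exception argument  */
//	};
//
// For IACCVIO the arguments are a0 = faulting PC/VA, a1 = mmcsr_c_acv and
// a2 = -1 (Istream flag), and the handler is entered through entMM.
//-
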
776
777// .sbttl "INTERRUPT- Interrupt Trap Entry Point"
778
779//+
780// INTERRUPT - offset 0100
781// Entry:
782// Vectored into via trap on hardware interrupt
783//
784// Function:
785// check for halt interrupt
786// check for passive release (current ipl geq requestor)
787// if necessary, switch to kernel mode
788// push stack frame, update ps (including current mode and ipl copies), sp, and gp
789// pass the interrupt info to the system module
790//
791//-
792
793
794 HDW_VECTOR(PAL_INTERRUPT_ENTRY)
795Trap_Interrupt:
796 mfpr r13, ev5__intid // Fetch level of interruptor
797 mfpr r25, ev5__isr // Fetch interrupt summary register
798
799 srl r25, isr_v_hlt, r9 // Get HLT bit
800 mfpr r14, ev5__ipl
801
802 mtpr r31, ev5__dtb_cm // Set Mbox current mode to kern
803 blbs r9, sys_halt_interrupt // halt_interrupt if HLT bit set
804
805 cmple r13, r14, r8 // R8 = 1 if intid .less than or eql. ipl
 806 bne r8, sys_passive_release // Passive release if current rupt is lt or eq ipl
807
808 and r11, osfps_m_mode, r10 // get mode bit
809 beq r10, TRAP_INTERRUPT_10_ // Skip stack swap in kernel
810
811 mtpr r30, pt_usp // save user stack
812 mfpr r30, pt_ksp // get kern stack
813
814TRAP_INTERRUPT_10_:
815 lda sp, (0-osfsf_c_size)(sp)// allocate stack space
816 mfpr r14, exc_addr // get pc
817
818 stq r11, osfsf_ps(sp) // save ps
819 stq r14, osfsf_pc(sp) // save pc
820
821 stq r29, osfsf_gp(sp) // push gp
822 stq r16, osfsf_a0(sp) // a0
823
824// pvc_violate 354 // ps is cleared anyway, if store to stack faults.
825 mtpr r31, ev5__ps // Set Ibox current mode to kernel
826 stq r17, osfsf_a1(sp) // a1
827
828 stq r18, osfsf_a2(sp) // a2
829 subq r13, 0x11, r12 // Start to translate from EV5IPL->OSFIPL
830
831 srl r12, 1, r8 // 1d, 1e: ipl 6. 1f: ipl 7.
832 subq r13, 0x1d, r9 // Check for 1d, 1e, 1f
833
834 cmovge r9, r8, r12 // if .ge. 1d, then take shifted value
835 bis r12, r31, r11 // set new ps
836
837 mfpr r12, pt_intmask
838 and r11, osfps_m_ipl, r14 // Isolate just new ipl (not really needed, since all non-ipl bits zeroed already)
839
840#ifdef SIMOS
841 /*
842 * Lance had space problems. We don't.
843 */
844 extbl r12, r14, r14 // Translate new OSFIPL->EV5IPL
845 mfpr r29, pt_kgp // update gp
846 mtpr r14, ev5__ipl // load the new IPL into Ibox
847#else
848// Moved the following three lines to sys_interrupt to make room for debug
849// extbl r12, r14, r14 // Translate new OSFIPL->EV5IPL
850// mfpr r29, pt_kgp // update gp
851
852// mtpr r14, ev5__ipl // load the new IPL into Ibox
853#endif
854 br r31, sys_interrupt // Go handle interrupt
855
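//+
// Illustrative sketch (editor's addition, not part of the PALcode): the EV5
// INTID -> OSF IPL translation done above with subq/srl/cmovge, written out
// in C:
//
//	static unsigned ev5_intid_to_osf_ipl(unsigned intid)
//	{
//	    unsigned ipl = intid - 0x11;	/* subq r13, 0x11, r12            */
//	    if (intid >= 0x1d)			/* 1d,1e -> ipl 6, 1f -> ipl 7    */
//	        ipl >>= 1;			/* cmovge picks the shifted value */
//	    return ipl;
//	}
//
// The reverse mapping (OSF IPL -> EV5 IPL) is a byte-table lookup through
// pt_intmask with extbl, done just above for SIMOS and in sys_interrupt
// otherwise.
//-
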
856
857
858// .sbttl "ITBMISS- Istream TBmiss Trap Entry Point"
859
860//+
861// ITBMISS - offset 0180
862// Entry:
863// Vectored into via hardware trap on Istream translation buffer miss.
864//
865// Function:
866// Do a virtual fetch of the PTE, and fill the ITB if the PTE is valid.
867// Can trap into DTBMISS_DOUBLE.
868// This routine can use the PALshadow registers r8, r9, and r10
869//
870//-
871
872 HDW_VECTOR(PAL_ITB_MISS_ENTRY)
873Trap_Itbmiss:
874#if real_mm == 0
875
876
877 // Simple 1-1 va->pa mapping
878
879 nop // Pad to align to E1
880 mfpr r8, exc_addr
881
882 srl r8, page_offset_size_bits, r9
883 sll r9, 32, r9
884
885 lda r9, 0x3301(r9) // Make PTE, V set, all KRE, URE, KWE, UWE
886 mtpr r9, itb_pte // E1
887
888 hw_rei_stall // Nital says I don't have to obey shadow wait rule here.
889#else
890
891 // Real MM mapping
892 nop
893 mfpr r8, ev5__ifault_va_form // Get virtual address of PTE.
894
895 nop
896 mfpr r10, exc_addr // Get PC of faulting instruction in case of DTBmiss.
897
898pal_itb_ldq:
899 ld_vpte r8, 0(r8) // Get PTE, traps to DTBMISS_DOUBLE in case of TBmiss
900 mtpr r10, exc_addr // Restore exc_address if there was a trap.
901
902 mfpr r31, ev5__va // Unlock VA in case there was a double miss
903 nop
904
905 and r8, osfpte_m_foe, r25 // Look for FOE set.
906 blbc r8, invalid_ipte_handler // PTE not valid.
907
908 nop
909 bne r25, foe_ipte_handler // FOE is set
910
911 nop
912 mtpr r8, ev5__itb_pte // Ibox remembers the VA, load the PTE into the ITB.
913
914 hw_rei_stall //
915
916#endif
917
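//+
// Illustrative sketch (editor's addition, not part of the PALcode): the
// real_mm==0 path above fabricates a 1-1 PTE directly from the faulting VA
// instead of walking the page tables.  Assuming the usual 8KB pages
// (page_offset_size_bits == 13), the PTE it builds is, in C:
//
//	static unsigned long fake_identity_pte(unsigned long va)
//	{
//	    unsigned long pfn = va >> 13;	/* srl r8, page_offset_size_bits, r9   */
//	    return (pfn << 32) | 0x3301;	/* sll r9, 32, r9 ; lda r9, 0x3301(r9) */
//	    /* 0x3301 = valid bit plus KRE, URE, KWE, UWE, per the comment above */
//	}
//
// The DTBMISS_SINGLE flow below repeats the same construction for Dstream
// misses.
//-
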
918
919
920
921// .sbttl "DTBMISS_SINGLE - Dstream Single TBmiss Trap Entry Point"
922
923//+
924// DTBMISS_SINGLE - offset 0200
925// Entry:
926// Vectored into via hardware trap on Dstream single translation buffer miss.
927//
928// Function:
929// Do a virtual fetch of the PTE, and fill the DTB if the PTE is valid.
930// Can trap into DTBMISS_DOUBLE.
931// This routine can use the PALshadow registers r8, r9, and r10
932//-
933
934 HDW_VECTOR(PAL_DTB_MISS_ENTRY)
935Trap_Dtbmiss_Single:
936#if real_mm == 0
937 // Simple 1-1 va->pa mapping
938 mfpr r8, va // E0
939 srl r8, page_offset_size_bits, r9
940
941 sll r9, 32, r9
942 lda r9, 0x3301(r9) // Make PTE, V set, all KRE, URE, KWE, UWE
943
944 mtpr r9, dtb_pte // E0
945 nop // Pad to align to E0
946
947
948
949 mtpr r8, dtb_tag // E0
950 nop
951
952 nop // Pad tag write
953 nop
954
955 nop // Pad tag write
956 nop
957
958 hw_rei
959#else
960 mfpr r8, ev5__va_form // Get virtual address of PTE - 1 cycle delay. E0.
961 mfpr r10, exc_addr // Get PC of faulting instruction in case of error. E1.
962
963// DEBUGSTORE(0x45)
964// DEBUG_EXC_ADDR()
965 // Real MM mapping
966 mfpr r9, ev5__mm_stat // Get read/write bit. E0.
967 mtpr r10, pt6 // Stash exc_addr away
968
969pal_dtb_ldq:
970 ld_vpte r8, 0(r8) // Get PTE, traps to DTBMISS_DOUBLE in case of TBmiss
971 nop // Pad MF VA
972
973 mfpr r10, ev5__va // Get original faulting VA for TB load. E0.
974 nop
975
976 mtpr r8, ev5__dtb_pte // Write DTB PTE part. E0.
977 blbc r8, invalid_dpte_handler // Handle invalid PTE
978
979 mtpr r10, ev5__dtb_tag // Write DTB TAG part, completes DTB load. No virt ref for 3 cycles.
980 mfpr r10, pt6
981
982 // Following 2 instructions take 2 cycles
983 mtpr r10, exc_addr // Return linkage in case we trapped. E1.
984 mfpr r31, pt0 // Pad the write to dtb_tag
985
986 hw_rei // Done, return
987#endif
988
989
990
991
992// .sbttl "DTBMISS_DOUBLE - Dstream Double TBmiss Trap Entry Point"
993
994//+
995// DTBMISS_DOUBLE - offset 0280
996// Entry:
997// Vectored into via hardware trap on Double TBmiss from single miss flows.
998//
999// r8 - faulting VA
1000// r9 - original MMstat
1001// r10 - original exc_addr (both itb,dtb miss)
1002// pt6 - original exc_addr (dtb miss flow only)
1003// VA IPR - locked with original faulting VA
1004//
1005// Function:
1006// Get PTE, if valid load TB and return.
1007// If not valid then take TNV/ACV exception.
1008//
1009// pt4 and pt5 are reserved for this flow.
1010//
1011//
1012//-
1013
1014 HDW_VECTOR(PAL_DOUBLE_MISS_ENTRY)
1015Trap_Dtbmiss_double:
1016#if ldvpte_bug_fix != 0
1017 mtpr r8, pt4 // save r8 to do exc_addr check
1018 mfpr r8, exc_addr
1019 blbc r8, Trap_Dtbmiss_Single //if not in palmode, should be in the single routine, dummy!
1020 mfpr r8, pt4 // restore r8
1021#endif
1022 nop
1023 mtpr r22, pt5 // Get some scratch space. E1.
1024 // Due to virtual scheme, we can skip the first lookup and go
1025 // right to fetch of level 2 PTE
1026 sll r8, (64-((2*page_seg_size_bits)+page_offset_size_bits)), r22 // Clean off upper bits of VA
1027 mtpr r21, pt4 // Get some scratch space. E1.
1028
1029 srl r22, 61-page_seg_size_bits, r22 // Get Va<seg1>*8
1030 mfpr r21, pt_ptbr // Get physical address of the page table.
1031
1032 nop
1033 addq r21, r22, r21 // Index into page table for level 2 PTE.
1034
1035 sll r8, (64-((1*page_seg_size_bits)+page_offset_size_bits)), r22 // Clean off upper bits of VA
1036 ldqp r21, 0(r21) // Get level 2 PTE (addr<2:0> ignored)
1037
 1038 srl r22, 61-page_seg_size_bits, r22 // Get Va<seg2>*8
1039 blbc r21, double_pte_inv // Check for Invalid PTE.
1040
1041 srl r21, 32, r21 // extract PFN from PTE
1042 sll r21, page_offset_size_bits, r21 // get PFN * 2^13 for add to <seg3>*8
1043
1044 addq r21, r22, r21 // Index into page table for level 3 PTE.
1045 nop
1046
1047 ldqp r21, 0(r21) // Get level 3 PTE (addr<2:0> ignored)
1048 blbc r21, double_pte_inv // Check for invalid PTE.
1049
1050 mtpr r21, ev5__dtb_pte // Write the PTE. E0.
1051 mfpr r22, pt5 // Restore scratch register
1052
1053 mtpr r8, ev5__dtb_tag // Write the TAG. E0. No virtual references in subsequent 3 cycles.
1054 mfpr r21, pt4 // Restore scratch register
1055
1056 nop // Pad write to tag.
1057 nop
1058
1059 nop // Pad write to tag.
1060 nop
1061
1062 hw_rei
1063
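//+
// Illustrative sketch (editor's addition, not part of the PALcode): the walk
// done above, written out in C.  It assumes the usual OSF/1 parameters of
// 8KB pages and 10-bit segment fields (page_offset_size_bits == 13,
// page_seg_size_bits == 10).  On entry r8 holds the *virtual* address of the
// level 3 PTE that missed, so its <32:23> and <22:13> fields carry seg1 and
// seg2 of the original VA and only two memory fetches are needed; the
// resulting PTE maps the page of level 3 PTEs and is written to the DTB with
// r8 as the tag.
//
//	unsigned long dtbmiss_double_walk(unsigned long vpte_va, unsigned long ptbr)
//	{
//	    unsigned long seg1 = (vpte_va >> 23) & 0x3ff;	/* Va<seg1>*8 above */
//	    unsigned long seg2 = (vpte_va >> 13) & 0x3ff;	/* Va<seg2>*8 above */
//	    unsigned long l2pte = *(unsigned long *)(ptbr + seg1 * 8);
//	    if (!(l2pte & 1)) return 0;				/* double_pte_inv   */
//	    unsigned long l2pa = (l2pte >> 32) << 13;		/* PFN -> physical  */
//	    unsigned long l3pte = *(unsigned long *)(l2pa + seg2 * 8);
//	    if (!(l3pte & 1)) return 0;				/* double_pte_inv   */
//	    return l3pte;		/* -> dtb_pte, with vpte_va -> dtb_tag */
//	}
//-
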
1064
1065
1066// .sbttl "UNALIGN -- Dstream unalign trap"
1067//+
1068// UNALIGN - offset 0300
1069// Entry:
1070// Vectored into via hardware trap on unaligned Dstream reference.
1071//
1072// Function:
1073// Build stack frame
1074// a0 <- Faulting VA
1075// a1 <- Opcode
1076// a2 <- src/dst register number
1077// vector via entUna
1078//-
1079
1080 HDW_VECTOR(PAL_UNALIGN_ENTRY)
1081Trap_Unalign:
1082/* DEBUGSTORE(0x47)*/
1083 sll r11, 63-osfps_v_mode, r25 // Shift mode up to MS bit
1084 mtpr r31, ev5__ps // Set Ibox current mode to kernel
1085
1086 mfpr r8, ev5__mm_stat // Get mmstat --ok to use r8, no tbmiss
1087 mfpr r14, exc_addr // get pc
1088
1089 srl r8, mm_stat_v_ra, r13 // Shift Ra field to ls bits
1090 blbs r14, pal_pal_bug_check // Bugcheck if unaligned in PAL
1091
1092 blbs r8, UNALIGN_NO_DISMISS // lsb only set on store or fetch_m
1093 // not set, must be a load
1094 and r13, 0x1F, r8 // isolate ra
1095
1096 cmpeq r8, 0x1F, r8 // check for r31/F31
1097 bne r8, dfault_fetch_ldr31_err // if its a load to r31 or f31 -- dismiss the fault
1098
1099UNALIGN_NO_DISMISS:
1100 bis r11, r31, r12 // Save PS
1101 bge r25, UNALIGN_NO_DISMISS_10_ // no stack swap needed if cm=kern
1102
1103
1104 mtpr r31, ev5__dtb_cm // Set Mbox current mode to kernel -
1105 // no virt ref for next 2 cycles
1106 mtpr r30, pt_usp // save user stack
1107
1108 bis r31, r31, r12 // Set new PS
1109 mfpr r30, pt_ksp
1110
1111UNALIGN_NO_DISMISS_10_:
1112 mfpr r25, ev5__va // Unlock VA
1113 lda sp, 0-osfsf_c_size(sp)// allocate stack space
1114
1115 mtpr r25, pt0 // Stash VA
1116 stq r18, osfsf_a2(sp) // a2
1117
1118 stq r11, osfsf_ps(sp) // save old ps
1119 srl r13, mm_stat_v_opcode-mm_stat_v_ra, r25// Isolate opcode
1120
1121 stq r29, osfsf_gp(sp) // save gp
1122 addq r14, 4, r14 // inc PC past the ld/st
1123
1124 stq r17, osfsf_a1(sp) // a1
 1125 and r25, mm_stat_m_opcode, r17// Clean opcode for a1
1126
1127 stq r16, osfsf_a0(sp) // save regs
1128 mfpr r16, pt0 // a0 <- va/unlock
1129
1130 stq r14, osfsf_pc(sp) // save pc
1131 mfpr r25, pt_entuna // get entry point
1132
1133
1134 bis r12, r31, r11 // update ps
1135 br r31, unalign_trap_cont
1136
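//+
// Illustrative sketch (editor's addition, not part of the PALcode): how the
// flow above decides whether an unaligned access can simply be dismissed.
// Field positions are the mm_stat_v_* / mm_stat_m_* symbols from the EV5
// headers; the C just mirrors the shifts done in the assembly.
//
//	/* nonzero if the trap is a load to r31/f31 and should be skipped */
//	static int unalign_should_dismiss(unsigned long mm_stat)
//	{
//	    int is_store = mm_stat & 1;			/* lsb set on store/fetch_m */
//	    int ra = (mm_stat >> mm_stat_v_ra) & 0x1f;	/* src/dst register number  */
//	    return !is_store && ra == 31;
//	}
//
// When the trap is not dismissed, the frame carries a0 = faulting VA,
// a1 = the opcode field of MM_STAT, a2 = Ra, the saved PC is advanced past
// the ld/st, and the handler is entered through entUna.
//-
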
1137
1138
1139
1140// .sbttl "DFAULT - Dstream Fault Trap Entry Point"
1141
1142//+
1143// DFAULT - offset 0380
1144// Entry:
1145// Vectored into via hardware trap on dstream fault or sign check error on DVA.
1146//
1147// Function:
1148// Ignore faults on FETCH/FETCH_M
1149// Check for DFAULT in PAL
1150// Build stack frame
1151// a0 <- Faulting VA
1152// a1 <- MMCSR (1 for ACV, 2 for FOR, 4 for FOW)
1153// a2 <- R/W
1154// vector via entMM
1155//
1156//-
1157 HDW_VECTOR(PAL_D_FAULT_ENTRY)
1158Trap_Dfault:
1159// DEBUGSTORE(0x48)
1160 sll r11, 63-osfps_v_mode, r25 // Shift mode up to MS bit
1161 mtpr r31, ev5__ps // Set Ibox current mode to kernel
1162
1163 mfpr r13, ev5__mm_stat // Get mmstat
1164 mfpr r8, exc_addr // get pc, preserve r14
1165
1166 srl r13, mm_stat_v_opcode, r9 // Shift opcode field to ls bits
1167 blbs r8, dfault_in_pal
1168
1169 bis r8, r31, r14 // move exc_addr to correct place
1170 bis r11, r31, r12 // Save PS
1171
1172 mtpr r31, ev5__dtb_cm // Set Mbox current mode to kernel -
1173 // no virt ref for next 2 cycles
1174 and r9, mm_stat_m_opcode, r9 // Clean all but opcode
1175
1176 cmpeq r9, evx_opc_sync, r9 // Is the opcode fetch/fetchm?
1177 bne r9, dfault_fetch_ldr31_err // Yes, dismiss the fault
1178
1179 //dismiss exception if load to r31/f31
1180 blbs r13, dfault_no_dismiss // mm_stat<0> set on store or fetchm
1181
1182 // not a store or fetch, must be a load
1183 srl r13, mm_stat_v_ra, r9 // Shift rnum to low bits
1184
1185 and r9, 0x1F, r9 // isolate rnum
1186 nop
1187
1188 cmpeq r9, 0x1F, r9 // Is the rnum r31 or f31?
1189 bne r9, dfault_fetch_ldr31_err // Yes, dismiss the fault
1190
1191dfault_no_dismiss:
1192 and r13, 0xf, r13 // Clean extra bits in mm_stat
1193 bge r25, dfault_trap_cont // no stack swap needed if cm=kern
1194
1195
1196 mtpr r30, pt_usp // save user stack
1197 bis r31, r31, r12 // Set new PS
1198
1199 mfpr r30, pt_ksp
1200 br r31, dfault_trap_cont
1201
1202
1203
1204
1205
1206// .sbttl "MCHK - Machine Check Trap Entry Point"
1207
1208//+
1209// MCHK - offset 0400
1210// Entry:
1211// Vectored into via hardware trap on machine check.
1212//
1213// Function:
1214//
1215//-
1216
1217 HDW_VECTOR(PAL_MCHK_ENTRY)
1218Trap_Mchk:
1219 DEBUGSTORE(0x49)
1220 mtpr r31, ic_flush_ctl // Flush the Icache
1221 br r31, sys_machine_check
1222
1223
1224
1225
1226// .sbttl "OPCDEC - Illegal Opcode Trap Entry Point"
1227
1228//+
1229// OPCDEC - offset 0480
1230// Entry:
1231// Vectored into via hardware trap on illegal opcode.
1232//
1233// Build stack frame
1234// a0 <- code
1235// a1 <- unpred
1236// a2 <- unpred
1237// vector via entIF
1238//
1239//-
1240
1241 HDW_VECTOR(PAL_OPCDEC_ENTRY)
1242Trap_Opcdec:
1243 DEBUGSTORE(0x4a)
1244//simos DEBUG_EXC_ADDR()
1245 sll r11, 63-osfps_v_mode, r25 // Shift mode up to MS bit
1246 mtpr r31, ev5__ps // Set Ibox current mode to kernel
1247
1248 mfpr r14, exc_addr // get pc
1249 blbs r14, pal_pal_bug_check // check opcdec in palmode
1250
1251 bis r11, r31, r12 // Save PS
1252 bge r25, TRAP_OPCDEC_10_ // no stack swap needed if cm=kern
1253
1254
1255 mtpr r31, ev5__dtb_cm // Set Mbox current mode to kernel -
1256 // no virt ref for next 2 cycles
1257 mtpr r30, pt_usp // save user stack
1258
1259 bis r31, r31, r12 // Set new PS
1260 mfpr r30, pt_ksp
1261
1262TRAP_OPCDEC_10_:
1263 lda sp, 0-osfsf_c_size(sp)// allocate stack space
1264 addq r14, 4, r14 // inc pc
1265
1266 stq r16, osfsf_a0(sp) // save regs
1267 bis r31, osf_a0_opdec, r16 // set a0
1268
1269 stq r11, osfsf_ps(sp) // save old ps
1270 mfpr r13, pt_entif // get entry point
1271
1272 stq r18, osfsf_a2(sp) // a2
1273 stq r17, osfsf_a1(sp) // a1
1274
1275 stq r29, osfsf_gp(sp) // save gp
1276 stq r14, osfsf_pc(sp) // save pc
1277
1278 bis r12, r31, r11 // update ps
1279 mtpr r13, exc_addr // load exc_addr with entIF
1280 // 1 cycle to hw_rei, E1
1281
1282 mfpr r29, pt_kgp // get the kgp, E1
1283
1284 hw_rei_spe // done, E1
1285
1286
1287
1288
1289
1290
1291// .sbttl "ARITH - Arithmetic Exception Trap Entry Point"
1292
1293//+
1294// ARITH - offset 0500
1295// Entry:
 1296// Vectored into via hardware trap on arithmetic exception.
1297//
1298// Function:
1299// Build stack frame
1300// a0 <- exc_sum
1301// a1 <- exc_mask
1302// a2 <- unpred
1303// vector via entArith
1304//
1305//-
1306 HDW_VECTOR(PAL_ARITH_ENTRY)
1307Trap_Arith:
1308 DEBUGSTORE(0x4b)
1309 and r11, osfps_m_mode, r12 // get mode bit
1310 mfpr r31, ev5__va // unlock mbox
1311
1312 bis r11, r31, r25 // save ps
1313 mfpr r14, exc_addr // get pc
1314
1315 nop
1316 blbs r14, pal_pal_bug_check // arith trap from PAL
1317
1318 mtpr r31, ev5__dtb_cm // Set Mbox current mode to kernel -
1319 // no virt ref for next 2 cycles
1320 beq r12, TRAP_ARITH_10_ // if zero we are in kern now
1321
1322 bis r31, r31, r25 // set the new ps
1323 mtpr r30, pt_usp // save user stack
1324
1325 nop
1326 mfpr r30, pt_ksp // get kern stack
1327
1328TRAP_ARITH_10_: lda sp, 0-osfsf_c_size(sp) // allocate stack space
1329 mtpr r31, ev5__ps // Set Ibox current mode to kernel
1330
1331 nop // Pad current mode write and stq
1332 mfpr r13, ev5__exc_sum // get the exc_sum
1333
1334 mfpr r12, pt_entarith
1335 stq r14, osfsf_pc(sp) // save pc
1336
1337 stq r17, osfsf_a1(sp)
1338 mfpr r17, ev5__exc_mask // Get exception register mask IPR - no mtpr exc_sum in next cycle
1339
1340 stq r11, osfsf_ps(sp) // save ps
1341 bis r25, r31, r11 // set new ps
1342
1343 stq r16, osfsf_a0(sp) // save regs
1344 srl r13, exc_sum_v_swc, r16// shift data to correct position
1345
1346 stq r18, osfsf_a2(sp)
1347// pvc_violate 354 // ok, but make sure reads of exc_mask/sum are not in same trap shadow
1348 mtpr r31, ev5__exc_sum // Unlock exc_sum and exc_mask
1349
1350 stq r29, osfsf_gp(sp)
1351 mtpr r12, exc_addr // Set new PC - 1 bubble to hw_rei - E1
1352
1353 mfpr r29, pt_kgp // get the kern gp - E1
1354 hw_rei_spe // done - E1
1355
1356
1357
1358
1359
1360
1361// .sbttl "FEN - Illegal Floating Point Operation Trap Entry Point"
1362
1363//+
1364// FEN - offset 0580
1365// Entry:
1366// Vectored into via hardware trap on illegal FP op.
1367//
1368// Function:
1369// Build stack frame
1370// a0 <- code
1371// a1 <- unpred
1372// a2 <- unpred
1373// vector via entIF
1374//
1375//-
1376
1377 HDW_VECTOR(PAL_FEN_ENTRY)
1378Trap_Fen:
1379 sll r11, 63-osfps_v_mode, r25 // Shift mode up to MS bit
1380 mtpr r31, ev5__ps // Set Ibox current mode to kernel
1381
1382 mfpr r14, exc_addr // get pc
1383 blbs r14, pal_pal_bug_check // check opcdec in palmode
1384
1385 mfpr r13, ev5__icsr
1386 nop
1387
1388 bis r11, r31, r12 // Save PS
1389 bge r25, TRAP_FEN_10_ // no stack swap needed if cm=kern
1390
1391 mtpr r31, ev5__dtb_cm // Set Mbox current mode to kernel -
1392 // no virt ref for next 2 cycles
1393 mtpr r30, pt_usp // save user stack
1394
1395 bis r31, r31, r12 // Set new PS
1396 mfpr r30, pt_ksp
1397
1398TRAP_FEN_10_:
1399 lda sp, 0-osfsf_c_size(sp)// allocate stack space
1400 srl r13, icsr_v_fpe, r25 // Shift FP enable to bit 0
1401
1402
1403 stq r16, osfsf_a0(sp) // save regs
1404 mfpr r13, pt_entif // get entry point
1405
1406 stq r18, osfsf_a2(sp) // a2
1407 stq r11, osfsf_ps(sp) // save old ps
1408
1409 stq r29, osfsf_gp(sp) // save gp
1410 bis r12, r31, r11 // set new ps
1411
1412 stq r17, osfsf_a1(sp) // a1
1413 blbs r25,fen_to_opcdec // If FP is enabled, this is really OPCDEC.
1414
1415 bis r31, osf_a0_fen, r16 // set a0
1416 stq r14, osfsf_pc(sp) // save pc
1417
1418 mtpr r13, exc_addr // load exc_addr with entIF
1419 // 1 cycle to hw_rei -E1
1420
1421 mfpr r29, pt_kgp // get the kgp -E1
1422
1423 hw_rei_spe // done -E1
1424
1425// FEN trap was taken, but the fault is really opcdec.
1426 ALIGN_BRANCH
1427fen_to_opcdec:
1428 addq r14, 4, r14 // save PC+4
1429 bis r31, osf_a0_opdec, r16 // set a0
1430
1431 stq r14, osfsf_pc(sp) // save pc
1432 mtpr r13, exc_addr // load exc_addr with entIF
1433 // 1 cycle to hw_rei
1434
1435 mfpr r29, pt_kgp // get the kgp
1436 hw_rei_spe // done
1437
1438
1439
1440// .sbttl "Misc handlers"
1441 // Start area for misc code.
1442//+
1443//dfault_trap_cont
1444// A dfault trap has been taken. The sp has been updated if necessary.
 1445// Push a stack frame and vector via entMM.
1446//
1447// Current state:
1448// r12 - new PS
1449// r13 - MMstat
1450// VA - locked
1451//
1452//-
1453 ALIGN_BLOCK
1454dfault_trap_cont:
1455 lda sp, 0-osfsf_c_size(sp)// allocate stack space
1456 mfpr r25, ev5__va // Fetch VA/unlock
1457
1458 stq r18, osfsf_a2(sp) // a2
1459 and r13, 1, r18 // Clean r/w bit for a2
1460
1461 stq r16, osfsf_a0(sp) // save regs
1462 bis r25, r31, r16 // a0 <- va
1463
1464 stq r17, osfsf_a1(sp) // a1
1465 srl r13, 1, r17 // shift fault bits to right position
1466
1467 stq r11, osfsf_ps(sp) // save old ps
1468 bis r12, r31, r11 // update ps
1469
1470 stq r14, osfsf_pc(sp) // save pc
1471 mfpr r25, pt_entmm // get entry point
1472
1473 stq r29, osfsf_gp(sp) // save gp
 1474 cmovlbs r17, 1, r17 // a1. acv overrides for/fow.
1475
1476 mtpr r25, exc_addr // load exc_addr with entMM
1477 // 1 cycle to hw_rei
1478 mfpr r29, pt_kgp // get the kgp
1479
1480 hw_rei_spe // done
1481
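//+
// Illustrative sketch (editor's addition, not part of the PALcode): the
// MM_STAT -> (a1, a2) conversion done just above, assuming the low MM_STAT
// bits are <0>=WR, <1>=ACV, <2>=FOR, <3>=FOW (which is what the shift and
// cmovlbs rely on):
//
//	static void dfault_args(unsigned long mm_stat,
//	                        unsigned long *a1 /* MMCSR */,
//	                        unsigned long *a2 /* R/W   */)
//	{
//	    unsigned long bits = mm_stat & 0xf;
//	    *a2 = bits & 1;		/* 0 = read, 1 = write            */
//	    *a1 = bits >> 1;		/* ACV -> 1, FOR -> 2, FOW -> 4   */
//	    if (*a1 & 1)		/* cmovlbs: ACV overrides FOR/FOW */
//	        *a1 = 1;
//	}
//-
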
1482//+
1483//unalign_trap_cont
1484// An unalign trap has been taken. Just need to finish up a few things.
1485//
1486// Current state:
1487// r25 - entUna
1488// r13 - shifted MMstat
1489//
1490//-
1491 ALIGN_BLOCK
1492unalign_trap_cont:
1493 mtpr r25, exc_addr // load exc_addr with entUna
1494 // 1 cycle to hw_rei
1495
1496
1497 mfpr r29, pt_kgp // get the kgp
1498 and r13, mm_stat_m_ra, r18 // Clean Ra for a2
1499
1500 hw_rei_spe // done
1501
1502
1503
1504//+
1505// dfault_in_pal
1506// Dfault trap was taken, exc_addr points to a PAL PC.
1507// r9 - mmstat<opcode> right justified
1508// r8 - exception address
1509//
1510// These are the cases:
1511// opcode was STQ -- from a stack builder, KSP not valid halt
1512// r14 - original exc_addr
1513// r11 - original PS
1514// opcode was STL_C -- rti or retsys clear lock_flag by stack write,
1515// KSP not valid halt
1516// r11 - original PS
1517// r14 - original exc_addr
1518// opcode was LDQ -- retsys or rti stack read, KSP not valid halt
1519// r11 - original PS
1520// r14 - original exc_addr
1521// opcode was HW_LD -- itbmiss or dtbmiss, bugcheck due to fault on page tables
1522// r10 - original exc_addr
1523// r11 - original PS
1524//
1525//
1526//-
1527 ALIGN_BLOCK
1528dfault_in_pal:
1529 DEBUGSTORE(0x50)
1530 bic r8, 3, r8 // Clean PC
1531 mfpr r9, pal_base
1532
1533 mfpr r31, va // unlock VA
1534#if real_mm != 0
1535 // if not real_mm, should never get here from miss flows
1536
1537 subq r9, r8, r8 // pal_base - offset
1538
1539 lda r9, pal_itb_ldq-pal_base(r8)
1540 nop
1541
1542 beq r9, dfault_do_bugcheck
1543 lda r9, pal_dtb_ldq-pal_base(r8)
1544
1545 beq r9, dfault_do_bugcheck
1546#endif
1547
1548//
1549// KSP invalid halt case --
1550ksp_inval_halt:
1551 DEBUGSTORE(76)
1552 bic r11, osfps_m_mode, r11 // set ps to kernel mode
1553 mtpr r0, pt0
1554
1555 mtpr r31, dtb_cm // Make sure that the CM IPRs are all kernel mode
1556 mtpr r31, ips
1557
1558 mtpr r14, exc_addr // Set PC to instruction that caused trouble
1559//orig pvc_jsr updpcb, bsr=1
1560 bsr r0, pal_update_pcb // update the pcb
1561
 1562 lda r0, hlt_c_ksp_inval(r31) // set halt code to ksp invalid
1563 br r31, sys_enter_console // enter the console
1564
1565 ALIGN_BRANCH
1566dfault_do_bugcheck:
1567 bis r10, r31, r14 // bugcheck expects exc_addr in r14
1568 br r31, pal_pal_bug_check
1569
1570
1571 ALIGN_BLOCK
1572//+
1573// dfault_fetch_ldr31_err - ignore faults on fetch(m) and loads to r31/f31
1574// On entry -
1575// r14 - exc_addr
1576// VA is locked
1577//
1578//-
1579dfault_fetch_ldr31_err:
1580 mtpr r11, ev5__dtb_cm
1581 mtpr r11, ev5__ps // Make sure ps hasn't changed
1582
1583 mfpr r31, va // unlock the mbox
1584 addq r14, 4, r14 // inc the pc to skip the fetch
1585
1586 mtpr r14, exc_addr // give ibox new PC
1587 mfpr r31, pt0 // pad exc_addr write
1588
1589 hw_rei
1590
1591
1592
1593 ALIGN_BLOCK
1594//+
1595// sys_from_kern
1596// callsys from kernel mode - OS bugcheck machine check
1597//
1598//-
1599sys_from_kern:
1600 mfpr r14, exc_addr // PC points to call_pal
1601 subq r14, 4, r14
1602
1603 lda r25, mchk_c_os_bugcheck(r31) // fetch mchk code
1604 br r31, pal_pal_mchk
1605
1606
1607// .sbttl "Continuation of long call_pal flows"
1608 ALIGN_BLOCK
1609//+
1610// wrent_tbl
 1611// Table to write the ent* entry points into the paltemps.
1612// 4 instructions/entry
1613// r16 has new value
1614//
1615//-
1616wrent_tbl:
1617//orig pvc_jsr wrent, dest=1
1618 nop
1619 mtpr r16, pt_entint
1620
1621 mfpr r31, pt0 // Pad for mt->mf paltemp rule
1622 hw_rei
1623
1624
1625//orig pvc_jsr wrent, dest=1
1626 nop
1627 mtpr r16, pt_entarith
1628
1629 mfpr r31, pt0 // Pad for mt->mf paltemp rule
1630 hw_rei
1631
1632
1633//orig pvc_jsr wrent, dest=1
1634 nop
1635 mtpr r16, pt_entmm
1636
1637 mfpr r31, pt0 // Pad for mt->mf paltemp rule
1638 hw_rei
1639
1640
1641//orig pvc_jsr wrent, dest=1
1642 nop
1643 mtpr r16, pt_entif
1644
1645 mfpr r31, pt0 // Pad for mt->mf paltemp rule
1646 hw_rei
1647
1648
1649//orig pvc_jsr wrent, dest=1
1650 nop
1651 mtpr r16, pt_entuna
1652
1653 mfpr r31, pt0 // Pad for mt->mf paltemp rule
1654 hw_rei
1655
1656
1657//orig pvc_jsr wrent, dest=1
1658 nop
1659 mtpr r16, pt_entsys
1660
1661 mfpr r31, pt0 // Pad for mt->mf paltemp rule
1662 hw_rei
1663
1664 ALIGN_BLOCK
1665//+
1666// tbi_tbl
1667// Table to do tbi instructions
1668// 4 instructions per entry
1669//-
1670tbi_tbl:
1671 // -2 tbia
1672//orig pvc_jsr tbi, dest=1
1673 mtpr r31, ev5__dtb_ia // Flush DTB
1674 mtpr r31, ev5__itb_ia // Flush ITB
1675
1676#if icflush_on_tbix != 0
1677
1678
1679 br r31, pal_ic_flush // Flush Icache
1680#else
1681
1682 hw_rei_stall
1683#endif
1684
1685 nop // Pad table
1686
1687 // -1 tbiap
1688//orig pvc_jsr tbi, dest=1
1689 mtpr r31, ev5__dtb_iap // Flush DTB
1690 mtpr r31, ev5__itb_iap // Flush ITB
1691
1692#if icflush_on_tbix != 0
1693
1694
1695 br r31, pal_ic_flush // Flush Icache
1696#else
1697
1698 hw_rei_stall
1699#endif
1700
1701 nop // Pad table
1702
1703
1704 // 0 unused
1705//orig pvc_jsr tbi, dest=1
1706 hw_rei // Pad table
1707 nop
1708 nop
1709 nop
1710
1711
1712 // 1 tbisi
1713//orig pvc_jsr tbi, dest=1
1714#if icflush_on_tbix != 0
1715
1716
1717
1718 nop
1719 br r31, pal_ic_flush_and_tbisi // Flush Icache
1720 nop
1721 nop // Pad table
1722#else
1723
1724 nop
1725 nop
1726 mtpr r17, ev5__itb_is // Flush ITB
1727 hw_rei_stall
1728#endif
1729
1730
1731
1732 // 2 tbisd
1733//orig pvc_jsr tbi, dest=1
1734 mtpr r17, ev5__dtb_is // Flush DTB.
1735 nop
1736
1737 nop
1738 hw_rei_stall
1739
1740
1741 // 3 tbis
1742//orig pvc_jsr tbi, dest=1
1743 mtpr r17, ev5__dtb_is // Flush DTB
1744#if icflush_on_tbix != 0
1745
1746
1747 br r31, pal_ic_flush_and_tbisi // Flush Icache and ITB
1748#else
1749 br r31, tbi_finish
1750 ALIGN_BRANCH
1751tbi_finish:
1752 mtpr r17, ev5__itb_is // Flush ITB
1753 hw_rei_stall
1754#endif
1755
1756
1757
1758 ALIGN_BLOCK
1759//+
1760// bpt_bchk_common:
1761// Finish up the bpt/bchk instructions
1762//-
1763bpt_bchk_common:
1764 stq r18, osfsf_a2(sp) // a2
1765 mfpr r13, pt_entif // get entry point
1766
1767 stq r12, osfsf_ps(sp) // save old ps
1768 stq r14, osfsf_pc(sp) // save pc
1769
1770 stq r29, osfsf_gp(sp) // save gp
1771 mtpr r13, exc_addr // load exc_addr with entIF
1772 // 1 cycle to hw_rei
1773
1774 mfpr r29, pt_kgp // get the kgp
1775
1776
1777 hw_rei_spe // done
1778
1779
1780 ALIGN_BLOCK
1781//+
1782// rti_to_user
1783// Finish up the rti instruction
1784//-
1785rti_to_user:
1786 mtpr r11, ev5__dtb_cm // set Mbox current mode - no virt ref for 2 cycles
1787 mtpr r11, ev5__ps // set Ibox current mode - 2 bubble to hw_rei
1788
1789 mtpr r31, ev5__ipl // set the ipl. No hw_rei for 2 cycles
 1790 mtpr r25, pt_ksp // save off in case RTI to user
1791
1792 mfpr r30, pt_usp
1793 hw_rei_spe // and back
1794
1795
1796 ALIGN_BLOCK
1797//+
1798// rti_to_kern
1799// Finish up the rti instruction
1800//-
1801rti_to_kern:
1802 and r12, osfps_m_ipl, r11 // clean ps
1803 mfpr r12, pt_intmask // get int mask
1804
1805 extbl r12, r11, r12 // get mask for this ipl
 1806 mtpr r25, pt_ksp // save off in case RTI to user
1807
1808 mtpr r12, ev5__ipl // set the new ipl.
1809 or r25, r31, sp // sp
1810
1811// pvc_violate 217 // possible hidden mt->mf ipl not a problem in callpals
1812 hw_rei
1813
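//+
// Illustrative sketch (editor's addition, not part of the PALcode): pt_intmask
// is treated as a small byte table, one byte per OSF IPL (0..7), holding the
// EV5 IPL to program into the Ibox.  The extbl above is equivalent to:
//
//	static unsigned osf_ipl_to_ev5_ipl(unsigned long pt_intmask, unsigned osf_ipl)
//	{
//	    return (pt_intmask >> (osf_ipl * 8)) & 0xff;	/* extbl r12, r11, r12 */
//	}
//
// The interrupt entry flow does the same lookup when raising the IPL on the
// way in to sys_interrupt.
//-
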
1814 ALIGN_BLOCK
1815//+
1816// swpctx_cont
1817// Finish up the swpctx instruction
1818//-
1819
1820swpctx_cont:
1821#if ev5_p1 != 0
1822
1823
1824 bic r25, r24, r25 // clean icsr<FPE>
1825 get_impure r8 // get impure pointer
1826
1827 sll r12, icsr_v_fpe, r12 // shift new fen to pos
1828 fix_impure_ipr r8 // adjust impure pointer
1829
1830 restore_reg1 pmctr_ctl, r8, r8, ipr=1 // "ldqp" - get pmctr_ctl bits
1831 srl r23, 32, r24 // move asn to low asn pos
1832
1833 ldqp r14, osfpcb_q_mmptr(r16)// get new mmptr
1834 srl r22, osfpcb_v_pme, r22 // get pme down to bit 0
1835
1836 or r25, r12, r25 // icsr with new fen
1837 sll r24, itb_asn_v_asn, r12
1838
1839#else
1840
1841 bic r25, r24, r25 // clean icsr<FPE,PMP>
1842 sll r12, icsr_v_fpe, r12 // shift new fen to pos
1843
1844 ldqp r14, osfpcb_q_mmptr(r16)// get new mmptr
1845 srl r22, osfpcb_v_pme, r22 // get pme down to bit 0
1846
1847 or r25, r12, r25 // icsr with new fen
1848 srl r23, 32, r24 // move asn to low asn pos
1849
1850 and r22, 1, r22
1851 sll r24, itb_asn_v_asn, r12
1852
1853 sll r22, icsr_v_pmp, r22
1854 nop
1855
1856 or r25, r22, r25 // icsr with new pme
1857#endif
1858
1859 sll r24, dtb_asn_v_asn, r24
1860
1861 subl r23, r13, r13 // gen new cc offset
1862 mtpr r12, itb_asn // no hw_rei_stall in 0,1,2,3,4
1863
1864 mtpr r24, dtb_asn // Load up new ASN
1865 mtpr r25, icsr // write the icsr
1866
1867 sll r14, page_offset_size_bits, r14 // Move PTBR into internal position.
1868 ldqp r25, osfpcb_q_usp(r16) // get new usp
1869
1870 insll r13, 4, r13 // >> 32
1871// pvc_violate 379 // ldqp can't trap except replay. only problem if mf same ipr in same shadow
1872 mtpr r14, pt_ptbr // load the new ptbr
1873
1874 mtpr r13, cc // set new offset
1875 ldqp r30, osfpcb_q_ksp(r16) // get new ksp
1876
1877// pvc_violate 379 // ldqp can't trap except replay. only problem if mf same ipr in same shadow
1878 mtpr r25, pt_usp // save usp
1879
1880#if ev5_p1 != 0
1881
1882
1883 blbc r8, no_pm_change // if monitoring all processes -- no need to change pm
1884
1885 // otherwise, monitoring select processes - update pm
1886 lda r25, 0x3F(r31)
 1887 cmovlbc r22, r31, r8 // if pme clear, disable counters, otherwise use saved encodings
1888
1889 sll r25, pmctr_v_ctl2, r25 // create ctl field bit mask
1890 mfpr r22, ev5__pmctr
1891
1892 and r8, r25, r8 // mask new ctl value
1893 bic r22, r25, r22 // clear ctl field in pmctr
1894
1895 or r8, r22, r8
1896 mtpr r8, ev5__pmctr
1897
1898no_pm_change:
1899#endif
1900
1901
1902#if osf_chm_fix != 0
1903
1904
1905 p4_fixup_hw_rei_stall // removes this section for Pass 4 by placing a hw_rei_stall here
1906
1907#if build_fixed_image != 0
1908
1909
1910 hw_rei_stall
1911#else
1912
1913 mfpr r9, pt_pcbb // get FEN
1914#endif
1915
1916 ldqp r9, osfpcb_q_fen(r9)
1917 blbc r9, no_pm_change_10_ // skip if FEN disabled
1918
1919 mb // ensure no outstanding fills
1920 lda r12, 1<<dc_mode_v_dc_ena(r31)
1921 mtpr r12, dc_mode // turn dcache on so we can flush it
1922 nop // force correct slotting
1923 mfpr r31, pt0 // no mbox instructions in 1,2,3,4
1924 mfpr r31, pt0 // no mbox instructions in 1,2,3,4
1925 mfpr r31, pt0 // no mbox instructions in 1,2,3,4
1926 mfpr r31, pt0 // no mbox instructions in 1,2,3,4
1927
1928 lda r8, 0(r31) // flood the dcache with junk data
1929no_pm_change_5_: ldqp r31, 0(r8)
1930 lda r8, 0x20(r8) // touch each cache block
1931 srl r8, 13, r9
1932 blbc r9, no_pm_change_5_
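    // (The loop above touches one 32-byte block per iteration and stops once the
    //  address reaches 8KB -- bit 13 set -- which should walk every line of the
    //  8KB dcache, displacing whatever was cached before it is turned back off.)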
1933
1934 mb // ensure no outstanding fills
1935 mtpr r31, dc_mode // turn the dcache back off
1936 nop // force correct slotting
1937 mfpr r31, pt0 // no hw_rei_stall in 0,1
1938#endif
1939
1940
1941no_pm_change_10_: hw_rei_stall // back we go
1942
1943 ALIGN_BLOCK
1944//+
1945// swppal_cont - finish up the swppal call_pal
1946//-
1947
1948swppal_cont:
1949 mfpr r2, pt_misc // get misc bits
1950 sll r0, pt_misc_v_switch, r0 // get the "I've switched" bit
1951 or r2, r0, r2 // set the bit
1952 mtpr r31, ev5__alt_mode // ensure alt_mode set to 0 (kernel)
1953 mtpr r2, pt_misc // update the chip
1954
 1955 or r3, r31, r4 // stash the target PAL address in r4 (r3 is reloaded just below)
1956 mfpr r3, pt_impure // pass pointer to the impure area in r3
1957//orig fix_impure_ipr r3 // adjust impure pointer for ipr read
1958//orig restore_reg1 bc_ctl, r1, r3, ipr=1 // pass cns_bc_ctl in r1
1959//orig restore_reg1 bc_config, r2, r3, ipr=1 // pass cns_bc_config in r2
1960//orig unfix_impure_ipr r3 // restore impure pointer
1961 lda r3, CNS_Q_IPR(r3)
1962 RESTORE_SHADOW(r1,CNS_Q_BC_CTL,r3);
1963 RESTORE_SHADOW(r1,CNS_Q_BC_CFG,r3);
1964 lda r3, -CNS_Q_IPR(r3)
1965
1966 or r31, r31, r0 // set status to success
1967// pvc_violate 1007
1968 jmp r31, (r4) // and call our friend, it's her problem now
1969
1970
1971swppal_fail:
1972 addq r0, 1, r0 // set unknown pal or not loaded
1973 hw_rei // and return
1974
1975
1976// .sbttl "Memory management"
1977
1978 ALIGN_BLOCK
1979//+
1980//foe_ipte_handler
1981// IFOE detected on level 3 pte, sort out FOE vs ACV
1982//
1983// on entry:
1984// with
1985// R8 = pte
1986// R10 = pc
1987//
1988// Function
1989// Determine TNV vs ACV vs FOE. Build stack and dispatch
1990// Will not be here if TNV.
1991//-
1992
1993foe_ipte_handler:
1994 sll r11, 63-osfps_v_mode, r25 // Shift mode up to MS bit
1995 mtpr r31, ev5__ps // Set Ibox current mode to kernel
1996
1997 bis r11, r31, r12 // Save PS for stack write
1998 bge r25, foe_ipte_handler_10_ // no stack swap needed if cm=kern
1999
2000
2001 mtpr r31, ev5__dtb_cm // Set Mbox current mode to kernel -
2002 // no virt ref for next 2 cycles
2003 mtpr r30, pt_usp // save user stack
2004
2005 bis r31, r31, r11 // Set new PS
2006 mfpr r30, pt_ksp
2007
2008 srl r8, osfpte_v_ure-osfpte_v_kre, r8 // move pte user bits to kern
2009 nop
2010
2011foe_ipte_handler_10_: srl r8, osfpte_v_kre, r25 // get kre to <0>
2012 lda sp, 0-osfsf_c_size(sp)// allocate stack space
2013
2014 or r10, r31, r14 // Save pc/va in case TBmiss or fault on stack
2015 mfpr r13, pt_entmm // get entry point
2016
2017 stq r16, osfsf_a0(sp) // a0
2018 or r14, r31, r16 // pass pc/va as a0
2019
2020 stq r17, osfsf_a1(sp) // a1
2021 nop
2022
2023 stq r18, osfsf_a2(sp) // a2
2024 lda r17, mmcsr_c_acv(r31) // assume ACV
2025
2026 stq r16, osfsf_pc(sp) // save pc
2027 cmovlbs r25, mmcsr_c_foe, r17 // otherwise FOE
2028
2029 stq r12, osfsf_ps(sp) // save ps
2030 subq r31, 1, r18 // pass flag of istream as a2
2031
2032 stq r29, osfsf_gp(sp)
2033 mtpr r13, exc_addr // set vector address
2034
2035 mfpr r29, pt_kgp // load kgp
2036 hw_rei_spe // out to exec
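    // (Summary: the frame built above is the standard OSF exception frame -- ps, pc,
    //  gp and a0..a2 at the osfsf_* offsets -- with a0 = faulting pc/va, a1 = the
    //  MMCSR code (ACV unless PTE<KRE> is set, in which case FOE), and a2 = -1 to
    //  flag an istream reference; control then vectors through entMM.)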
2037
2038 ALIGN_BLOCK
2039//+
2040//invalid_ipte_handler
2041// TNV detected on level 3 pte, sort out TNV vs ACV
2042//
2043// on entry:
2044// with
2045// R8 = pte
2046// R10 = pc
2047//
2048// Function
2049// Determine TNV vs ACV. Build stack and dispatch.
2050//-
2051
2052invalid_ipte_handler:
2053 sll r11, 63-osfps_v_mode, r25 // Shift mode up to MS bit
2054 mtpr r31, ev5__ps // Set Ibox current mode to kernel
2055
2056 bis r11, r31, r12 // Save PS for stack write
2057 bge r25, invalid_ipte_handler_10_ // no stack swap needed if cm=kern
2058
2059
2060 mtpr r31, ev5__dtb_cm // Set Mbox current mode to kernel -
2061 // no virt ref for next 2 cycles
2062 mtpr r30, pt_usp // save user stack
2063
2064 bis r31, r31, r11 // Set new PS
2065 mfpr r30, pt_ksp
2066
2067 srl r8, osfpte_v_ure-osfpte_v_kre, r8 // move pte user bits to kern
2068 nop
2069
2070invalid_ipte_handler_10_: srl r8, osfpte_v_kre, r25 // get kre to <0>
2071 lda sp, 0-osfsf_c_size(sp)// allocate stack space
2072
2073 or r10, r31, r14 // Save pc/va in case TBmiss on stack
2074 mfpr r13, pt_entmm // get entry point
2075
2076 stq r16, osfsf_a0(sp) // a0
2077 or r14, r31, r16 // pass pc/va as a0
2078
2079 stq r17, osfsf_a1(sp) // a1
2080 nop
2081
2082 stq r18, osfsf_a2(sp) // a2
2083 and r25, 1, r17 // Isolate kre
2084
2085 stq r16, osfsf_pc(sp) // save pc
2086 xor r17, 1, r17 // map to acv/tnv as a1
2087
2088 stq r12, osfsf_ps(sp) // save ps
2089 subq r31, 1, r18 // pass flag of istream as a2
2090
2091 stq r29, osfsf_gp(sp)
2092 mtpr r13, exc_addr // set vector address
2093
2094 mfpr r29, pt_kgp // load kgp
2095 hw_rei_spe // out to exec
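    // (Here a1 is derived from PTE<KRE> by the and/xor pair: kre=1 maps to TNV and
    //  kre=0 to ACV, which presumes the TNV code is 0 and the ACV code is 1; a2 = -1
    //  again flags an istream reference.)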
2096
2097
2098
2099
2100 ALIGN_BLOCK
2101//+
2102//invalid_dpte_handler
2103// INVALID detected on level 3 pte, sort out TNV vs ACV
2104//
2105// on entry:
2106// with
2107// R10 = va
2108// R8 = pte
2109// R9 = mm_stat
2110// PT6 = pc
2111//
2112// Function
2113// Determine TNV vs ACV. Build stack and dispatch
2114//-
2115
2116
2117invalid_dpte_handler:
2118 mfpr r12, pt6
2119 blbs r12, tnv_in_pal // Special handler if original faulting reference was in PALmode
2120
2121 bis r12, r31, r14 // save PC in case of tbmiss or fault
2122 srl r9, mm_stat_v_opcode, r25 // shift opc to <0>
2123
2124 mtpr r11, pt0 // Save PS for stack write
2125 and r25, mm_stat_m_opcode, r25 // isolate opcode
2126
2127 cmpeq r25, evx_opc_sync, r25 // is it FETCH/FETCH_M?
2128 blbs r25, nmiss_fetch_ldr31_err // yes
2129
2130 //dismiss exception if load to r31/f31
2131 blbs r9, invalid_dpte_no_dismiss // mm_stat<0> set on store or fetchm
2132
2133 // not a store or fetch, must be a load
2134 srl r9, mm_stat_v_ra, r25 // Shift rnum to low bits
2135
2136 and r25, 0x1F, r25 // isolate rnum
2137 nop
2138
2139 cmpeq r25, 0x1F, r25 // Is the rnum r31 or f31?
2140 bne r25, nmiss_fetch_ldr31_err // Yes, dismiss the fault
2141
2142invalid_dpte_no_dismiss:
2143 sll r11, 63-osfps_v_mode, r25 // Shift mode up to MS bit
2144 mtpr r31, ev5__ps // Set Ibox current mode to kernel
2145
2146 mtpr r31, ev5__dtb_cm // Set Mbox current mode to kernel -
2147 // no virt ref for next 2 cycles
2148 bge r25, invalid_dpte_no_dismiss_10_ // no stack swap needed if cm=kern
2149
2150 srl r8, osfpte_v_ure-osfpte_v_kre, r8 // move pte user bits to kern
2151 mtpr r30, pt_usp // save user stack
2152
2153 bis r31, r31, r11 // Set new PS
2154 mfpr r30, pt_ksp
2155
2156invalid_dpte_no_dismiss_10_: srl r8, osfpte_v_kre, r12 // get kre to <0>
2157 lda sp, 0-osfsf_c_size(sp)// allocate stack space
2158
2159 or r10, r31, r25 // Save va in case TBmiss on stack
2160 and r9, 1, r13 // save r/w flag
2161
2162 stq r16, osfsf_a0(sp) // a0
2163 or r25, r31, r16 // pass va as a0
2164
2165 stq r17, osfsf_a1(sp) // a1
2166 or r31, mmcsr_c_acv, r17 // assume acv
2167
2168 srl r12, osfpte_v_kwe-osfpte_v_kre, r25 // get write enable to <0>
2169 stq r29, osfsf_gp(sp)
2170
2171 stq r18, osfsf_a2(sp) // a2
2172 cmovlbs r13, r25, r12 // if write access move acv based on write enable
2173
2174 or r13, r31, r18 // pass flag of dstream access and read vs write
2175 mfpr r25, pt0 // get ps
2176
2177 stq r14, osfsf_pc(sp) // save pc
2178 mfpr r13, pt_entmm // get entry point
2179
2180 stq r25, osfsf_ps(sp) // save ps
2181 mtpr r13, exc_addr // set vector address
2182
2183 mfpr r29, pt_kgp // load kgp
 2184 cmovlbs r12, mmcsr_c_tnv, r17 // make a1 (r17) TNV if the access was allowed, else leave it ACV
2185
2186 hw_rei_spe // out to exec
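    // (For this dstream case a2 = mm_stat<0>, i.e. 0 for a read and 1 for a
    //  write/fetchm, and a1 starts out as ACV; the cmovlbs pair switches it to TNV
    //  when the relevant enable bit -- KRE for reads, KWE for writes -- allows the
    //  access.)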
2187
2188//+
2189//
 2190 // We come here if we would report an error on a dtb_miss but the instruction
 2191 // is a fetch, fetch_m, or a load to r31/f31.
 2192 // The PC is incremented and we return to the program,
 2193 // essentially ignoring the instruction and the error.
2194//
2195//-
2196 ALIGN_BLOCK
2197nmiss_fetch_ldr31_err:
2198 mfpr r12, pt6
2199 addq r12, 4, r12 // bump pc to pc+4
2200
2201 mtpr r12, exc_addr // and set entry point
2202 mfpr r31, pt0 // pad exc_addr write
2203
2204 hw_rei //
2205
2206 ALIGN_BLOCK
2207//+
2208// double_pte_inv
2209// We had a single tbmiss which turned into a double tbmiss which found
2210// an invalid PTE. Return to single miss with a fake pte, and the invalid
2211// single miss flow will report the error.
2212//
2213// on entry:
2214// r21 PTE
2215// r22 available
2216// VA IPR locked with original fault VA
2217// pt4 saved r21
2218// pt5 saved r22
2219// pt6 original exc_addr
2220//
2221// on return to tbmiss flow:
2222// r8 fake PTE
2223//
2224//
2225//-
2226double_pte_inv:
2227 srl r21, osfpte_v_kre, r21 // get the kre bit to <0>
2228 mfpr r22, exc_addr // get the pc
2229
2230 lda r22, 4(r22) // inc the pc
2231 lda r8, osfpte_m_prot(r31) // make a fake pte with xre and xwe set
2232
2233 cmovlbc r21, r31, r8 // set to all 0 for acv if pte<kre> is 0
2234 mtpr r22, exc_addr // set for rei
2235
2236 mfpr r21, pt4 // restore regs
2237 mfpr r22, pt5 // restore regs
2238
2239 hw_rei // back to tb miss
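    // (As I read it, the "fake" PTE is either osfpte_m_prot -- all read/write enables
    //  set, so the single-miss flow will classify the fault as TNV -- or all zeros
    //  when the PTE's KRE was clear, in which case it will be reported as ACV.)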
2240
2241 ALIGN_BLOCK
2242//+
2243//tnv_in_pal
2244// The only places in pal that ld or store are the
2245// stack builders, rti or retsys. Any of these mean we
2246// need to take a ksp not valid halt.
2247//
2248//-
2249tnv_in_pal:
2250
2251
2252 br r31, ksp_inval_halt
2253
2254
2255// .sbttl "Icache flush routines"
2256
2257 ALIGN_BLOCK
2258//+
2259// Common Icache flush routine.
2260//
2261//
2262//-
2263pal_ic_flush:
2264 nop
2265 mtpr r31, ev5__ic_flush_ctl // Icache flush - E1
2266 nop
2267 nop
2268
 2269 // Now, do 44 NOPs: 3 RFB prefetches (24) + IC buffer, IB, slot, issue (20)
2270 nop
2271 nop
2272 nop
2273 nop
2274
2275 nop
2276 nop
2277 nop
2278 nop
2279
2280 nop
2281 nop // 10
2282
2283 nop
2284 nop
2285 nop
2286 nop
2287
2288 nop
2289 nop
2290 nop
2291 nop
2292
2293 nop
2294 nop // 20
2295
2296 nop
2297 nop
2298 nop
2299 nop
2300
2301 nop
2302 nop
2303 nop
2304 nop
2305
2306 nop
2307 nop // 30
2308 nop
2309 nop
2310 nop
2311 nop
2312
2313 nop
2314 nop
2315 nop
2316 nop
2317
2318 nop
2319 nop // 40
2320
2321 nop
2322 nop
2323
2324one_cycle_and_hw_rei:
2325 nop
2326 nop
2327
2328 hw_rei_stall
2329
2330#if icflush_on_tbix != 0
2331
2332
2333 ALIGN_BLOCK
2334
2335//+
2336// Common Icache flush and ITB invalidate single routine.
2337// ITBIS and hw_rei_stall must be in same octaword.
2338// r17 - has address to invalidate
2339//
2340//-
2341PAL_IC_FLUSH_AND_TBISI:
2342 nop
2343 mtpr r31, ev5__ic_flush_ctl // Icache flush - E1
2344 nop
2345 nop
2346
 2347 // Now, do 44 NOPs: 3 RFB prefetches (24) + IC buffer, IB, slot, issue (20)
2348 nop
2349 nop
2350 nop
2351 nop
2352
2353 nop
2354 nop
2355 nop
2356 nop
2357
2358 nop
2359 nop // 10
2360
2361 nop
2362 nop
2363 nop
2364 nop
2365
2366 nop
2367 nop
2368 nop
2369 nop
2370
2371 nop
2372 nop // 20
2373
2374 nop
2375 nop
2376 nop
2377 nop
2378
2379 nop
2380 nop
2381 nop
2382 nop
2383
2384 nop
2385 nop // 30
2386 nop
2387 nop
2388 nop
2389 nop
2390
2391 nop
2392 nop
2393 nop
2394 nop
2395
2396 nop
2397 nop // 40
2398
2399
2400 nop
2401 nop
2402
2403 nop
2404 nop
2405
2406 // A quadword is 64 bits, so an octaword is 128 bits -> 16 bytes -> 4 instructions
 2407 // The 44 nops plus the 4 instructions before them make 48 instructions.
 2408 // Since this routine started on a 32-byte (8 instruction) boundary,
 2409 // the following 2 instructions will be in the same octaword as required.
2410// ALIGN_BRANCH
2411 mtpr r17, ev5__itb_is // Flush ITB
2412 hw_rei_stall
2413
2414#endif
2415
2416 ALIGN_BLOCK
2417//+
2418//osfpal_calpal_opcdec
2419// Here for all opcdec CALL_PALs
2420//
2421// Build stack frame
2422// a0 <- code
2423// a1 <- unpred
2424// a2 <- unpred
2425// vector via entIF
2426//
2427//-
2428
2429osfpal_calpal_opcdec:
2430 sll r11, 63-osfps_v_mode, r25 // Shift mode up to MS bit
2431 mtpr r31, ev5__ps // Set Ibox current mode to kernel
2432
2433 mfpr r14, exc_addr // get pc
2434 nop
2435
2436 bis r11, r31, r12 // Save PS for stack write
2437 bge r25, osfpal_calpal_opcdec_10_ // no stack swap needed if cm=kern
2438
2439
2440 mtpr r31, ev5__dtb_cm // Set Mbox current mode to kernel -
2441 // no virt ref for next 2 cycles
2442 mtpr r30, pt_usp // save user stack
2443
2444 bis r31, r31, r11 // Set new PS
2445 mfpr r30, pt_ksp
2446
2447osfpal_calpal_opcdec_10_:
2448 lda sp, 0-osfsf_c_size(sp)// allocate stack space
2449 nop
2450
2451 stq r16, osfsf_a0(sp) // save regs
2452 bis r31, osf_a0_opdec, r16 // set a0
2453
2454 stq r18, osfsf_a2(sp) // a2
2455 mfpr r13, pt_entif // get entry point
2456
2457 stq r12, osfsf_ps(sp) // save old ps
2458 stq r17, osfsf_a1(sp) // a1
2459
2460 stq r14, osfsf_pc(sp) // save pc
2461 nop
2462
2463 stq r29, osfsf_gp(sp) // save gp
2464 mtpr r13, exc_addr // load exc_addr with entIF
2465 // 1 cycle to hw_rei
2466
2467 mfpr r29, pt_kgp // get the kgp
2468
2469
2470 hw_rei_spe // done
2471
2472
2473
2474
2475
2476//+
2477//pal_update_pcb
2478// Update the PCB with the current SP, AST, and CC info
2479//
2480// r0 - return linkage
2481//-
2482 ALIGN_BLOCK
2483
2484pal_update_pcb:
2485 mfpr r12, pt_pcbb // get pcbb
2486 and r11, osfps_m_mode, r25 // get mode
2487 beq r25, pal_update_pcb_10_ // in kern? no need to update user sp
2488 mtpr r30, pt_usp // save user stack
2489 stqp r30, osfpcb_q_usp(r12) // store usp
2490 br r31, pal_update_pcb_20_ // join common
2491pal_update_pcb_10_: stqp r30, osfpcb_q_ksp(r12) // store ksp
2492pal_update_pcb_20_: rpcc r13 // get cyccounter
2493 srl r13, 32, r14 // move offset
2494 addl r13, r14, r14 // merge for new time
2495 stlp r14, osfpcb_l_cc(r12) // save time
2496
2497//orig pvc_jsr updpcb, bsr=1, dest=1
2498 ret r31, (r0)
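    // (rpcc returns the 32-bit count in the low half and the per-process offset in
    //  the high half; the srl/addl pair above folds the two into the single 32-bit
    //  process time that is stored back into the PCB's CC field.)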
2499
2500
2501
2502#if remove_save_state == 0
2503
2504// .sbttl "PAL_SAVE_STATE"
2505//+
2506//
2507// Pal_save_state
2508//
2509// Function
 2510 // All chip state saved: all PTs, SRs, FRs, IPRs
 2511 //
 2512 //
 2513 // Regs on entry...
2514//
2515// R0 = halt code
2516// pt0 = r0
2517// R1 = pointer to impure
2518// pt4 = r1
2519// R3 = return addr
2520// pt5 = r3
2521//
2522// register usage:
2523// r0 = halt_code
2524// r1 = addr of impure area
2525// r3 = return_address
2526// r4 = scratch
2527//
2528//-
2529
2530
2531 ALIGN_BLOCK
2532 .globl pal_save_state
2533pal_save_state:
2534//
2535//
2536// start of implementation independent save routine
2537//
 2538 // the impure area is larger than the addressability of hw_ld and hw_st
2539// therefore, we need to play some games: The impure area
2540// is informally divided into the "machine independent" part and the
2541// "machine dependent" part. The state that will be saved in the
2542// "machine independent" part are gpr's, fpr's, hlt, flag, mchkflag (use (un)fix_impure_gpr macros).
2543// All others will be in the "machine dependent" part (use (un)fix_impure_ipr macros).
2544// The impure pointer will need to be adjusted by a different offset for each. The store/restore_reg
2545// macros will automagically adjust the offset correctly.
2546//
2547
2548// The distributed code is commented out and followed by corresponding SRC code.
2549// Beware: SAVE_IPR and RESTORE_IPR blow away r0(v0)
2550
2551//orig fix_impure_gpr r1 // adjust impure area pointer for stores to "gpr" part of impure area
2552 lda r1, 0x200(r1) // Point to center of CPU segment
2553//orig store_reg1 flag, r31, r1, ipr=1 // clear dump area flag
2554 SAVE_GPR(r31,CNS_Q_FLAG,r1) // Clear the valid flag
2555//orig store_reg1 hlt, r0, r1, ipr=1
2556 SAVE_GPR(r0,CNS_Q_HALT,r1) // Save the halt code
2557
2558 mfpr r0, pt0 // get r0 back //orig
2559//orig store_reg1 0, r0, r1 // save r0
2560 SAVE_GPR(r0,CNS_Q_GPR+0x00,r1) // Save r0
2561
2562 mfpr r0, pt4 // get r1 back //orig
2563//orig store_reg1 1, r0, r1 // save r1
2564 SAVE_GPR(r0,CNS_Q_GPR+0x08,r1) // Save r1
2565
2566//orig store_reg 2 // save r2
2567 SAVE_GPR(r2,CNS_Q_GPR+0x10,r1) // Save r2
2568
2569 mfpr r0, pt5 // get r3 back //orig
2570//orig store_reg1 3, r0, r1 // save r3
2571 SAVE_GPR(r0,CNS_Q_GPR+0x18,r1) // Save r3
2572
2573 // reason code has been saved
2574 // r0 has been saved
2575 // r1 has been saved
2576 // r2 has been saved
2577 // r3 has been saved
2578 // pt0, pt4, pt5 have been lost
2579
2580 //
2581 // Get out of shadow mode
2582 //
2583
2584 mfpr r2, icsr // Get icsr //orig
2585//orig ldah r0, <1@<icsr_v_sde-16>>(r31) // Get a one in SHADOW_ENABLE bit location
2586 ldah r0, (1<<(icsr_v_sde-16))(r31)
2587 bic r2, r0, r0 // ICSR with SDE clear //orig
2588 mtpr r0, icsr // Turn off SDE //orig
2589
2590 mfpr r31, pt0 // SDE bubble cycle 1 //orig
2591 mfpr r31, pt0 // SDE bubble cycle 2 //orig
2592 mfpr r31, pt0 // SDE bubble cycle 3 //orig
2593 nop //orig
2594
2595
2596 // save integer regs R4-r31
2597//orig #define t 4
2598//orig .repeat 28
2599//orig store_reg \t
2600//orig #define t t + 1
2601//orig .endr
2602 SAVE_GPR(r4,CNS_Q_GPR+0x20,r1)
2603 SAVE_GPR(r5,CNS_Q_GPR+0x28,r1)
2604 SAVE_GPR(r6,CNS_Q_GPR+0x30,r1)
2605 SAVE_GPR(r7,CNS_Q_GPR+0x38,r1)
2606 SAVE_GPR(r8,CNS_Q_GPR+0x40,r1)
2607 SAVE_GPR(r9,CNS_Q_GPR+0x48,r1)
2608 SAVE_GPR(r10,CNS_Q_GPR+0x50,r1)
2609 SAVE_GPR(r11,CNS_Q_GPR+0x58,r1)
2610 SAVE_GPR(r12,CNS_Q_GPR+0x60,r1)
2611 SAVE_GPR(r13,CNS_Q_GPR+0x68,r1)
2612 SAVE_GPR(r14,CNS_Q_GPR+0x70,r1)
2613 SAVE_GPR(r15,CNS_Q_GPR+0x78,r1)
2614 SAVE_GPR(r16,CNS_Q_GPR+0x80,r1)
2615 SAVE_GPR(r17,CNS_Q_GPR+0x88,r1)
2616 SAVE_GPR(r18,CNS_Q_GPR+0x90,r1)
2617 SAVE_GPR(r19,CNS_Q_GPR+0x98,r1)
2618 SAVE_GPR(r20,CNS_Q_GPR+0xA0,r1)
2619 SAVE_GPR(r21,CNS_Q_GPR+0xA8,r1)
2620 SAVE_GPR(r22,CNS_Q_GPR+0xB0,r1)
2621 SAVE_GPR(r23,CNS_Q_GPR+0xB8,r1)
2622 SAVE_GPR(r24,CNS_Q_GPR+0xC0,r1)
2623 SAVE_GPR(r25,CNS_Q_GPR+0xC8,r1)
2624 SAVE_GPR(r26,CNS_Q_GPR+0xD0,r1)
2625 SAVE_GPR(r27,CNS_Q_GPR+0xD8,r1)
2626 SAVE_GPR(r28,CNS_Q_GPR+0xE0,r1)
2627 SAVE_GPR(r29,CNS_Q_GPR+0xE8,r1)
2628 SAVE_GPR(r30,CNS_Q_GPR+0xF0,r1)
2629 SAVE_GPR(r31,CNS_Q_GPR+0xF8,r1)
2630
2631 // save all paltemp regs except pt0
2632
2633//orig unfix_impure_gpr r1 // adjust impure area pointer for gpr stores
2634//orig fix_impure_ipr r1 // adjust impure area pointer for pt stores
2635//orig #define t 1
2636//orig .repeat 23
2637//orig store_reg \t , pal=1
2638//orig #define t t + 1
2639//orig .endr
2640
2641 lda r1, -0x200(r1) // Restore the impure base address.
2642 lda r1, CNS_Q_IPR(r1) // Point to the base of IPR area.
2643 SAVE_IPR(pt0,CNS_Q_PT+0x00,r1) // the osf code didn't save/restore palTemp 0 ?? pboyle
2644 SAVE_IPR(pt1,CNS_Q_PT+0x08,r1)
2645 SAVE_IPR(pt2,CNS_Q_PT+0x10,r1)
2646 SAVE_IPR(pt3,CNS_Q_PT+0x18,r1)
2647 SAVE_IPR(pt4,CNS_Q_PT+0x20,r1)
2648 SAVE_IPR(pt5,CNS_Q_PT+0x28,r1)
2649 SAVE_IPR(pt6,CNS_Q_PT+0x30,r1)
2650 SAVE_IPR(pt7,CNS_Q_PT+0x38,r1)
2651 SAVE_IPR(pt8,CNS_Q_PT+0x40,r1)
2652 SAVE_IPR(pt9,CNS_Q_PT+0x48,r1)
2653 SAVE_IPR(pt10,CNS_Q_PT+0x50,r1)
2654 SAVE_IPR(pt11,CNS_Q_PT+0x58,r1)
2655 SAVE_IPR(pt12,CNS_Q_PT+0x60,r1)
2656 SAVE_IPR(pt13,CNS_Q_PT+0x68,r1)
2657 SAVE_IPR(pt14,CNS_Q_PT+0x70,r1)
2658 SAVE_IPR(pt15,CNS_Q_PT+0x78,r1)
2659 SAVE_IPR(pt16,CNS_Q_PT+0x80,r1)
2660 SAVE_IPR(pt17,CNS_Q_PT+0x88,r1)
2661 SAVE_IPR(pt18,CNS_Q_PT+0x90,r1)
2662 SAVE_IPR(pt19,CNS_Q_PT+0x98,r1)
2663 SAVE_IPR(pt20,CNS_Q_PT+0xA0,r1)
2664 SAVE_IPR(pt21,CNS_Q_PT+0xA8,r1)
2665 SAVE_IPR(pt22,CNS_Q_PT+0xB0,r1)
2666 SAVE_IPR(pt23,CNS_Q_PT+0xB8,r1)
2667
2668 // Restore shadow mode
2669 mfpr r31, pt0 // pad write to icsr out of shadow of store (trap does not abort write) //orig
2670 mfpr r31, pt0 //orig
2671 mtpr r2, icsr // Restore original ICSR //orig
2672
2673 mfpr r31, pt0 // SDE bubble cycle 1 //orig
2674 mfpr r31, pt0 // SDE bubble cycle 2 //orig
2675 mfpr r31, pt0 // SDE bubble cycle 3 //orig
2676 nop //orig
2677
2678 // save all integer shadow regs
2679
2680//orig #define t 8
2681//orig .repeat 7
2682//orig store_reg \t, shadow=1
2683//orig #define t t + 1
2684//orig .endr
2685//orig store_reg 25, shadow=1
2686
2687 SAVE_SHADOW( r8,CNS_Q_SHADOW+0x00,r1) // also called p0...p7 in the Hudson code
2688 SAVE_SHADOW( r9,CNS_Q_SHADOW+0x08,r1)
2689 SAVE_SHADOW(r10,CNS_Q_SHADOW+0x10,r1)
2690 SAVE_SHADOW(r11,CNS_Q_SHADOW+0x18,r1)
2691 SAVE_SHADOW(r12,CNS_Q_SHADOW+0x20,r1)
2692 SAVE_SHADOW(r13,CNS_Q_SHADOW+0x28,r1)
2693 SAVE_SHADOW(r14,CNS_Q_SHADOW+0x30,r1)
2694 SAVE_SHADOW(r25,CNS_Q_SHADOW+0x38,r1)
2695
2696//orig store_reg exc_addr, ipr=1 // save ipr
2697//orig store_reg pal_base, ipr=1 // save ipr
2698//orig store_reg mm_stat, ipr=1 // save ipr
2699//orig store_reg va, ipr=1 // save ipr
2700//orig store_reg icsr, ipr=1 // save ipr
2701//orig store_reg ipl, ipr=1 // save ipr
2702//orig store_reg ps, ipr=1 // save ipr
2703//orig store_reg itb_asn, ipr=1 // save ipr
2704//orig store_reg aster, ipr=1 // save ipr
2705//orig store_reg astrr, ipr=1 // save ipr
2706//orig store_reg sirr, ipr=1 // save ipr
2707//orig store_reg isr, ipr=1 // save ipr
2708//orig store_reg ivptbr, ipr=1 // save ipr
2709//orig store_reg mcsr, ipr=1 // save ipr
2710//orig store_reg dc_mode, ipr=1 // save ipr
2711
2712 SAVE_IPR(excAddr,CNS_Q_EXC_ADDR,r1)
2713 SAVE_IPR(palBase,CNS_Q_PAL_BASE,r1)
2714 SAVE_IPR(mmStat,CNS_Q_MM_STAT,r1)
2715 SAVE_IPR(va,CNS_Q_VA,r1)
2716 SAVE_IPR(icsr,CNS_Q_ICSR,r1)
2717 SAVE_IPR(ipl,CNS_Q_IPL,r1)
2718 SAVE_IPR(ips,CNS_Q_IPS,r1)
2719 SAVE_IPR(itbAsn,CNS_Q_ITB_ASN,r1)
2720 SAVE_IPR(aster,CNS_Q_ASTER,r1)
2721 SAVE_IPR(astrr,CNS_Q_ASTRR,r1)
2722 SAVE_IPR(sirr,CNS_Q_SIRR,r1)
2723 SAVE_IPR(isr,CNS_Q_ISR,r1)
2724 SAVE_IPR(iVptBr,CNS_Q_IVPTBR,r1)
2725 SAVE_IPR(mcsr,CNS_Q_MCSR,r1)
2726 SAVE_IPR(dcMode,CNS_Q_DC_MODE,r1)
2727
2728//orig pvc_violate 379 // mf maf_mode after a store ok (pvc doesn't distinguish ld from st)
2729//orig store_reg maf_mode, ipr=1 // save ipr -- no mbox instructions for
2730//orig // PVC violation applies only to
2731pvc$osf35$379: // loads. HW_ST ok here, so ignore
2732 SAVE_IPR(mafMode,CNS_Q_MAF_MODE,r1) // MBOX INST->MF MAF_MODE IN 0,1,2
2733
2734
2735 //the following iprs are informational only -- will not be restored
2736
2737//orig store_reg icperr_stat, ipr=1
2738//orig store_reg pmctr, ipr=1
2739//orig store_reg intid, ipr=1
2740//orig store_reg exc_sum, ipr=1
2741//orig store_reg exc_mask, ipr=1
2742//orig ldah r14, 0xfff0(r31)
2743//orig zap r14, 0xE0, r14 // Get Cbox IPR base
2744//orig nop // pad mf dcperr_stat out of shadow of last store
2745//orig nop
2746//orig nop
2747//orig store_reg dcperr_stat, ipr=1
2748
2749 SAVE_IPR(icPerr,CNS_Q_ICPERR_STAT,r1)
2750 SAVE_IPR(PmCtr,CNS_Q_PM_CTR,r1)
2751 SAVE_IPR(intId,CNS_Q_INT_ID,r1)
2752 SAVE_IPR(excSum,CNS_Q_EXC_SUM,r1)
2753 SAVE_IPR(excMask,CNS_Q_EXC_MASK,r1)
2754 ldah r14, 0xFFF0(zero)
2755 zap r14, 0xE0, r14 // Get base address of CBOX IPRs
2756 NOP // Pad mfpr dcPerr out of shadow of
2757 NOP // last store
2758 NOP
2759 SAVE_IPR(dcPerr,CNS_Q_DCPERR_STAT,r1)
2760
2761 // read cbox ipr state
2762
2763//orig mb
2764//orig ldqp r2, ev5__sc_ctl(r14)
2765//orig ldqp r13, ld_lock(r14)
2766//orig ldqp r4, ev5__sc_addr(r14)
2767//orig ldqp r5, ev5__ei_addr(r14)
2768//orig ldqp r6, ev5__bc_tag_addr(r14)
2769//orig ldqp r7, ev5__fill_syn(r14)
2770//orig bis r5, r4, r31
2771//orig bis r7, r6, r31 // make sure previous loads finish before reading stat registers which unlock them
2772//orig ldqp r8, ev5__sc_stat(r14) // unlocks sc_stat,sc_addr
2773//orig ldqp r9, ev5__ei_stat(r14) // may unlock ei_*, bc_tag_addr, fill_syn
2774//orig ldqp r31, ev5__ei_stat(r14) // ensures it is really unlocked
2775//orig mb
2776
2777#ifndef SIMOS
2778 mb
2779 ldq_p r2, scCtl(r14)
2780 ldq_p r13, ldLock(r14)
2781 ldq_p r4, scAddr(r14)
2782 ldq_p r5, eiAddr(r14)
2783 ldq_p r6, bcTagAddr(r14)
2784 ldq_p r7, fillSyn(r14)
2785 bis r5, r4, zero // Make sure all loads complete before
2786 bis r7, r6, zero // reading registers that unlock them.
2787 ldq_p r8, scStat(r14) // Unlocks scAddr.
2788 ldq_p r9, eiStat(r14) // Unlocks eiAddr, bcTagAddr, fillSyn.
2789 ldq_p zero, eiStat(r14) // Make sure it is really unlocked.
2790 mb
2791#endif
2792//orig // save cbox ipr state
2793//orig store_reg1 sc_ctl, r2, r1, ipr=1
2794//orig store_reg1 ld_lock, r13, r1, ipr=1
2795//orig store_reg1 sc_addr, r4, r1, ipr=1
2796//orig store_reg1 ei_addr, r5, r1, ipr=1
2797//orig store_reg1 bc_tag_addr, r6, r1, ipr=1
2798//orig store_reg1 fill_syn, r7, r1, ipr=1
2799//orig store_reg1 sc_stat, r8, r1, ipr=1
2800//orig store_reg1 ei_stat, r9, r1, ipr=1
2801//orig //bc_config? sl_rcv?
2802
2803 SAVE_SHADOW(r2,CNS_Q_SC_CTL,r1);
2804 SAVE_SHADOW(r13,CNS_Q_LD_LOCK,r1);
2805 SAVE_SHADOW(r4,CNS_Q_SC_ADDR,r1);
2806 SAVE_SHADOW(r5,CNS_Q_EI_ADDR,r1);
2807 SAVE_SHADOW(r6,CNS_Q_BC_TAG_ADDR,r1);
2808 SAVE_SHADOW(r7,CNS_Q_FILL_SYN,r1);
2809 SAVE_SHADOW(r8,CNS_Q_SC_STAT,r1);
2810 SAVE_SHADOW(r9,CNS_Q_EI_STAT,r1);
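    // (Note: when SIMOS is defined the Cbox loads above are compiled out, so the
    //  values stored by these SAVE_SHADOWs are just whatever was left in r2, r13 and
    //  r4..r9.  Like the IPRs noted earlier, they are informational only and are not
    //  restored by pal_restore_state.)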
2811
2812// restore impure base //orig
2813//orig unfix_impure_ipr r1
2814 lda r1, -CNS_Q_IPR(r1)
2815
2816// save all floating regs //orig
2817 mfpr r0, icsr // get icsr //orig
2818 or r31, 1, r2 // get a one //orig
2819//orig sll r2, #icsr_v_fpe, r2 // shift for fpu spot //orig
2820 sll r2, icsr_v_fpe, r2 // Shift it into ICSR<FPE> position
2821 or r2, r0, r0 // set FEN on //orig
2822 mtpr r0, icsr // write to icsr, enabling FEN //orig
2823
2824// map the save area virtually
2825// orig mtpr r31, dtb_ia // clear the dtb
2826// orig srl r1, page_offset_size_bits, r0 // Clean off low bits of VA
2827// orig sll r0, 32, r0 // shift to PFN field
2828// orig lda r2, 0xff(r31) // all read enable and write enable bits set
2829// orig sll r2, 8, r2 // move to PTE location
2830// orig addq r0, r2, r0 // combine with PFN
2831// orig mtpr r0, dtb_pte // Load PTE and set TB valid bit
2832// orig mtpr r1, dtb_tag // write TB tag
2833
2834 mtpr r31, dtbIa // Clear all DTB entries
2835 srl r1, va_s_off, r0 // Clean off byte-within-page offset
2836 sll r0, pte_v_pfn, r0 // Shift to form PFN
2837 lda r0, pte_m_prot(r0) // Set all read/write enable bits
2838 mtpr r0, dtbPte // Load the PTE and set valid
2839 mtpr r1, dtbTag // Write the PTE and tag into the DTB
2840
2841
2842//orig // map the next page too - in case the impure area crosses a page boundary
2843//orig lda r4, 1@page_offset_size_bits(r1) // generate address for next page
2844//orig srl r4, page_offset_size_bits, r0 // Clean off low bits of VA
2845//orig sll r0, 32, r0 // shift to PFN field
2846//orig lda r2, 0xff(r31) // all read enable and write enable bits set
2847//orig sll r2, 8, r2 // move to PTE location
2848//orig addq r0, r2, r0 // combine with PFN
2849//orig mtpr r0, dtb_pte // Load PTE and set TB valid bit
2850//orig mtpr r4, dtb_tag // write TB tag
2851
2852 lda r4, (1<<va_s_off)(r1) // Generate address for next page
2853 srl r4, va_s_off, r0 // Clean off byte-within-page offset
2854 sll r0, pte_v_pfn, r0 // Shift to form PFN
2855 lda r0, pte_m_prot(r0) // Set all read/write enable bits
2856 mtpr r0, dtbPte // Load the PTE and set valid
2857 mtpr r4, dtbTag // Write the PTE and tag into the DTB
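    // (Both DTB entries above map the save area one-to-one: the PFN field is simply
    //  the impure pointer shifted down by va_s_off and back up to pte_v_pfn, with all
    //  read/write-enable bits set.  The second entry covers the following page in
    //  case the impure area straddles a page boundary.)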
2858
2859 sll r31, 0, r31 // stall cycle 1 // orig
2860 sll r31, 0, r31 // stall cycle 2 // orig
2861 sll r31, 0, r31 // stall cycle 3 // orig
2862 nop // orig
2863
2864//orig // add offset for saving fpr regs
2865//orig fix_impure_gpr r1
2866
2867 lda r1, 0x200(r1) // Point to center of CPU segment
2868
2869// now save the regs - F0-F31
2870
2871//orig #define t 0
2872//orig .repeat 32
2873//orig store_reg \t , fpu=1
2874//orig #define t t + 1
2875//orig .endr
2876
2877 mf_fpcr f0 // original
2878
2879 SAVE_FPR(f0,CNS_Q_FPR+0x00,r1)
2880 SAVE_FPR(f1,CNS_Q_FPR+0x08,r1)
2881 SAVE_FPR(f2,CNS_Q_FPR+0x10,r1)
2882 SAVE_FPR(f3,CNS_Q_FPR+0x18,r1)
2883 SAVE_FPR(f4,CNS_Q_FPR+0x20,r1)
2884 SAVE_FPR(f5,CNS_Q_FPR+0x28,r1)
2885 SAVE_FPR(f6,CNS_Q_FPR+0x30,r1)
2886 SAVE_FPR(f7,CNS_Q_FPR+0x38,r1)
2887 SAVE_FPR(f8,CNS_Q_FPR+0x40,r1)
2888 SAVE_FPR(f9,CNS_Q_FPR+0x48,r1)
2889 SAVE_FPR(f10,CNS_Q_FPR+0x50,r1)
2890 SAVE_FPR(f11,CNS_Q_FPR+0x58,r1)
2891 SAVE_FPR(f12,CNS_Q_FPR+0x60,r1)
2892 SAVE_FPR(f13,CNS_Q_FPR+0x68,r1)
2893 SAVE_FPR(f14,CNS_Q_FPR+0x70,r1)
2894 SAVE_FPR(f15,CNS_Q_FPR+0x78,r1)
2895 SAVE_FPR(f16,CNS_Q_FPR+0x80,r1)
2896 SAVE_FPR(f17,CNS_Q_FPR+0x88,r1)
2897 SAVE_FPR(f18,CNS_Q_FPR+0x90,r1)
2898 SAVE_FPR(f19,CNS_Q_FPR+0x98,r1)
2899 SAVE_FPR(f20,CNS_Q_FPR+0xA0,r1)
2900 SAVE_FPR(f21,CNS_Q_FPR+0xA8,r1)
2901 SAVE_FPR(f22,CNS_Q_FPR+0xB0,r1)
2902 SAVE_FPR(f23,CNS_Q_FPR+0xB8,r1)
2903 SAVE_FPR(f24,CNS_Q_FPR+0xC0,r1)
2904 SAVE_FPR(f25,CNS_Q_FPR+0xC8,r1)
2905 SAVE_FPR(f26,CNS_Q_FPR+0xD0,r1)
2906 SAVE_FPR(f27,CNS_Q_FPR+0xD8,r1)
2907 SAVE_FPR(f28,CNS_Q_FPR+0xE0,r1)
2908 SAVE_FPR(f29,CNS_Q_FPR+0xE8,r1)
2909 SAVE_FPR(f30,CNS_Q_FPR+0xF0,r1)
2910 SAVE_FPR(f31,CNS_Q_FPR+0xF8,r1)
2911
2912//orig //switch impure offset from gpr to ipr---
2913//orig unfix_impure_gpr r1
2914//orig fix_impure_ipr r1
2915//orig store_reg1 fpcsr, f0, r1, fpcsr=1
2916
 2917 SAVE_FPR(f0,CNS_Q_FPCSR,r1) // fpcsr loaded above into f0 -- can it reach? pb
2918 lda r1, -0x200(r1) // Restore the impure base address
2919
2920//orig // and back to gpr ---
2921//orig unfix_impure_ipr r1
2922//orig fix_impure_gpr r1
2923
2924//orig lda r0, cns_mchksize(r31) // get size of mchk area
2925//orig store_reg1 mchkflag, r0, r1, ipr=1
2926//orig mb
2927
2928 lda r1, CNS_Q_IPR(r1) // Point to base of IPR area again
 2929 // save this using the IPR base (it is closer) not the GPR base as they used...pb
2930 lda r0, MACHINE_CHECK_SIZE(r31) // get size of mchk area
2931 SAVE_SHADOW(r0,CNS_Q_MCHK,r1);
2932 mb
2933
2934//orig or r31, 1, r0 // get a one
2935//orig store_reg1 flag, r0, r1, ipr=1 // set dump area flag
2936//orig mb
2937
2938 lda r1, -CNS_Q_IPR(r1) // back to the base
2939 lda r1, 0x200(r1) // Point to center of CPU segment
2940 or r31, 1, r0 // get a one
 2941 SAVE_GPR(r0,CNS_Q_FLAG,r1) // set dump area valid flag
2942 mb
2943
2944//orig // restore impure area base
2945//orig unfix_impure_gpr r1
2946 lda r1, -0x200(r1) // Point to center of CPU segment
2947
2948 mtpr r31, dtb_ia // clear the dtb //orig
2949 mtpr r31, itb_ia // clear the itb //orig
2950
2951//orig pvc_jsr savsta, bsr=1, dest=1
2952 ret r31, (r3) // and back we go
2953#endif
2954
2955
2956#if remove_restore_state == 0
2957
2958
2959// .sbttl "PAL_RESTORE_STATE"
2960//+
2961//
2962// Pal_restore_state
2963//
2964//
2965// register usage:
2966// r1 = addr of impure area
2967// r3 = return_address
2968// all other regs are scratchable, as they are about to
2969// be reloaded from ram.
2970//
2971// Function:
2972// All chip state restored, all SRs, FRs, PTs, IPRs
2973// *** except R1, R3, PT0, PT4, PT5 ***
2974//
2975//-
2976 ALIGN_BLOCK
2977pal_restore_state:
2978
2979//need to restore sc_ctl,bc_ctl,bc_config??? if so, need to figure out a safe way to do so.
2980
2981//orig // map the console io area virtually
2982//orig mtpr r31, dtb_ia // clear the dtb
2983//orig srl r1, page_offset_size_bits, r0 // Clean off low bits of VA
2984//orig sll r0, 32, r0 // shift to PFN field
2985//orig lda r2, 0xff(r31) // all read enable and write enable bits set
2986//orig sll r2, 8, r2 // move to PTE location
2987//orig addq r0, r2, r0 // combine with PFN
2988//orig
2989//orig mtpr r0, dtb_pte // Load PTE and set TB valid bit
2990//orig mtpr r1, dtb_tag // write TB tag
2991//orig
2992
2993 mtpr r31, dtbIa // Clear all DTB entries
2994 srl r1, va_s_off, r0 // Clean off byte-within-page offset
2995 sll r0, pte_v_pfn, r0 // Shift to form PFN
2996 lda r0, pte_m_prot(r0) // Set all read/write enable bits
2997 mtpr r0, dtbPte // Load the PTE and set valid
2998 mtpr r1, dtbTag // Write the PTE and tag into the DTB
2999
3000
3001//orig // map the next page too, in case impure area crosses page boundary
3002//orig lda r4, 1@page_offset_size_bits(r1) // generate address for next page
3003//orig srl r4, page_offset_size_bits, r0 // Clean off low bits of VA
3004//orig sll r0, 32, r0 // shift to PFN field
3005//orig lda r2, 0xff(r31) // all read enable and write enable bits set
3006//orig sll r2, 8, r2 // move to PTE location
3007//orig addq r0, r2, r0 // combine with PFN
3008//orig
3009//orig mtpr r0, dtb_pte // Load PTE and set TB valid bit
3010//orig mtpr r4, dtb_tag // write TB tag - no virtual mbox instruction for 3 cycles
3011
3012 lda r4, (1<<VA_S_OFF)(r1) // Generate address for next page
3013 srl r4, va_s_off, r0 // Clean off byte-within-page offset
3014 sll r0, pte_v_pfn, r0 // Shift to form PFN
3015 lda r0, pte_m_prot(r0) // Set all read/write enable bits
3016 mtpr r0, dtbPte // Load the PTE and set valid
3017 mtpr r4, dtbTag // Write the PTE and tag into the DTB
3018
3019//orig // save all floating regs
3020//orig mfpr r0, icsr // get icsr
3021//orig// assume ICSR_V_SDE gt <ICSR_V_FPE> // assertion checker
3022//orig or r31, <<1@<ICSR_V_SDE-ICSR_V_FPE>> ! 1>, r2 // set SDE and FPE
3023//orig sll r2, #icsr_v_fpe, r2 // shift for fpu spot
3024//orig or r2, r0, r0 // set FEN on
3025//orig mtpr r0, icsr // write to icsr, enabling FEN and SDE. 3 bubbles to floating instr.
3026
3027 mfpr r0, icsr // Get current ICSR
 3028 bis zero, 1, r2 // Get a '1'
 3029 or r2, (1<<(icsr_v_sde-icsr_v_fpe)), r2 // Add the SDE bit, expressed relative to the FPE position
 3030 sll r2, icsr_v_fpe, r2 // Shift bits into position
 3031 bis r2, r0, r0 // Merge SDE and FPE into the ICSR value read above (matches the //orig "or r2, r0, r0")
 3032 mtpr r0, icsr // Update the chip
3033
3034 mfpr r31, pt0 // FPE bubble cycle 1 //orig
3035 mfpr r31, pt0 // FPE bubble cycle 2 //orig
3036 mfpr r31, pt0 // FPE bubble cycle 3 //orig
3037
3038//orig fix_impure_ipr r1
3039//orig restore_reg1 fpcsr, f0, r1, fpcsr=1
3040//orig mt_fpcr f0
3041//orig
3042//orig unfix_impure_ipr r1
3043//orig fix_impure_gpr r1 // adjust impure pointer offset for gpr access
3044//orig
3045//orig // restore all floating regs
3046//orig#define t 0
3047//orig .repeat 32
3048//orig restore_reg \t , fpu=1
3049//orig#define t t + 1
3050//orig .endr
3051
 3052 lda r1, 200(r1) // Point to base of IPR area again (the save path uses CNS_Q_IPR for this step; the literal 200 here looks suspect)
3053 RESTORE_FPR(f0,CNS_Q_FPCSR,r1) // can it reach?? pb
3054 mt_fpcr f0 // original
3055
3056 lda r1, 0x200(r1) // point to center of CPU segment
3057 RESTORE_FPR(f0,CNS_Q_FPR+0x00,r1)
3058 RESTORE_FPR(f1,CNS_Q_FPR+0x08,r1)
3059 RESTORE_FPR(f2,CNS_Q_FPR+0x10,r1)
3060 RESTORE_FPR(f3,CNS_Q_FPR+0x18,r1)
3061 RESTORE_FPR(f4,CNS_Q_FPR+0x20,r1)
3062 RESTORE_FPR(f5,CNS_Q_FPR+0x28,r1)
3063 RESTORE_FPR(f6,CNS_Q_FPR+0x30,r1)
3064 RESTORE_FPR(f7,CNS_Q_FPR+0x38,r1)
3065 RESTORE_FPR(f8,CNS_Q_FPR+0x40,r1)
3066 RESTORE_FPR(f9,CNS_Q_FPR+0x48,r1)
3067 RESTORE_FPR(f10,CNS_Q_FPR+0x50,r1)
3068 RESTORE_FPR(f11,CNS_Q_FPR+0x58,r1)
3069 RESTORE_FPR(f12,CNS_Q_FPR+0x60,r1)
3070 RESTORE_FPR(f13,CNS_Q_FPR+0x68,r1)
3071 RESTORE_FPR(f14,CNS_Q_FPR+0x70,r1)
3072 RESTORE_FPR(f15,CNS_Q_FPR+0x78,r1)
3073 RESTORE_FPR(f16,CNS_Q_FPR+0x80,r1)
3074 RESTORE_FPR(f17,CNS_Q_FPR+0x88,r1)
3075 RESTORE_FPR(f18,CNS_Q_FPR+0x90,r1)
3076 RESTORE_FPR(f19,CNS_Q_FPR+0x98,r1)
3077 RESTORE_FPR(f20,CNS_Q_FPR+0xA0,r1)
3078 RESTORE_FPR(f21,CNS_Q_FPR+0xA8,r1)
3079 RESTORE_FPR(f22,CNS_Q_FPR+0xB0,r1)
3080 RESTORE_FPR(f23,CNS_Q_FPR+0xB8,r1)
3081 RESTORE_FPR(f24,CNS_Q_FPR+0xC0,r1)
3082 RESTORE_FPR(f25,CNS_Q_FPR+0xC8,r1)
3083 RESTORE_FPR(f26,CNS_Q_FPR+0xD0,r1)
3084 RESTORE_FPR(f27,CNS_Q_FPR+0xD8,r1)
3085 RESTORE_FPR(f28,CNS_Q_FPR+0xE0,r1)
3086 RESTORE_FPR(f29,CNS_Q_FPR+0xE8,r1)
3087 RESTORE_FPR(f30,CNS_Q_FPR+0xF0,r1)
3088 RESTORE_FPR(f31,CNS_Q_FPR+0xF8,r1)
3089
3090//orig // switch impure pointer from gpr to ipr area --
3091//orig unfix_impure_gpr r1
3092//orig fix_impure_ipr r1
3093//orig
3094//orig // restore all pal regs
3095//orig#define t 1
3096//orig .repeat 23
3097//orig restore_reg \t , pal=1
3098//orig#define t t + 1
3099//orig .endr
3100
3101 lda r1, -0x200(r1) // Restore base address of impure area.
3102 lda r1, CNS_Q_IPR(r1) // Point to base of IPR area.
3103 RESTORE_IPR(pt0,CNS_Q_PT+0x00,r1) // the osf code didn't save/restore palTemp 0 ?? pboyle
3104 RESTORE_IPR(pt1,CNS_Q_PT+0x08,r1)
3105 RESTORE_IPR(pt2,CNS_Q_PT+0x10,r1)
3106 RESTORE_IPR(pt3,CNS_Q_PT+0x18,r1)
3107 RESTORE_IPR(pt4,CNS_Q_PT+0x20,r1)
3108 RESTORE_IPR(pt5,CNS_Q_PT+0x28,r1)
3109 RESTORE_IPR(pt6,CNS_Q_PT+0x30,r1)
3110 RESTORE_IPR(pt7,CNS_Q_PT+0x38,r1)
3111 RESTORE_IPR(pt8,CNS_Q_PT+0x40,r1)
3112 RESTORE_IPR(pt9,CNS_Q_PT+0x48,r1)
3113 RESTORE_IPR(pt10,CNS_Q_PT+0x50,r1)
3114 RESTORE_IPR(pt11,CNS_Q_PT+0x58,r1)
3115 RESTORE_IPR(pt12,CNS_Q_PT+0x60,r1)
3116 RESTORE_IPR(pt13,CNS_Q_PT+0x68,r1)
3117 RESTORE_IPR(pt14,CNS_Q_PT+0x70,r1)
3118 RESTORE_IPR(pt15,CNS_Q_PT+0x78,r1)
3119 RESTORE_IPR(pt16,CNS_Q_PT+0x80,r1)
3120 RESTORE_IPR(pt17,CNS_Q_PT+0x88,r1)
3121 RESTORE_IPR(pt18,CNS_Q_PT+0x90,r1)
3122 RESTORE_IPR(pt19,CNS_Q_PT+0x98,r1)
3123 RESTORE_IPR(pt20,CNS_Q_PT+0xA0,r1)
3124 RESTORE_IPR(pt21,CNS_Q_PT+0xA8,r1)
3125 RESTORE_IPR(pt22,CNS_Q_PT+0xB0,r1)
3126 RESTORE_IPR(pt23,CNS_Q_PT+0xB8,r1)
3127
3128
3129//orig restore_reg exc_addr, ipr=1 // restore ipr
3130//orig restore_reg pal_base, ipr=1 // restore ipr
3131//orig restore_reg ipl, ipr=1 // restore ipr
3132//orig restore_reg ps, ipr=1 // restore ipr
3133//orig mtpr r0, dtb_cm // set current mode in mbox too
3134//orig restore_reg itb_asn, ipr=1
3135//orig srl r0, itb_asn_v_asn, r0
3136//orig sll r0, dtb_asn_v_asn, r0
3137//orig mtpr r0, dtb_asn // set ASN in Mbox too
3138//orig restore_reg ivptbr, ipr=1
3139//orig mtpr r0, mvptbr // use ivptbr value to restore mvptbr
3140//orig restore_reg mcsr, ipr=1
3141//orig restore_reg aster, ipr=1
3142//orig restore_reg astrr, ipr=1
3143//orig restore_reg sirr, ipr=1
3144//orig restore_reg maf_mode, ipr=1 // no mbox instruction for 3 cycles
3145//orig mfpr r31, pt0 // (may issue with mt maf_mode)
3146//orig mfpr r31, pt0 // bubble cycle 1
3147//orig mfpr r31, pt0 // bubble cycle 2
3148//orig mfpr r31, pt0 // bubble cycle 3
3149//orig mfpr r31, pt0 // (may issue with following ld)
3150
3151 // r0 gets the value of RESTORE_IPR in the macro and this code uses this side effect (gag)
3152 RESTORE_IPR(excAddr,CNS_Q_EXC_ADDR,r1)
3153 RESTORE_IPR(palBase,CNS_Q_PAL_BASE,r1)
3154 RESTORE_IPR(ipl,CNS_Q_IPL,r1)
3155 RESTORE_IPR(ips,CNS_Q_IPS,r1)
3156 mtpr r0, dtbCm // Set Mbox current mode too.
3157 RESTORE_IPR(itbAsn,CNS_Q_ITB_ASN,r1)
 3158 srl r0, 4, r0 // Shift the ASN down from its ITB_ASN position (itb_asn_v_asn)
 3159 sll r0, 57, r0 // and up to its DTB_ASN position (dtb_asn_v_asn)
3160 mtpr r0, dtbAsn // Set Mbox ASN too
3161 RESTORE_IPR(iVptBr,CNS_Q_IVPTBR,r1)
3162 mtpr r0, mVptBr // Set Mbox VptBr too
3163 RESTORE_IPR(mcsr,CNS_Q_MCSR,r1)
3164 RESTORE_IPR(aster,CNS_Q_ASTER,r1)
3165 RESTORE_IPR(astrr,CNS_Q_ASTRR,r1)
3166 RESTORE_IPR(sirr,CNS_Q_SIRR,r1)
3167 RESTORE_IPR(mafMode,CNS_Q_MAF_MODE,r1)
3168 STALL
3169 STALL
3170 STALL
3171 STALL
3172 STALL
3173
3174
3175 // restore all integer shadow regs
3176//orig#define t 8
3177//orig .repeat 7
3178//orig restore_reg \t, shadow=1
3179//orig#define t t + 1
3180//orig .endr
3181//orig restore_reg 25, shadow=1
3182//orig restore_reg dc_mode, ipr=1 // no mbox instructions for 4 cycles
3183
3184 RESTORE_SHADOW( r8,CNS_Q_SHADOW+0x00,r1) // also called p0...p7 in the Hudson code
3185 RESTORE_SHADOW( r9,CNS_Q_SHADOW+0x08,r1)
3186 RESTORE_SHADOW(r10,CNS_Q_SHADOW+0x10,r1)
3187 RESTORE_SHADOW(r11,CNS_Q_SHADOW+0x18,r1)
3188 RESTORE_SHADOW(r12,CNS_Q_SHADOW+0x20,r1)
3189 RESTORE_SHADOW(r13,CNS_Q_SHADOW+0x28,r1)
3190 RESTORE_SHADOW(r14,CNS_Q_SHADOW+0x30,r1)
3191 RESTORE_SHADOW(r25,CNS_Q_SHADOW+0x38,r1)
3192 RESTORE_IPR(dcMode,CNS_Q_DC_MODE,r1)
3193
3194 //
3195 // Get out of shadow mode
3196 //
3197
3198 mfpr r31, pt0 // pad last load to icsr write (in case of replay, icsr will be written anyway) //orig
3199 mfpr r31, pt0 // "" //orig
3200 mfpr r0, icsr // Get icsr //orig
3201//orig ldah r2, <1@<icsr_v_sde-16>>(r31) // Get a one in SHADOW_ENABLE bit location
3202 ldah r2, (1<<(ICSR_V_SDE-16))(r31) // Get a one in SHADOW_ENABLE bit location //orig
3203 bic r0, r2, r2 // ICSR with SDE clear //orig
3204 mtpr r2, icsr // Turn off SDE - no palshadow rd/wr for 3 bubble cycles //orig
3205
3206 mfpr r31, pt0 // SDE bubble cycle 1 //orig
3207 mfpr r31, pt0 // SDE bubble cycle 2 //orig
3208 mfpr r31, pt0 // SDE bubble cycle 3 //orig
3209 nop //orig
3210
3211//orig // switch impure pointer from ipr to gpr area --
3212//orig unfix_impure_ipr r1
3213//orig fix_impure_gpr r1
3214//orig // restore all integer regs
3215//orig#define t 4
3216//orig .repeat 28
3217//orig restore_reg \t
3218//orig#define t t + 1
3219//orig .endr
3220
3221// Restore GPRs (r0, r2 are restored later, r1 and r3 are trashed) ...
3222
3223 lda r1, -CNS_Q_IPR(r1) // Restore base address of impure area
3224 lda r1, 0x200(r1) // Point to center of CPU segment
3225
3226 RESTORE_GPR(r4,CNS_Q_GPR+0x20,r1)
3227 RESTORE_GPR(r5,CNS_Q_GPR+0x28,r1)
3228 RESTORE_GPR(r6,CNS_Q_GPR+0x30,r1)
3229 RESTORE_GPR(r7,CNS_Q_GPR+0x38,r1)
3230 RESTORE_GPR(r8,CNS_Q_GPR+0x40,r1)
3231 RESTORE_GPR(r9,CNS_Q_GPR+0x48,r1)
3232 RESTORE_GPR(r10,CNS_Q_GPR+0x50,r1)
3233 RESTORE_GPR(r11,CNS_Q_GPR+0x58,r1)
3234 RESTORE_GPR(r12,CNS_Q_GPR+0x60,r1)
3235 RESTORE_GPR(r13,CNS_Q_GPR+0x68,r1)
3236 RESTORE_GPR(r14,CNS_Q_GPR+0x70,r1)
3237 RESTORE_GPR(r15,CNS_Q_GPR+0x78,r1)
3238 RESTORE_GPR(r16,CNS_Q_GPR+0x80,r1)
3239 RESTORE_GPR(r17,CNS_Q_GPR+0x88,r1)
3240 RESTORE_GPR(r18,CNS_Q_GPR+0x90,r1)
3241 RESTORE_GPR(r19,CNS_Q_GPR+0x98,r1)
3242 RESTORE_GPR(r20,CNS_Q_GPR+0xA0,r1)
3243 RESTORE_GPR(r21,CNS_Q_GPR+0xA8,r1)
3244 RESTORE_GPR(r22,CNS_Q_GPR+0xB0,r1)
3245 RESTORE_GPR(r23,CNS_Q_GPR+0xB8,r1)
3246 RESTORE_GPR(r24,CNS_Q_GPR+0xC0,r1)
3247 RESTORE_GPR(r25,CNS_Q_GPR+0xC8,r1)
3248 RESTORE_GPR(r26,CNS_Q_GPR+0xD0,r1)
3249 RESTORE_GPR(r27,CNS_Q_GPR+0xD8,r1)
3250 RESTORE_GPR(r28,CNS_Q_GPR+0xE0,r1)
3251 RESTORE_GPR(r29,CNS_Q_GPR+0xE8,r1)
3252 RESTORE_GPR(r30,CNS_Q_GPR+0xF0,r1)
3253 RESTORE_GPR(r31,CNS_Q_GPR+0xF8,r1)
3254
3255//orig // switch impure pointer from gpr to ipr area --
3256//orig unfix_impure_gpr r1
3257//orig fix_impure_ipr r1
3258//orig restore_reg icsr, ipr=1 // restore original icsr- 4 bubbles to hw_rei
3259
 3260 lda t0, -0x200(t0) // Restore base address of impure area (t0 is the standard alias for r1).
 3261 lda t0, CNS_Q_IPR(t0) // Point to base of IPR area again.
3262 RESTORE_IPR(icsr,CNS_Q_ICSR,r1)
3263
3264//orig // and back again --
3265//orig unfix_impure_ipr r1
3266//orig fix_impure_gpr r1
3267//orig store_reg1 flag, r31, r1, ipr=1 // clear dump area valid flag
3268//orig mb
3269
3270 lda t0, -CNS_Q_IPR(t0) // Back to base of impure area again,
3271 lda t0, 0x200(t0) // and back to center of CPU segment
3272 SAVE_GPR(r31,CNS_Q_FLAG,r1) // Clear the dump area valid flag
3273 mb
3274
3275//orig // and back we go
3276//orig// restore_reg 3
3277//orig restore_reg 2
3278//orig// restore_reg 1
3279//orig restore_reg 0
3280//orig // restore impure area base
3281//orig unfix_impure_gpr r1
3282
3283 RESTORE_GPR(r2,CNS_Q_GPR+0x10,r1)
3284 RESTORE_GPR(r0,CNS_Q_GPR+0x00,r1)
3285 lda r1, -0x200(r1) // Restore impure base address
3286
3287 mfpr r31, pt0 // stall for ldqp above //orig
3288
3289 mtpr r31, dtb_ia // clear the tb //orig
3290 mtpr r31, itb_ia // clear the itb //orig
3291
3292//orig pvc_jsr rststa, bsr=1, dest=1
3293 ret r31, (r3) // back we go //orig
3294#endif
3295
3296
3297//+
3298// pal_pal_bug_check -- code has found a bugcheck situation.
3299// Set things up and join common machine check flow.
3300//
3301// Input:
3302// r14 - exc_addr
3303//
3304// On exit:
3305// pt0 - saved r0
3306// pt1 - saved r1
3307// pt4 - saved r4
3308// pt5 - saved r5
3309// pt6 - saved r6
3310// pt10 - saved exc_addr
3311// pt_misc<47:32> - mchk code
3312// pt_misc<31:16> - scb vector
3313// r14 - base of Cbox IPRs in IO space
3314// MCES<mchk> is set
3315//-
3316
3317 ALIGN_BLOCK
3318 .globl pal_pal_bug_check_from_int
3319pal_pal_bug_check_from_int:
3320 DEBUGSTORE(0x79)
3321//simos DEBUG_EXC_ADDR()
3322 DEBUGSTORE(0x20)
3323//simos bsr r25, put_hex
3324 lda r25, mchk_c_bugcheck(r31)
3325 addq r25, 1, r25 // set flag indicating we came from interrupt and stack is already pushed
3326 br r31, pal_pal_mchk
3327 nop
3328
3329pal_pal_bug_check:
3330 lda r25, mchk_c_bugcheck(r31)
3331
3332pal_pal_mchk:
3333 sll r25, 32, r25 // Move mchk code to position
3334
3335 mtpr r14, pt10 // Stash exc_addr
3336 mtpr r14, exc_addr
3337
3338 mfpr r12, pt_misc // Get MCES and scratch
3339 zap r12, 0x3c, r12
3340
3341 or r12, r25, r12 // Combine mchk code
3342 lda r25, scb_v_procmchk(r31) // Get SCB vector
3343
3344 sll r25, 16, r25 // Move SCBv to position
3345 or r12, r25, r25 // Combine SCBv
3346
3347 mtpr r0, pt0 // Stash for scratch
3348 bis r25, mces_m_mchk, r25 // Set MCES<MCHK> bit
3349
3350 mtpr r25, pt_misc // Save mchk code!scbv!whami!mces
3351 ldah r14, 0xfff0(r31)
3352
3353 mtpr r1, pt1 // Stash for scratch
3354 zap r14, 0xE0, r14 // Get Cbox IPR base
3355
3356 mtpr r4, pt4
3357 mtpr r5, pt5
3358
3359 mtpr r6, pt6
3360 blbs r12, sys_double_machine_check // MCHK halt if double machine check
3361
3362 br r31, sys_mchk_collect_iprs // Join common machine check flow
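    // (pt_misc layout used above: "zap r12, 0x3c" clears bytes 2..5, i.e. bits
    //  <47:16>, before the mchk code is or'ed into <47:32> and the SCB vector into
    //  <31:16>; the low word is left holding WHAMI/MCES as described in the header.)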
3363
3364// align_to_call_pal_section // Align to address of first call_pal entry point - 2000
3365
3366// .sbttl "HALT - PALcode for HALT instruction"
3367
3368//+
3369//
3370// Entry:
3371// Vectored into via hardware PALcode instruction dispatch.
3372//
3373// Function:
3374// GO to console code
3375//
3376//-
3377
3378 .text 1
3379// . = 0x2000
3380 CALL_PAL_PRIV(PAL_HALT_ENTRY)
3381call_pal_halt:
3382#if rax_mode == 0
3383 mfpr r31, pt0 // Pad exc_addr read
3384 mfpr r31, pt0
3385
3386 mfpr r12, exc_addr // get PC
3387 subq r12, 4, r12 // Point to the HALT
3388
3389 mtpr r12, exc_addr
3390 mtpr r0, pt0
3391
3392//orig pvc_jsr updpcb, bsr=1
3393 bsr r0, pal_update_pcb // update the pcb
3394 lda r0, hlt_c_sw_halt(r31) // set halt code to sw halt
3395 br r31, sys_enter_console // enter the console
3396
3397#else // RAX mode
3398 mb
3399 mb
3400 mtpr r9, ev5__dtb_asn // no Dstream virtual ref for next 3 cycles.
3401 mtpr r9, ev5__itb_asn // E1. Update ITB ASN. No hw_rei for 5 cycles.
3402 mtpr r8, exc_addr // no HW_REI for 1 cycle.
3403 blbc r9, not_begin_case
3404 mtpr r31, ev5__dtb_ia // clear DTB. No Dstream virtual ref for 2 cycles.
3405 mtpr r31, ev5__itb_ia // clear ITB.
3406
3407not_begin_case:
3408 nop
3409 nop
3410
3411 nop
3412 nop // pad mt itb_asn ->hw_rei_stall
3413
3414 hw_rei_stall
3415#endif
3416
3417// .sbttl "CFLUSH- PALcode for CFLUSH instruction"
3418
3419//+
3420//
3421// Entry:
3422// Vectored into via hardware PALcode instruction dispatch.
3423//
3424// R16 - contains the PFN of the page to be flushed
3425//
3426// Function:
3427// Flush all Dstream caches of 1 entire page
3428// The CFLUSH routine is in the system specific module.
3429//
3430//-
3431
3432 CALL_PAL_PRIV(PAL_CFLUSH_ENTRY)
3433Call_Pal_Cflush:
3434 br r31, sys_cflush
3435
3436// .sbttl "DRAINA - PALcode for DRAINA instruction"
3437//+
3438//
3439// Entry:
3440// Vectored into via hardware PALcode instruction dispatch.
3441// Implicit TRAPB performed by hardware.
3442//
3443// Function:
3444// Stall instruction issue until all prior instructions are guaranteed to
3445// complete without incurring aborts. For the EV5 implementation, this
3446// means waiting until all pending DREADS are returned.
3447//
3448//-
3449
3450 CALL_PAL_PRIV(PAL_DRAINA_ENTRY)
3451Call_Pal_Draina:
3452 ldah r14, 0x100(r31) // Init counter. Value?
3453 nop
3454
3455DRAINA_LOOP:
3456 subq r14, 1, r14 // Decrement counter
3457 mfpr r13, ev5__maf_mode // Fetch status bit
3458
3459 srl r13, maf_mode_v_dread_pending, r13
3460 ble r14, DRAINA_LOOP_TOO_LONG
3461
3462 nop
3463 blbs r13, DRAINA_LOOP // Wait until all DREADS clear
3464
3465 hw_rei
3466
3467DRAINA_LOOP_TOO_LONG:
3468 br r31, call_pal_halt
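    // (DRAINA just polls MAF_MODE<dread_pending> until no D-stream reads are
    //  outstanding; the ldah above seeds a watchdog of roughly 16M iterations, after
    //  which the code gives up and falls into call_pal_halt.)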
3469
3470// .sbttl "CALL_PAL OPCDECs"
3471
3472 CALL_PAL_PRIV(0x0003)
3473CallPal_OpcDec03:
3474 br r31, osfpal_calpal_opcdec
3475
3476 CALL_PAL_PRIV(0x0004)
3477CallPal_OpcDec04:
3478 br r31, osfpal_calpal_opcdec
3479
3480 CALL_PAL_PRIV(0x0005)
3481CallPal_OpcDec05:
3482 br r31, osfpal_calpal_opcdec
3483
3484 CALL_PAL_PRIV(0x0006)
3485CallPal_OpcDec06:
3486 br r31, osfpal_calpal_opcdec
3487
3488 CALL_PAL_PRIV(0x0007)
3489CallPal_OpcDec07:
3490 br r31, osfpal_calpal_opcdec
3491
3492 CALL_PAL_PRIV(0x0008)
3493CallPal_OpcDec08:
3494 br r31, osfpal_calpal_opcdec
3495
3496// .sbttl "CSERVE- PALcode for CSERVE instruction"
3497//+
3498//
3499// Entry:
3500// Vectored into via hardware PALcode instruction dispatch.
3501//
3502// Function:
3503// Various functions for private use of console software
3504//
3505// option selector in r0
3506// arguments in r16....
3507// The CSERVE routine is in the system specific module.
3508//
3509//-
3510
3511 CALL_PAL_PRIV(PAL_CSERVE_ENTRY)
3512Call_Pal_Cserve:
3513 br r31, sys_cserve
3514
3515// .sbttl "swppal - PALcode for swppal instruction"
3516
3517//+
3518//
3519// Entry:
3520// Vectored into via hardware PALcode instruction dispatch.
3522// R16 contains the new PAL identifier
3523// R17:R21 contain implementation-specific entry parameters
3524//
3525// R0 receives status:
3526// 0 success (PAL was switched)
3527// 1 unknown PAL variant
3528// 2 known PAL variant, but PAL not loaded
3529//
3530//
3531// Function:
3532// Swap control to another PAL.
3533//-
3534
3535 CALL_PAL_PRIV(PAL_SWPPAL_ENTRY)
3536Call_Pal_Swppal:
3537 cmpule r16, 255, r0 // see if a kibble was passed
3538 cmoveq r16, r16, r0 // if r16=0 then a valid address (ECO 59)
3539
 3540 or r16, r31, r3 // set r3 in case this is an address
3541 blbc r0, swppal_cont // nope, try it as an address
3542
3543 cmpeq r16, 2, r0 // is it our friend OSF?
3544 blbc r0, swppal_fail // nope, don't know this fellow
3545
3546 br r2, CALL_PAL_SWPPAL_10_ // tis our buddy OSF
3547
3548// .global osfpal_hw_entry_reset
3549// .weak osfpal_hw_entry_reset
3550// .long <osfpal_hw_entry_reset-pal_start>
3551//orig halt // don't know how to get the address here - kludge ok, load pal at 0
3552 .long 0 // ?? hack upon hack...pb
3553
3554CALL_PAL_SWPPAL_10_: ldlp r3, 0(r2) // fetch target addr
3555// ble r3, swppal_fail ; if OSF not linked in say not loaded.
3556 mfpr r2, pal_base // fetch pal base
3557
3558 addq r2, r3, r3 // add pal base
3559 lda r2, 0x3FFF(r31) // get pal base checker mask
3560
3561 and r3, r2, r2 // any funky bits set?
3562 cmpeq r2, 0, r0 //
3563
3564 blbc r0, swppal_fail // return unknown if bad bit set.
3565 br r31, swppal_cont
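    // (The 0x3FFF check above requires the computed target -- pal_base plus the
    //  offset fetched through r2 -- to have its low 14 bits clear, i.e. to be
    //  16KB-aligned; anything else is returned as "unknown PAL variant".)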
3566
3567// .sbttl "CALL_PAL OPCDECs"
3568
3569 CALL_PAL_PRIV(0x000B)
3570CallPal_OpcDec0B:
3571 br r31, osfpal_calpal_opcdec
3572
3573 CALL_PAL_PRIV(0x000C)
3574CallPal_OpcDec0C:
3575 br r31, osfpal_calpal_opcdec
3576
3577// .sbttl "wripir- PALcode for wripir instruction"
3578//+
3579//
3580// Entry:
3581// Vectored into via hardware PALcode instruction dispatch.
3582// r16 = processor number to interrupt
3583//
3584// Function:
3585// IPIR <- R16
3586// Handled in system-specific code
3587//
3588// Exit:
3589// interprocessor interrupt is recorded on the target processor
3590// and is initiated when the proper enabling conditions are present.
3591//-
3592
3593 CALL_PAL_PRIV(PAL_WRIPIR_ENTRY)
3594Call_Pal_Wrpir:
3595 br r31, sys_wripir
3596
3597// .sbttl "CALL_PAL OPCDECs"
3598
3599 CALL_PAL_PRIV(0x000E)
3600CallPal_OpcDec0E:
3601 br r31, osfpal_calpal_opcdec
3602
3603 CALL_PAL_PRIV(0x000F)
3604CallPal_OpcDec0F:
3605 br r31, osfpal_calpal_opcdec
3606
3607// .sbttl "rdmces- PALcode for rdmces instruction"
3608
3609//+
3610//
3611// Entry:
3612// Vectored into via hardware PALcode instruction dispatch.
3613//
3614// Function:
3615// R0 <- ZEXT(MCES)
3616//-
3617
3618 CALL_PAL_PRIV(PAL_RDMCES_ENTRY)
3619Call_Pal_Rdmces:
3620 mfpr r0, pt_mces // Read from PALtemp
3621 and r0, mces_m_all, r0 // Clear other bits
3622
3623 hw_rei
3624
3625// .sbttl "wrmces- PALcode for wrmces instruction"
3626
3627//+
3628//
3629// Entry:
3630// Vectored into via hardware PALcode instruction dispatch.
3631//
3632// Function:
3633// If {R16<0> EQ 1} then MCES<0> <- 0 (MCHK)
3634// If {R16<1> EQ 1} then MCES<1> <- 0 (SCE)
3635// If {R16<2> EQ 1} then MCES<2> <- 0 (PCE)
3636// MCES<3> <- R16<3> (DPC)
3637// MCES<4> <- R16<4> (DSC)
3638//
3639//-
3640
3641 CALL_PAL_PRIV(PAL_WRMCES_ENTRY)
3642Call_Pal_Wrmces:
3643 and r16, ((1<<mces_v_mchk) | (1<<mces_v_sce) | (1<<mces_v_pce)), r13 // Isolate MCHK, SCE, PCE
3644 mfpr r14, pt_mces // Get current value
3645
3646 ornot r31, r13, r13 // Flip all the bits
3647 and r16, ((1<<mces_v_dpc) | (1<<mces_v_dsc)), r17
3648
3649 and r14, r13, r1 // Update MCHK, SCE, PCE
3650 bic r1, ((1<<mces_v_dpc) | (1<<mces_v_dsc)), r1 // Clear old DPC, DSC
3651
3652 or r1, r17, r1 // Update DPC and DSC
3653 mtpr r1, pt_mces // Write MCES back
3654
3655#if rawhide_system == 0
3656 nop // Pad to fix PT write->read restriction
3657#else
3658 blbs r16, RAWHIDE_clear_mchk_lock // Clear logout from lock
3659#endif
3660
3661 nop
3662 hw_rei
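    // (MCES<2:0> are write-one-to-clear: r13 collects the bits the caller asked to
    //  clear, ornot turns that into a keep-mask, and the and drops exactly those
    //  bits; DPC and DSC are simply overwritten from a0<4:3>.)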
3663
3664
3665
3666// .sbttl "CALL_PAL OPCDECs"
3667
3668 CALL_PAL_PRIV(0x0012)
3669CallPal_OpcDec12:
3670 br r31, osfpal_calpal_opcdec
3671
3672 CALL_PAL_PRIV(0x0013)
3673CallPal_OpcDec13:
3674 br r31, osfpal_calpal_opcdec
3675
3676 CALL_PAL_PRIV(0x0014)
3677CallPal_OpcDec14:
3678 br r31, osfpal_calpal_opcdec
3679
3680 CALL_PAL_PRIV(0x0015)
3681CallPal_OpcDec15:
3682 br r31, osfpal_calpal_opcdec
3683
3684 CALL_PAL_PRIV(0x0016)
3685CallPal_OpcDec16:
3686 br r31, osfpal_calpal_opcdec
3687
3688 CALL_PAL_PRIV(0x0017)
3689CallPal_OpcDec17:
3690 br r31, osfpal_calpal_opcdec
3691
3692 CALL_PAL_PRIV(0x0018)
3693CallPal_OpcDec18:
3694 br r31, osfpal_calpal_opcdec
3695
3696 CALL_PAL_PRIV(0x0019)
3697CallPal_OpcDec19:
3698 br r31, osfpal_calpal_opcdec
3699
3700 CALL_PAL_PRIV(0x001A)
3701CallPal_OpcDec1A:
3702 br r31, osfpal_calpal_opcdec
3703
3704 CALL_PAL_PRIV(0x001B)
3705CallPal_OpcDec1B:
3706 br r31, osfpal_calpal_opcdec
3707
3708 CALL_PAL_PRIV(0x001C)
3709CallPal_OpcDec1C:
3710 br r31, osfpal_calpal_opcdec
3711
3712 CALL_PAL_PRIV(0x001D)
3713CallPal_OpcDec1D:
3714 br r31, osfpal_calpal_opcdec
3715
3716 CALL_PAL_PRIV(0x001E)
3717CallPal_OpcDec1E:
3718 br r31, osfpal_calpal_opcdec
3719
3720 CALL_PAL_PRIV(0x001F)
3721CallPal_OpcDec1F:
3722 br r31, osfpal_calpal_opcdec
3723
3724 CALL_PAL_PRIV(0x0020)
3725CallPal_OpcDec20:
3726 br r31, osfpal_calpal_opcdec
3727
3728 CALL_PAL_PRIV(0x0021)
3729CallPal_OpcDec21:
3730 br r31, osfpal_calpal_opcdec
3731
3732 CALL_PAL_PRIV(0x0022)
3733CallPal_OpcDec22:
3734 br r31, osfpal_calpal_opcdec
3735
3736 CALL_PAL_PRIV(0x0023)
3737CallPal_OpcDec23:
3738 br r31, osfpal_calpal_opcdec
3739
3740 CALL_PAL_PRIV(0x0024)
3741CallPal_OpcDec24:
3742 br r31, osfpal_calpal_opcdec
3743
3744 CALL_PAL_PRIV(0x0025)
3745CallPal_OpcDec25:
3746 br r31, osfpal_calpal_opcdec
3747
3748 CALL_PAL_PRIV(0x0026)
3749CallPal_OpcDec26:
3750 br r31, osfpal_calpal_opcdec
3751
3752 CALL_PAL_PRIV(0x0027)
3753CallPal_OpcDec27:
3754 br r31, osfpal_calpal_opcdec
3755
3756 CALL_PAL_PRIV(0x0028)
3757CallPal_OpcDec28:
3758 br r31, osfpal_calpal_opcdec
3759
3760 CALL_PAL_PRIV(0x0029)
3761CallPal_OpcDec29:
3762 br r31, osfpal_calpal_opcdec
3763
3764 CALL_PAL_PRIV(0x002A)
3765CallPal_OpcDec2A:
3766 br r31, osfpal_calpal_opcdec
3767
3768// .sbttl "wrfen - PALcode for wrfen instruction"
3769
3770//+
3771//
3772// Entry:
3773// Vectored into via hardware PALcode instruction dispatch.
3774//
3775// Function:
3776// a0<0> -> ICSR<FPE>
3777// Store new FEN in PCB
3778// Final value of t0 (r1), t8..t10 (r22..r24) and a0 (r16) are UNPREDICTABLE
3779//
3780// Issue: What about pending FP loads when FEN goes from on->off????
3781//-
3782
3783 CALL_PAL_PRIV(PAL_WRFEN_ENTRY)
3784Call_Pal_Wrfen:
3785 or r31, 1, r13 // Get a one
3786 mfpr r1, ev5__icsr // Get current FPE
3787
3788 sll r13, icsr_v_fpe, r13 // shift 1 to icsr<fpe> spot, e0
3789 and r16, 1, r16 // clean new fen
3790
3791 sll r16, icsr_v_fpe, r12 // shift new fen to correct bit position
3792 bic r1, r13, r1 // zero icsr<fpe>
3793
3794 or r1, r12, r1 // Or new FEN into ICSR
3795 mfpr r12, pt_pcbb // Get PCBB - E1
3796
3797 mtpr r1, ev5__icsr // write new ICSR. 3 Bubble cycles to HW_REI
3798 stlp r16, osfpcb_q_fen(r12) // Store FEN in PCB.
3799
3800 mfpr r31, pt0 // Pad ICSR<FPE> write.
3801 mfpr r31, pt0
3802
3803 mfpr r31, pt0
3804// pvc_violate 225 // cuz PVC can't distinguish which bits changed
3805 hw_rei
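    // (wrfen is a read-modify-write of ICSR<FPE>: the old FPE bit is cleared with the
    //  bic, the new a0<0> value is or'ed back in, and the same bit is written through
    //  to the PCB's FEN field so that a later swpctx will reload it.)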
3806
3807
3808 CALL_PAL_PRIV(0x002C)
3809CallPal_OpcDec2C:
3810 br r31, osfpal_calpal_opcdec
3811
3812// .sbttl "wrvptpr - PALcode for wrvptpr instruction"
3813//+
3814//
3815// Entry:
3816// Vectored into via hardware PALcode instruction dispatch.
3817//
3818// Function:
3819// vptptr <- a0 (r16)
3820//-
3821
3822 CALL_PAL_PRIV(PAL_WRVPTPTR_ENTRY)
3823Call_Pal_Wrvptptr:
3824 mtpr r16, ev5__mvptbr // Load Mbox copy
3825 mtpr r16, ev5__ivptbr // Load Ibox copy
3826 nop // Pad IPR write
3827 nop
3828 hw_rei
3829
3830 CALL_PAL_PRIV(0x002E)
3831CallPal_OpcDec2E:
3832 br r31, osfpal_calpal_opcdec
3833
3834 CALL_PAL_PRIV(0x002F)
3835CallPal_OpcDec2F:
3836 br r31, osfpal_calpal_opcdec
3837
3838// .sbttl "swpctx- PALcode for swpctx instruction"
3839
3840//+
3841//
3842// Entry:
3843// hardware dispatch via callPal instruction
3844// R16 -> new pcb
3845//
3846// Function:
3847// dynamic state moved to old pcb
3848// new state loaded from new pcb
3849// pcbb pointer set
3850// old pcbb returned in R0
3851//
3852// Note: need to add perf monitor stuff
3853//-
3854
3855 CALL_PAL_PRIV(PAL_SWPCTX_ENTRY)
3856Call_Pal_Swpctx:
3857 rpcc r13 // get cyccounter
3858 mfpr r0, pt_pcbb // get pcbb
3859
3860 ldqp r22, osfpcb_q_fen(r16) // get new fen/pme
3861 ldqp r23, osfpcb_l_cc(r16) // get new asn
3862
3863 srl r13, 32, r25 // move offset
3864 mfpr r24, pt_usp // get usp
3865
3866 stqp r30, osfpcb_q_ksp(r0) // store old ksp
3867// pvc_violate 379 // stqp can't trap except replay. only problem if mf same ipr in same shadow.
3868 mtpr r16, pt_pcbb // set new pcbb
3869
3870 stqp r24, osfpcb_q_usp(r0) // store usp
3871 addl r13, r25, r25 // merge for new time
3872
3873 stlp r25, osfpcb_l_cc(r0) // save time
3874 ldah r24, (1<<(icsr_v_fpe-16))(r31)
3875
3876 and r22, 1, r12 // isolate fen
3877 mfpr r25, icsr // get current icsr
3878
3879 ev5_pass2 lda r24, (1<<icsr_v_pmp)(r24)
3880 br r31, swpctx_cont
3881
3882// .sbttl "wrval - PALcode for wrval instruction"
3883//+
3884//
3885// Entry:
3886// Vectored into via hardware PALcode instruction dispatch.
3887//
3888// Function:
3889// sysvalue <- a0 (r16)
3890//-
3891
3892 CALL_PAL_PRIV(PAL_WRVAL_ENTRY)
3893Call_Pal_Wrval:
3894 nop
3895 mtpr r16, pt_sysval // Pad paltemp write
3896 nop
3897 nop
3898 hw_rei
3899
3900
3901// .sbttl "rdval - PALcode for rdval instruction"
3902
3903//+
3904//
3905// Entry:
3906// Vectored into via hardware PALcode instruction dispatch.
3907//
3908// Function:
3909// v0 (r0) <- sysvalue
3910//-
3911
3912 CALL_PAL_PRIV(PAL_RDVAL_ENTRY)
3913Call_Pal_Rdval:
3914 nop
3915 mfpr r0, pt_sysval
3916 nop
3917 hw_rei
3918
3919// .sbttl "tbi - PALcode for tbi instruction"
3920//+
3921//
3922// Entry:
3923// Vectored into via hardware PALcode instruction dispatch.
3924//
3925// Function:
3926// TB invalidate
3927// r16/a0 = TBI type
3928// r17/a1 = Va for TBISx instructions
3929//-
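//+
// Added note: the dispatch below in C terms. tbi_tbl is really a table of
// 16-byte code slots, one per TBI type; the array-of-function-pointers form
// here is only an illustration.
//
//      extern void (*tbi_tbl_sketch[6])(uint64_t va);
//
//      static void tbi_sketch(long type, uint64_t va)
//      {
//          unsigned long idx = (unsigned long)(type + 2);  // TBI types -2..3 -> 0..5
//          if (idx >= 6)
//              return;                      // out of range: just hw_rei
//          tbi_tbl_sketch[idx](va);         // jump into the selected 16-byte slot
//      }
//-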
3930
3931 CALL_PAL_PRIV(PAL_TBI_ENTRY)
3932Call_Pal_Tbi:
3933	addq	r16, 2, r16			// change range to 0-5 (TBI types -2..3)
3934 br r23, CALL_PAL_tbi_10_ // get our address
3935
3936CALL_PAL_tbi_10_: cmpult r16, 6, r22 // see if in range
3937 lda r23, tbi_tbl-CALL_PAL_tbi_10_(r23) // set base to start of table
3938 sll r16, 4, r16 // * 16
3939 blbc r22, CALL_PAL_tbi_30_ // go rei, if not
3940
3941 addq r23, r16, r23 // addr of our code
3942//orig pvc_jsr tbi
3943 jmp r31, (r23) // and go do it
3944
3945CALL_PAL_tbi_30_:
3946 hw_rei
3947 nop
3948
3949// .sbttl "wrent - PALcode for wrent instruction"
3950//+
3951//
3952// Entry:
3953// Vectored into via hardware PALcode instruction dispatch.
3954//
3955// Function:
3956// Update ent* in paltemps
3957// r16/a0 = Address of entry routine
3958// r17/a1 = Entry Number 0..5
3959//
3960// r22, r23 trashed
3961//-
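//+
// Added note: same table-dispatch pattern as tbi above, sketched in C.
// wrent_tbl holds one 16-byte code slot per entry number; the function-pointer
// form is illustrative only.
//
//      extern void (*wrent_tbl_sketch[6])(uint64_t addr);
//
//      static void wrent_sketch(uint64_t addr, uint64_t which)
//      {
//          if (which >= 6)
//              return;                          // out of range: just hw_rei
//          wrent_tbl_sketch[which](addr & ~3ULL);  // clean pc, store into pt_ent*
//      }
//-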
3962
3963 CALL_PAL_PRIV(PAL_WRENT_ENTRY)
3964Call_Pal_Wrent:
3965 cmpult r17, 6, r22 // see if in range
3966 br r23, CALL_PAL_wrent_10_ // get our address
3967
3968CALL_PAL_wrent_10_: bic r16, 3, r16 // clean pc
3969 blbc r22, CALL_PAL_wrent_30_ // go rei, if not in range
3970
3971 lda r23, wrent_tbl-CALL_PAL_wrent_10_(r23) // set base to start of table
3972 sll r17, 4, r17 // *16
3973
3974 addq r17, r23, r23 // Get address in table
3975//orig pvc_jsr wrent
3976 jmp r31, (r23) // and go do it
3977
3978CALL_PAL_wrent_30_:
3979 hw_rei // out of range, just return
3980
3981// .sbttl "swpipl - PALcode for swpipl instruction"
3982//+
3983//
3984// Entry:
3985// Vectored into via hardware PALcode instruction dispatch.
3986//
3987// Function:
3988// v0 (r0) <- PS<IPL>
3989// PS<IPL> <- a0<2:0> (r16)
3990//
3991// t8 (r22) is scratch
3992//-
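//+
// Added note: a C sketch of the IPL swap below. pt_intmask packs one
// pre-computed Ibox IPL byte per OSF IPL level, so the extbl simply picks the
// byte for the new level. write_hw_ipl and OSFPS_M_IPL are illustrative names.
//
//      static unsigned swpipl_sketch(unsigned a0, unsigned *ps_ipl, uint64_t intmask)
//      {
//          unsigned old = *ps_ipl;                           // v0 <- PS<IPL>
//          unsigned new_ipl = a0 & OSFPS_M_IPL;              // a0<2:0>
//          write_hw_ipl((intmask >> (8 * new_ipl)) & 0xff);  // byte for this IPL
//          *ps_ipl = new_ipl;                                // PS<IPL> <- a0<2:0>
//          return old;
//      }
//-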
3993
3994 CALL_PAL_PRIV(PAL_SWPIPL_ENTRY)
3995Call_Pal_Swpipl:
3996 and r16, osfps_m_ipl, r16 // clean New ipl
3997 mfpr r22, pt_intmask // get int mask
3998
3999 extbl r22, r16, r22 // get mask for this ipl
4000 bis r11, r31, r0 // return old ipl
4001
4002 bis r16, r31, r11 // set new ps
4003 mtpr r22, ev5__ipl // set new mask
4004
4005 mfpr r31, pt0 // pad ipl write
4006 mfpr r31, pt0 // pad ipl write
4007
4008 hw_rei // back
4009
4010// .sbttl "rdps - PALcode for rdps instruction"
4011//+
4012//
4013// Entry:
4014// Vectored into via hardware PALcode instruction dispatch.
4015//
4016// Function:
4017// v0 (r0) <- ps
4018//-
4019
4020 CALL_PAL_PRIV(PAL_RDPS_ENTRY)
4021Call_Pal_Rdps:
4022 bis r11, r31, r0 // Fetch PALshadow PS
4023 nop // Must be 2 cycles long
4024 hw_rei
4025
4026// .sbttl "wrkgp - PALcode for wrkgp instruction"
4027//+
4028//
4029// Entry:
4030// Vectored into via hardware PALcode instruction dispatch.
4031//
4032// Function:
4033// kgp <- a0 (r16)
4034//-
4035
4036 CALL_PAL_PRIV(PAL_WRKGP_ENTRY)
4037Call_Pal_Wrkgp:
4038 nop
4039 mtpr r16, pt_kgp
4040 nop // Pad for pt write->read restriction
4041 nop
4042 hw_rei
4043
4044// .sbttl "wrusp - PALcode for wrusp instruction"
4045//+
4046//
4047// Entry:
4048// Vectored into via hardware PALcode instruction dispatch.
4049//
4050// Function:
4051// usp <- a0 (r16)
4052//-
4053
4054 CALL_PAL_PRIV(PAL_WRUSP_ENTRY)
4055Call_Pal_Wrusp:
4056 nop
4057 mtpr r16, pt_usp
4058 nop // Pad possible pt write->read restriction
4059 nop
4060 hw_rei
4061
4062// .sbttl "wrperfmon - PALcode for wrperfmon instruction"
4063//+
4064//
4065// Entry:
4066// Vectored into via hardware PALcode instruction dispatch.
4067//
4068//
4069// Function:
4070// Various control functions for the onchip performance counters
4071//
4072// option selector in r16
4073// option argument in r17
4074// returned status in r0
4075//
4076//
4077//	r16 = 0	Disable performance monitoring for one or more CPUs
4078// r17 = 0 disable no counters
4079// r17 = bitmask disable counters specified in bit mask (1=disable)
4080//
4081//	r16 = 1	Enable performance monitoring for one or more CPUs
4082// r17 = 0 enable no counters
4083// r17 = bitmask enable counters specified in bit mask (1=enable)
4084//
4085//	r16 = 2	Mux select for one or more CPUs
4086// r17 = Mux selection (cpu specific)
4087// <24:19> bc_ctl<pm_mux_sel> field (see spec)
4088// <31>,<7:4>,<3:0> pmctr <sel0>,<sel1>,<sel2> fields (see spec)
4089//
4090// r16 = 3 Options
4091// r17 = (cpu specific)
4092// <0> = 0 log all processes
4093// <0> = 1 log only selected processes
4094// <30,9,8> mode select - ku,kp,kk
4095//
4096// r16 = 4 Interrupt frequency select
4097// r17 = (cpu specific) indicates interrupt frequencies desired for each
4098// counter, with "zero interrupts" being an option
4099// frequency info in r17 bits as defined by PMCTR_CTL<FRQx> below
4100//
4101// r16 = 5 Read Counters
4102// r17 = na
4103// r0 = value (same format as ev5 pmctr)
4104// <0> = 0 Read failed
4105// <0> = 1 Read succeeded
4106//
4107// r16 = 6 Write Counters
4108// r17 = value (same format as ev5 pmctr; all counters written simultaneously)
4109//
4110//	r16 = 7	Enable performance monitoring for one or more CPUs and reset counters to 0
4111// r17 = 0 enable no counters
4112// r17 = bitmask enable & clear counters specified in bit mask (1=enable & clear)
4113//
4114//=============================================================================
4115//Assumptions:
4116//PMCTR_CTL:
4117//
4118// <15:14> CTL0 -- encoded frequency select and enable - CTR0
4119// <13:12> CTL1 -- " - CTR1
4120// <11:10> CTL2 -- " - CTR2
4121//
4122// <9:8> FRQ0 -- frequency select for CTR0 (no enable info)
4123// <7:6> FRQ1 -- frequency select for CTR1
4124// <5:4> FRQ2 -- frequency select for CTR2
4125//
4126// <0> all vs. select processes (0=all,1=select)
4127//
4128// where
4129// FRQx<1:0>
4130// 0 1 disable interrupt
4131// 1 0 frequency = 65536 (16384 for ctr2)
4132// 1 1 frequency = 256
4133// note: FRQx<1:0> = 00 will keep counters from ever being enabled.
4134//
4135//=============================================================================
4136//
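//+
// Added note: the cmpeq/bne chain that follows is just a dispatch on the
// option selector in r16; in C it would read as the switch sketched below
// (the targets are the handler labels used later in this file).
//
//      switch (r16) {
//      case 0:  goto perfmon_dis;      // disable counters per r17 mask
//      case 1:  goto perfmon_en;       // enable counters per r17 mask
//      case 2:  goto perfmon_muxctl;   // pmctr/bc_ctl mux selects
//      case 3:  goto perfmon_ctl;      // mode / process-select options
//      case 4:  goto perfmon_freq;     // interrupt frequency select
//      case 5:  goto perfmon_rd;       // read counters into r0
//      case 6:  goto perfmon_wr;       // write all counters from r17
//      case 7:  goto perfmon_enclr;    // enable and clear counters
//      default: goto perfmon_unknown;  // unknown request: r0 = 0
//      }
//-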
4137 CALL_PAL_PRIV(0x0039)
4138// unsupported in Hudson code .. pboyle Nov/95
4139CALL_PAL_Wrperfmon:
4140#if perfmon_debug == 0
4141 // "real" performance monitoring code
4142 cmpeq r16, 1, r0 // check for enable
4143 bne r0, perfmon_en // br if requested to enable
4144
4145 cmpeq r16, 2, r0 // check for mux ctl
4146 bne r0, perfmon_muxctl // br if request to set mux controls
4147
4148 cmpeq r16, 3, r0 // check for options
4149 bne r0, perfmon_ctl // br if request to set options
4150
4151 cmpeq r16, 4, r0 // check for interrupt frequency select
4152 bne r0, perfmon_freq // br if request to change frequency select
4153
4154 cmpeq r16, 5, r0 // check for counter read request
4155 bne r0, perfmon_rd // br if request to read counters
4156
4157 cmpeq r16, 6, r0 // check for counter write request
4158 bne r0, perfmon_wr // br if request to write counters
4159
4160 cmpeq r16, 7, r0 // check for counter clear/enable request
4161 bne r0, perfmon_enclr // br if request to clear/enable counters
4162
4163 beq r16, perfmon_dis // br if requested to disable (r16=0)
4164 br r31, perfmon_unknown // br if unknown request
4165#else
4166
4167 br r31, pal_perfmon_debug
4168#endif
4169
4170// .sbttl "rdusp - PALcode for rdusp instruction"
4171//+
4172//
4173// Entry:
4174// Vectored into via hardware PALcode instruction dispatch.
4175//
4176// Function:
4177// v0 (r0) <- usp
4178//-
4179
4180 CALL_PAL_PRIV(PAL_RDUSP_ENTRY)
4181Call_Pal_Rdusp:
4182 nop
4183 mfpr r0, pt_usp
4184 hw_rei
4185
4186
4187 CALL_PAL_PRIV(0x003B)
4188CallPal_OpcDec3B:
4189 br r31, osfpal_calpal_opcdec
4190
4191// .sbttl "whami - PALcode for whami instruction"
4192//+
4193//
4194// Entry:
4195// Vectored into via hardware PALcode instruction dispatch.
4196//
4197// Function:
4198// v0 (r0) <- whami
4199//-
4200 CALL_PAL_PRIV(PAL_WHAMI_ENTRY)
4201Call_Pal_Whami:
4202 nop
4203 mfpr r0, pt_whami // Get Whami
4204 extbl r0, 1, r0 // Isolate just whami bits
4205 hw_rei
4206
4207// .sbttl "retsys - PALcode for retsys instruction"
4208//+
4209// Entry:
4210// Vectored into via hardware PALcode instruction dispatch.
4211// 00(sp) contains return pc
4212// 08(sp) contains r29
4213//
4214// Function:
4215// Return from system call.
4216// mode switched from kern to user.
4217// stacks swapped, ugp, upc restored.
4218// r23, r25 junked
4219//-
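//+
// Added note: a C-level sketch of the return path below. The frame holds the
// return pc at 00(sp) and the saved gp at 08(sp); names such as pt_usp/pt_ksp
// and hw_rei_to stand in for PALtemp registers and the hw_rei sequence and are
// illustrative only.
//
//      uint64_t pc = frame->pc & ~3ULL;    // clean return pc
//      gp          = frame->gp;            // restore user gp
//      pt_ksp      = sp + OSFSF_C_SIZE;    // pop the frame, remember kernel sp
//      ps.mode     = MODE_USER;            // mode switched from kern to user
//      sp          = pt_usp;               // stacks swapped
//      hw_rei_to(pc);                      // back to the user pc
//-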
4220
4221 CALL_PAL_PRIV(PAL_RETSYS_ENTRY)
4222Call_Pal_Retsys:
4223 lda r25, osfsf_c_size(sp) // pop stack
4224 bis r25, r31, r14 // touch r25 & r14 to stall mf exc_addr
4225
4226 mfpr r14, exc_addr // save exc_addr in case of fault
4227 ldq r23, osfsf_pc(sp) // get pc
4228
4229 ldq r29, osfsf_gp(sp) // get gp
4230 stl_c r31, -4(sp) // clear lock_flag
4231
4232 lda r11, 1<<osfps_v_mode(r31)// new PS:mode=user
4233 mfpr r30, pt_usp // get users stack
4234
4235 bic r23, 3, r23 // clean return pc
4236 mtpr r31, ev5__ipl // zero ibox IPL - 2 bubbles to hw_rei
4237
4238 mtpr r11, ev5__dtb_cm // set Mbox current mode - no virt ref for 2 cycles
4239 mtpr r11, ev5__ps // set Ibox current mode - 2 bubble to hw_rei
4240
4241 mtpr r23, exc_addr // set return address - 1 bubble to hw_rei
4242 mtpr r25, pt_ksp // save kern stack
4243
4244 rc r31 // clear inter_flag
4245// pvc_violate 248 // possible hidden mt->mf pt violation ok in callpal
4246 hw_rei_spe // and back
4247
4248
4249 CALL_PAL_PRIV(0x003E)
4250CallPal_OpcDec3E:
4251 br r31, osfpal_calpal_opcdec
4252
4253// .sbttl "rti - PALcode for rti instruction"
4254//+
4255//
4256// Entry:
4257// Vectored into via hardware PALcode instruction dispatch.
4258//
4259// Function:
4260// 00(sp) -> ps
4261// 08(sp) -> pc
4262// 16(sp) -> r29 (gp)
4263// 24(sp) -> r16 (a0)
4264// 32(sp) -> r17 (a1)
4265//	40(sp) -> r18 (a2)
4266//-
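//+
// Added note: the frame popped below, written as a C struct for clarity
// (field names are illustrative; offsets match the list above). The code
// first advances r25 past the frame and then reads the fields back with
// negative offsets (-6*8 ... -1*8).
//
//      struct osf_stack_frame_sketch {
//          uint64_t ps;   // 00(sp)
//          uint64_t pc;   // 08(sp)
//          uint64_t gp;   // 16(sp)  r29
//          uint64_t a0;   // 24(sp)  r16
//          uint64_t a1;   // 32(sp)  r17
//          uint64_t a2;   // 40(sp)  r18
//      };
//-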
4267
4268 CALL_PAL_PRIV(PAL_RTI_ENTRY)
4269#ifdef SIMOS
4270 /* called once by platform_tlaser */
4271 .globl Call_Pal_Rti
4272#endif
4273Call_Pal_Rti:
4274 lda r25, osfsf_c_size(sp) // get updated sp
4275 bis r25, r31, r14 // touch r14,r25 to stall mf exc_addr
4276
4277 mfpr r14, exc_addr // save PC in case of fault
4278 rc r31 // clear intr_flag
4279
4280 ldq r12, -6*8(r25) // get ps
4281 ldq r13, -5*8(r25) // pc
4282
4283 ldq r18, -1*8(r25) // a2
4284 ldq r17, -2*8(r25) // a1
4285
4286 ldq r16, -3*8(r25) // a0
4287 ldq r29, -4*8(r25) // gp
4288
4289 bic r13, 3, r13 // clean return pc
4290 stl_c r31, -4(r25) // clear lock_flag
4291
4292 and r12, osfps_m_mode, r11 // get mode
4293 mtpr r13, exc_addr // set return address
4294
4295 beq r11, rti_to_kern // br if rti to Kern
4296 br r31, rti_to_user // out of call_pal space
4297
4298
4299// .sbttl "Start the Unprivileged CALL_PAL Entry Points"
4300// .sbttl "bpt- PALcode for bpt instruction"
4301//+
4302//
4303// Entry:
4304// Vectored into via hardware PALcode instruction dispatch.
4305//
4306// Function:
4307// Build stack frame
4308// a0 <- code
4309// a1 <- unpred
4310// a2 <- unpred
4311// vector via entIF
4312//
4313//-
4314//
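//+
// Added note: bpt, bugchk and gentrap all follow the same pattern, sketched
// here in C. ps/pt_usp/pt_ksp/OSFSF_C_SIZE stand in for PALcode state and
// are illustrative assumptions.
//
//      if (ps.mode != MODE_KERNEL) {       // no stack swap needed if cm=kern
//          pt_usp  = sp;                   // save user stack
//          sp      = pt_ksp;               // switch to the kernel stack
//          ps.mode = MODE_KERNEL;
//      }
//      sp -= OSFSF_C_SIZE;                 // allocate stack space
//      frame->a0 = code;                   // osf_a0_bpt / osf_a0_bugchk / osf_a0_gentrap
//      frame->a1 = a1;                     // a1/a2 contents are unpredictable
//      // ps, pc, gp and a2 are stored and entIF is entered in bpt_bchk_common.
//-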
4315 .text 1
4316// . = 0x3000
4317 CALL_PAL_UNPRIV(PAL_BPT_ENTRY)
4318Call_Pal_Bpt:
4319 sll r11, 63-osfps_v_mode, r25 // Shift mode up to MS bit
4320 mtpr r31, ev5__ps // Set Ibox current mode to kernel
4321
4322 bis r11, r31, r12 // Save PS for stack write
4323 bge r25, CALL_PAL_bpt_10_ // no stack swap needed if cm=kern
4324
4325 mtpr r31, ev5__dtb_cm // Set Mbox current mode to kernel -
4326 // no virt ref for next 2 cycles
4327 mtpr r30, pt_usp // save user stack
4328
4329 bis r31, r31, r11 // Set new PS
4330 mfpr r30, pt_ksp
4331
4332CALL_PAL_bpt_10_:
4333 lda sp, 0-osfsf_c_size(sp)// allocate stack space
4334 mfpr r14, exc_addr // get pc
4335
4336 stq r16, osfsf_a0(sp) // save regs
4337 bis r31, osf_a0_bpt, r16 // set a0
4338
4339 stq r17, osfsf_a1(sp) // a1
4340 br r31, bpt_bchk_common // out of call_pal space
4341
4342
4343// .sbttl "bugchk- PALcode for bugchk instruction"
4344//+
4345//
4346// Entry:
4347// Vectored into via hardware PALcode instruction dispatch.
4348//
4349// Function:
4350// Build stack frame
4351// a0 <- code
4352// a1 <- unpred
4353// a2 <- unpred
4354// vector via entIF
4355//
4356//-
4357//
4358 CALL_PAL_UNPRIV(PAL_BUGCHK_ENTRY)
4359Call_Pal_Bugchk:
4360 sll r11, 63-osfps_v_mode, r25 // Shift mode up to MS bit
4361 mtpr r31, ev5__ps // Set Ibox current mode to kernel
4362
4363 bis r11, r31, r12 // Save PS for stack write
4364 bge r25, CALL_PAL_bugchk_10_ // no stack swap needed if cm=kern
4365
4366 mtpr r31, ev5__dtb_cm // Set Mbox current mode to kernel -
4367 // no virt ref for next 2 cycles
4368 mtpr r30, pt_usp // save user stack
4369
4370 bis r31, r31, r11 // Set new PS
4371 mfpr r30, pt_ksp
4372
4373CALL_PAL_bugchk_10_:
4374 lda sp, 0-osfsf_c_size(sp)// allocate stack space
4375 mfpr r14, exc_addr // get pc
4376
4377 stq r16, osfsf_a0(sp) // save regs
4378 bis r31, osf_a0_bugchk, r16 // set a0
4379
4380 stq r17, osfsf_a1(sp) // a1
4381 br r31, bpt_bchk_common // out of call_pal space
4382
4383
4384 CALL_PAL_UNPRIV(0x0082)
4385CallPal_OpcDec82:
4386 br r31, osfpal_calpal_opcdec
4387
4388// .sbttl "callsys - PALcode for callsys instruction"
4389//+
4390//
4391// Entry:
4392// Vectored into via hardware PALcode instruction dispatch.
4393//
4394// Function:
4395// Switch mode to kernel and build a callsys stack frame.
4396// sp = ksp
4397// gp = kgp
4398// t8 - t10 (r22-r24) trashed
4399//
4400//-
4401//
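//+
// Added note: a C sketch of the mode switch and frame build done below.
// pt_* names stand in for the PALtemp registers; all names here are
// illustrative.
//
//      if (ps.mode == MODE_KERNEL)
//          goto sys_from_kern;             // syscall from kernel is not allowed
//      pt_usp    = sp;                     // save usp
//      sp        = pt_ksp - OSFSF_C_SIZE;  // frame goes on the kernel stack
//      frame->gp = gp;                     // save user gp/r29
//      frame->ps = MODE_USER;              // old mode
//      frame->pc = exc_addr;               // return pc
//      ps.mode   = MODE_KERNEL;
//      gp        = pt_kgp;                 // kernel gp
//      hw_rei_to(pt_entsys);               // enter the kernel's entSys routine
//-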
4402 CALL_PAL_UNPRIV(PAL_CALLSYS_ENTRY)
4403Call_Pal_Callsys:
4404
4405 and r11, osfps_m_mode, r24 // get mode
4406 mfpr r22, pt_ksp // get ksp
4407
4408 beq r24, sys_from_kern // sysCall from kern is not allowed
4409 mfpr r12, pt_entsys // get address of callSys routine
4410
4411//+
4412// from here on we know we are in user going to Kern
4413//-
4414 mtpr r31, ev5__dtb_cm // set Mbox current mode - no virt ref for 2 cycles
4415 mtpr r31, ev5__ps // set Ibox current mode - 2 bubble to hw_rei
4416
4417 bis r31, r31, r11 // PS=0 (mode=kern)
4418 mfpr r23, exc_addr // get pc
4419
4420 mtpr r30, pt_usp // save usp
4421 lda sp, 0-osfsf_c_size(r22)// set new sp
4422
4423 stq r29, osfsf_gp(sp) // save user gp/r29
4424 stq r24, osfsf_ps(sp) // save ps
4425
4426 stq r23, osfsf_pc(sp) // save pc
4427 mtpr r12, exc_addr // set address
4428 // 1 cycle to hw_rei
4429
4430 mfpr r29, pt_kgp // get the kern gp/r29
4431
4432 hw_rei_spe // and off we go!
4433
4434
4435 CALL_PAL_UNPRIV(0x0084)
4436CallPal_OpcDec84:
4437 br r31, osfpal_calpal_opcdec
4438
4439 CALL_PAL_UNPRIV(0x0085)
4440CallPal_OpcDec85:
4441 br r31, osfpal_calpal_opcdec
4442
4443// .sbttl "imb - PALcode for imb instruction"
4444//+
4445//
4446// Entry:
4447// Vectored into via hardware PALcode instruction dispatch.
4448//
4449// Function:
4450// Flush the writebuffer and flush the Icache
4451//
4452//-
4453//
4454 CALL_PAL_UNPRIV(PAL_IMB_ENTRY)
4455Call_Pal_Imb:
4456 mb // Clear the writebuffer
4457 mfpr r31, ev5__mcsr // Sync with clear
4458 nop
4459 nop
4460 br r31, pal_ic_flush // Flush Icache
4461
4462
4463// .sbttl "CALL_PAL OPCDECs"
4464
4465 CALL_PAL_UNPRIV(0x0087)
4466CallPal_OpcDec87:
4467 br r31, osfpal_calpal_opcdec
4468
4469 CALL_PAL_UNPRIV(0x0088)
4470CallPal_OpcDec88:
4471 br r31, osfpal_calpal_opcdec
4472
4473 CALL_PAL_UNPRIV(0x0089)
4474CallPal_OpcDec89:
4475 br r31, osfpal_calpal_opcdec
4476
4477 CALL_PAL_UNPRIV(0x008A)
4478CallPal_OpcDec8A:
4479 br r31, osfpal_calpal_opcdec
4480
4481 CALL_PAL_UNPRIV(0x008B)
4482CallPal_OpcDec8B:
4483 br r31, osfpal_calpal_opcdec
4484
4485 CALL_PAL_UNPRIV(0x008C)
4486CallPal_OpcDec8C:
4487 br r31, osfpal_calpal_opcdec
4488
4489 CALL_PAL_UNPRIV(0x008D)
4490CallPal_OpcDec8D:
4491 br r31, osfpal_calpal_opcdec
4492
4493 CALL_PAL_UNPRIV(0x008E)
4494CallPal_OpcDec8E:
4495 br r31, osfpal_calpal_opcdec
4496
4497 CALL_PAL_UNPRIV(0x008F)
4498CallPal_OpcDec8F:
4499 br r31, osfpal_calpal_opcdec
4500
4501 CALL_PAL_UNPRIV(0x0090)
4502CallPal_OpcDec90:
4503 br r31, osfpal_calpal_opcdec
4504
4505 CALL_PAL_UNPRIV(0x0091)
4506CallPal_OpcDec91:
4507 br r31, osfpal_calpal_opcdec
4508
4509 CALL_PAL_UNPRIV(0x0092)
4510CallPal_OpcDec92:
4511 br r31, osfpal_calpal_opcdec
4512
4513 CALL_PAL_UNPRIV(0x0093)
4514CallPal_OpcDec93:
4515 br r31, osfpal_calpal_opcdec
4516
4517 CALL_PAL_UNPRIV(0x0094)
4518CallPal_OpcDec94:
4519 br r31, osfpal_calpal_opcdec
4520
4521 CALL_PAL_UNPRIV(0x0095)
4522CallPal_OpcDec95:
4523 br r31, osfpal_calpal_opcdec
4524
4525 CALL_PAL_UNPRIV(0x0096)
4526CallPal_OpcDec96:
4527 br r31, osfpal_calpal_opcdec
4528
4529 CALL_PAL_UNPRIV(0x0097)
4530CallPal_OpcDec97:
4531 br r31, osfpal_calpal_opcdec
4532
4533 CALL_PAL_UNPRIV(0x0098)
4534CallPal_OpcDec98:
4535 br r31, osfpal_calpal_opcdec
4536
4537 CALL_PAL_UNPRIV(0x0099)
4538CallPal_OpcDec99:
4539 br r31, osfpal_calpal_opcdec
4540
4541 CALL_PAL_UNPRIV(0x009A)
4542CallPal_OpcDec9A:
4543 br r31, osfpal_calpal_opcdec
4544
4545 CALL_PAL_UNPRIV(0x009B)
4546CallPal_OpcDec9B:
4547 br r31, osfpal_calpal_opcdec
4548
4549 CALL_PAL_UNPRIV(0x009C)
4550CallPal_OpcDec9C:
4551 br r31, osfpal_calpal_opcdec
4552
4553 CALL_PAL_UNPRIV(0x009D)
4554CallPal_OpcDec9D:
4555 br r31, osfpal_calpal_opcdec
4556
4557// .sbttl "rdunique - PALcode for rdunique instruction"
4558//+
4559//
4560// Entry:
4561// Vectored into via hardware PALcode instruction dispatch.
4562//
4563// Function:
4564// v0 (r0) <- unique
4565//
4566//-
4567//
4568 CALL_PAL_UNPRIV(PAL_RDUNIQUE_ENTRY)
4569CALL_PALrdunique_:
4570 mfpr r0, pt_pcbb // get pcb pointer
4571 ldqp r0, osfpcb_q_unique(r0) // get new value
4572
4573 hw_rei
4574
4575// .sbttl "wrunique - PALcode for wrunique instruction"
4576//+
4577//
4578// Entry:
4579// Vectored into via hardware PALcode instruction dispatch.
4580//
4581// Function:
4582// unique <- a0 (r16)
4583//
4584//-
4585//
4586CALL_PAL_UNPRIV(PAL_WRUNIQUE_ENTRY)
4587CALL_PAL_Wrunique:
4588 nop
4589 mfpr r12, pt_pcbb // get pcb pointer
4590 stqp r16, osfpcb_q_unique(r12)// get new value
4591 nop // Pad palshadow write
4592 hw_rei // back
4593
4594// .sbttl "CALL_PAL OPCDECs"
4595
4596 CALL_PAL_UNPRIV(0x00A0)
4597CallPal_OpcDecA0:
4598 br r31, osfpal_calpal_opcdec
4599
4600 CALL_PAL_UNPRIV(0x00A1)
4601CallPal_OpcDecA1:
4602 br r31, osfpal_calpal_opcdec
4603
4604 CALL_PAL_UNPRIV(0x00A2)
4605CallPal_OpcDecA2:
4606 br r31, osfpal_calpal_opcdec
4607
4608 CALL_PAL_UNPRIV(0x00A3)
4609CallPal_OpcDecA3:
4610 br r31, osfpal_calpal_opcdec
4611
4612 CALL_PAL_UNPRIV(0x00A4)
4613CallPal_OpcDecA4:
4614 br r31, osfpal_calpal_opcdec
4615
4616 CALL_PAL_UNPRIV(0x00A5)
4617CallPal_OpcDecA5:
4618 br r31, osfpal_calpal_opcdec
4619
4620 CALL_PAL_UNPRIV(0x00A6)
4621CallPal_OpcDecA6:
4622 br r31, osfpal_calpal_opcdec
4623
4624 CALL_PAL_UNPRIV(0x00A7)
4625CallPal_OpcDecA7:
4626 br r31, osfpal_calpal_opcdec
4627
4628 CALL_PAL_UNPRIV(0x00A8)
4629CallPal_OpcDecA8:
4630 br r31, osfpal_calpal_opcdec
4631
4632 CALL_PAL_UNPRIV(0x00A9)
4633CallPal_OpcDecA9:
4634 br r31, osfpal_calpal_opcdec
4635
4636
4637// .sbttl "gentrap - PALcode for gentrap instruction"
4638//+
4639// CALL_PAL_gentrap:
4640// Entry:
4641// Vectored into via hardware PALcode instruction dispatch.
4642//
4643// Function:
4644// Build stack frame
4645// a0 <- code
4646// a1 <- unpred
4647// a2 <- unpred
4648// vector via entIF
4649//
4650//-
4651
4652 CALL_PAL_UNPRIV(0x00AA)
4653// unsupported in Hudson code .. pboyle Nov/95
4654CALL_PAL_gentrap:
4655 sll r11, 63-osfps_v_mode, r25 // Shift mode up to MS bit
4656 mtpr r31, ev5__ps // Set Ibox current mode to kernel
4657
4658 bis r11, r31, r12 // Save PS for stack write
4659 bge r25, CALL_PAL_gentrap_10_ // no stack swap needed if cm=kern
4660
4661 mtpr r31, ev5__dtb_cm // Set Mbox current mode to kernel -
4662 // no virt ref for next 2 cycles
4663 mtpr r30, pt_usp // save user stack
4664
4665 bis r31, r31, r11 // Set new PS
4666 mfpr r30, pt_ksp
4667
4668CALL_PAL_gentrap_10_:
4669 lda sp, 0-osfsf_c_size(sp)// allocate stack space
4670 mfpr r14, exc_addr // get pc
4671
4672 stq r16, osfsf_a0(sp) // save regs
4673 bis r31, osf_a0_gentrap, r16// set a0
4674
4675 stq r17, osfsf_a1(sp) // a1
4676 br r31, bpt_bchk_common // out of call_pal space
4677
4678
4679// .sbttl "CALL_PAL OPCDECs"
4680
4681 CALL_PAL_UNPRIV(0x00AB)
4682CallPal_OpcDecAB:
4683 br r31, osfpal_calpal_opcdec
4684
4685 CALL_PAL_UNPRIV(0x00AC)
4686CallPal_OpcDecAC:
4687 br r31, osfpal_calpal_opcdec
4688
4689 CALL_PAL_UNPRIV(0x00AD)
4690CallPal_OpcDecAD:
4691 br r31, osfpal_calpal_opcdec
4692
4693 CALL_PAL_UNPRIV(0x00AE)
4694CallPal_OpcDecAE:
4695 br r31, osfpal_calpal_opcdec
4696
4697 CALL_PAL_UNPRIV(0x00AF)
4698CallPal_OpcDecAF:
4699 br r31, osfpal_calpal_opcdec
4700
4701 CALL_PAL_UNPRIV(0x00B0)
4702CallPal_OpcDecB0:
4703 br r31, osfpal_calpal_opcdec
4704
4705 CALL_PAL_UNPRIV(0x00B1)
4706CallPal_OpcDecB1:
4707 br r31, osfpal_calpal_opcdec
4708
4709 CALL_PAL_UNPRIV(0x00B2)
4710CallPal_OpcDecB2:
4711 br r31, osfpal_calpal_opcdec
4712
4713 CALL_PAL_UNPRIV(0x00B3)
4714CallPal_OpcDecB3:
4715 br r31, osfpal_calpal_opcdec
4716
4717 CALL_PAL_UNPRIV(0x00B4)
4718CallPal_OpcDecB4:
4719 br r31, osfpal_calpal_opcdec
4720
4721 CALL_PAL_UNPRIV(0x00B5)
4722CallPal_OpcDecB5:
4723 br r31, osfpal_calpal_opcdec
4724
4725 CALL_PAL_UNPRIV(0x00B6)
4726CallPal_OpcDecB6:
4727 br r31, osfpal_calpal_opcdec
4728
4729 CALL_PAL_UNPRIV(0x00B7)
4730CallPal_OpcDecB7:
4731 br r31, osfpal_calpal_opcdec
4732
4733 CALL_PAL_UNPRIV(0x00B8)
4734CallPal_OpcDecB8:
4735 br r31, osfpal_calpal_opcdec
4736
4737 CALL_PAL_UNPRIV(0x00B9)
4738CallPal_OpcDecB9:
4739 br r31, osfpal_calpal_opcdec
4740
4741 CALL_PAL_UNPRIV(0x00BA)
4742CallPal_OpcDecBA:
4743 br r31, osfpal_calpal_opcdec
4744
4745 CALL_PAL_UNPRIV(0x00BB)
4746CallPal_OpcDecBB:
4747 br r31, osfpal_calpal_opcdec
4748
4749 CALL_PAL_UNPRIV(0x00BC)
4750CallPal_OpcDecBC:
4751 br r31, osfpal_calpal_opcdec
4752
4753 CALL_PAL_UNPRIV(0x00BD)
4754CallPal_OpcDecBD:
4755 br r31, osfpal_calpal_opcdec
4756
4757 CALL_PAL_UNPRIV(0x00BE)
4758CallPal_OpcDecBE:
4759 br r31, osfpal_calpal_opcdec
4760
4761 CALL_PAL_UNPRIV(0x00BF)
4762CallPal_OpcDecBF:
4763 // MODIFIED BY EGH 2/25/04
4764 br r31, copypal_impl
4765
4766
4767/*======================================================================*/
4768/* OSF/1 CALL_PAL CONTINUATION AREA */
4769/*======================================================================*/
4770
4771 .text 2
4772
4773 . = 0x4000
4774
4775
4776// .sbttl "Continuation of MTPR_PERFMON"
4777 ALIGN_BLOCK
4778#if perfmon_debug == 0
4779 // "real" performance monitoring code
4780// mux ctl
4781perfmon_muxctl:
4782 lda r8, 1(r31) // get a 1
4783 sll r8, pmctr_v_sel0, r8 // move to sel0 position
4784 or r8, ((0xf<<pmctr_v_sel1) | (0xf<<pmctr_v_sel2)), r8 // build mux select mask
4785 and r17, r8, r25 // isolate pmctr mux select bits
4786 mfpr r0, ev5__pmctr
4787 bic r0, r8, r0 // clear old mux select bits
4788 or r0,r25, r25 // or in new mux select bits
4789 mtpr r25, ev5__pmctr
4790
4791 // ok, now tackle cbox mux selects
4792 ldah r14, 0xfff0(r31)
4793 zap r14, 0xE0, r14 // Get Cbox IPR base
4794//orig get_bc_ctl_shadow r16 // bc_ctl returned in lower longword
4795// adapted from ev5_pal_macros.mar
4796 mfpr r16, pt_impure
4797 lda r16, CNS_Q_IPR(r16)
4798 RESTORE_SHADOW(r16,CNS_Q_BC_CTL,r16);
4799
4800 lda r8, 0x3F(r31) // build mux select mask
4801 sll r8, bc_ctl_v_pm_mux_sel, r8
4802
4803 and r17, r8, r25 // isolate bc_ctl mux select bits
4804 bic r16, r8, r16 // isolate old mux select bits
4805 or r16, r25, r25 // create new bc_ctl
4806 mb // clear out cbox for future ipr write
4807 stqp r25, ev5__bc_ctl(r14) // store to cbox ipr
4808 mb // clear out cbox for future ipr write
4809
4810//orig update_bc_ctl_shadow r25, r16 // r25=value, r16-overwritten with adjusted impure ptr
4811// adapted from ev5_pal_macros.mar
4812 mfpr r16, pt_impure
4813 lda r16, CNS_Q_IPR(r16)
4814 SAVE_SHADOW(r25,CNS_Q_BC_CTL,r16);
4815
4816 br r31, perfmon_success
4817
4818
4819// requested to disable perf monitoring
4820perfmon_dis:
4821 mfpr r14, ev5__pmctr // read ibox pmctr ipr
4822perfmon_dis_ctr0: // and begin with ctr0
4823 blbc r17, perfmon_dis_ctr1 // do not disable ctr0
4824 lda r8, 3(r31)
4825 sll r8, pmctr_v_ctl0, r8
4826 bic r14, r8, r14 // disable ctr0
4827perfmon_dis_ctr1:
4828 srl r17, 1, r17
4829 blbc r17, perfmon_dis_ctr2 // do not disable ctr1
4830 lda r8, 3(r31)
4831 sll r8, pmctr_v_ctl1, r8
4832 bic r14, r8, r14 // disable ctr1
4833perfmon_dis_ctr2:
4834 srl r17, 1, r17
4835 blbc r17, perfmon_dis_update // do not disable ctr2
4836 lda r8, 3(r31)
4837 sll r8, pmctr_v_ctl2, r8
4838 bic r14, r8, r14 // disable ctr2
4839perfmon_dis_update:
4840 mtpr r14, ev5__pmctr // update pmctr ipr
4841//;the following code is not needed for ev5 pass2 and later, but doesn't hurt anything to leave in
4842// adapted from ev5_pal_macros.mar
4843//orig get_pmctr_ctl r8, r25 // pmctr_ctl bit in r8. adjusted impure pointer in r25
4844 mfpr r25, pt_impure
4845 lda r25, CNS_Q_IPR(r25)
4846 RESTORE_SHADOW(r8,CNS_Q_PM_CTL,r25);
4847
4848 lda r17, 0x3F(r31) // build mask
4849 sll r17, pmctr_v_ctl2, r17 // shift mask to correct position
4850 and r14, r17, r14 // isolate ctl bits
4851 bic r8, r17, r8 // clear out old ctl bits
4852 or r14, r8, r14 // create shadow ctl bits
4853//orig store_reg1 pmctr_ctl, r14, r25, ipr=1 // update pmctr_ctl register
4854//adjusted impure pointer still in r25
4855 SAVE_SHADOW(r14,CNS_Q_PM_CTL,r25);
4856
4857 br r31, perfmon_success
4858
4859
4860// requested to enable perf monitoring
4861//;the following code can be greatly simplified for pass2, but should work fine as is.
4862
4863
4864perfmon_enclr:
4865 lda r9, 1(r31) // set enclr flag
4866 br perfmon_en_cont
4867
4868perfmon_en:
4869 bis r31, r31, r9 // clear enclr flag
4870
4871perfmon_en_cont:
4872 mfpr r8, pt_pcbb // get PCB base
4873//orig get_pmctr_ctl r25, r25
4874 mfpr r25, pt_impure
4875 lda r25, CNS_Q_IPR(r25)
4876 RESTORE_SHADOW(r25,CNS_Q_PM_CTL,r25);
4877
4878 ldqp r16, osfpcb_q_fen(r8) // read DAT/PME/FEN quadword
4879 mfpr r14, ev5__pmctr // read ibox pmctr ipr
4880 srl r16, osfpcb_v_pme, r16 // get pme bit
4881 mfpr r13, icsr
4882 and r16, 1, r16 // isolate pme bit
4883
4884 // this code only needed in pass2 and later
4885//orig sget_addr r12, 1<<icsr_v_pmp, r31
4886 lda r12, 1<<icsr_v_pmp(r31) // pb
4887 bic r13, r12, r13 // clear pmp bit
4888 sll r16, icsr_v_pmp, r12 // move pme bit to icsr<pmp> position
4889 or r12, r13, r13 // new icsr with icsr<pmp> bit set/clear
4890 ev5_pass2 mtpr r13, icsr // update icsr
4891
4892#if ev5_p1 != 0
4893 lda r12, 1(r31)
4894 cmovlbc r25, r12, r16 // r16<0> set if either pme=1 or sprocess=0 (sprocess in bit 0 of r25)
4895#else
4896 bis r31, 1, r16 // set r16<0> on pass2 to update pmctr always (icsr provides real enable)
4897#endif
4898
4899 sll r25, 6, r25 // shift frequency bits into pmctr_v_ctl positions
4900 bis r14, r31, r13 // copy pmctr
4901
4902perfmon_en_ctr0: // and begin with ctr0
4903 blbc r17, perfmon_en_ctr1 // do not enable ctr0
4904
4905 blbc r9, perfmon_en_noclr0 // enclr flag set, clear ctr0 field
4906 lda r8, 0xffff(r31)
4907 zapnot r8, 3, r8 // ctr0<15:0> mask
4908 sll r8, pmctr_v_ctr0, r8
4909 bic r14, r8, r14 // clear ctr bits
4910 bic r13, r8, r13 // clear ctr bits
4911
4912perfmon_en_noclr0:
4913//orig get_addr r8, 3<<pmctr_v_ctl0, r31
4914 LDLI(r8, (3<<pmctr_v_ctl0))
4915 and r25, r8, r12 //isolate frequency select bits for ctr0
4916 bic r14, r8, r14 // clear ctl0 bits in preparation for enabling
4917 or r14,r12,r14 // or in new ctl0 bits
4918
4919perfmon_en_ctr1: // enable ctr1
4920 srl r17, 1, r17 // get ctr1 enable
4921 blbc r17, perfmon_en_ctr2 // do not enable ctr1
4922
4923 blbc r9, perfmon_en_noclr1 // if enclr flag set, clear ctr1 field
4924 lda r8, 0xffff(r31)
4925 zapnot r8, 3, r8 // ctr1<15:0> mask
4926 sll r8, pmctr_v_ctr1, r8
4927 bic r14, r8, r14 // clear ctr bits
4928 bic r13, r8, r13 // clear ctr bits
4929
4930perfmon_en_noclr1:
4931//orig get_addr r8, 3<<pmctr_v_ctl1, r31
4932 LDLI(r8, (3<<pmctr_v_ctl1))
4933 and r25, r8, r12 //isolate frequency select bits for ctr1
4934 bic r14, r8, r14 // clear ctl1 bits in preparation for enabling
4935 or r14,r12,r14 // or in new ctl1 bits
4936
4937perfmon_en_ctr2: // enable ctr2
4938 srl r17, 1, r17 // get ctr2 enable
4939 blbc r17, perfmon_en_return // do not enable ctr2 - return
4940
4941 blbc r9, perfmon_en_noclr2 // if enclr flag set, clear ctr2 field
4942 lda r8, 0x3FFF(r31) // ctr2<13:0> mask
4943 sll r8, pmctr_v_ctr2, r8
4944 bic r14, r8, r14 // clear ctr bits
4945 bic r13, r8, r13 // clear ctr bits
4946
4947perfmon_en_noclr2:
4948//orig get_addr r8, 3<<pmctr_v_ctl2, r31
4949 LDLI(r8, (3<<pmctr_v_ctl2))
4950 and r25, r8, r12 //isolate frequency select bits for ctr2
4951 bic r14, r8, r14 // clear ctl2 bits in preparation for enabling
4952 or r14,r12,r14 // or in new ctl2 bits
4953
4954perfmon_en_return:
4955 cmovlbs r16, r14, r13 // if pme enabled, move enables into pmctr
4956 // else only do the counter clears
4957 mtpr r13, ev5__pmctr // update pmctr ipr
4958
4959//;this code not needed for pass2 and later, but does not hurt to leave it in
4960 lda r8, 0x3F(r31)
4961//orig get_pmctr_ctl r25, r12 // read pmctr ctl; r12=adjusted impure pointer
4962 mfpr r12, pt_impure
4963 lda r12, CNS_Q_IPR(r12)
4964 RESTORE_SHADOW(r25,CNS_Q_PM_CTL,r12);
4965
4966 sll r8, pmctr_v_ctl2, r8 // build ctl mask
4967 and r8, r14, r14 // isolate new ctl bits
4968 bic r25, r8, r25 // clear out old ctl value
4969 or r25, r14, r14 // create new pmctr_ctl
4970//orig store_reg1 pmctr_ctl, r14, r12, ipr=1
4971 SAVE_SHADOW(r14,CNS_Q_PM_CTL,r12); // r12 still has the adjusted impure ptr
4972
4973 br r31, perfmon_success
4974
4975
4976// options...
4977perfmon_ctl:
4978
4979// set mode
4980//orig get_pmctr_ctl r14, r12 // read shadow pmctr ctl; r12=adjusted impure pointer
4981 mfpr r12, pt_impure
4982 lda r12, CNS_Q_IPR(r12)
4983 RESTORE_SHADOW(r14,CNS_Q_PM_CTL,r12);
4984
4985//orig get_addr r8, (1<<pmctr_v_killu) | (1<<pmctr_v_killp) | (1<<pmctr_v_killk), r31 // build mode mask for pmctr register
4986 LDLI(r8, ((1<<pmctr_v_killu) | (1<<pmctr_v_killp) | (1<<pmctr_v_killk)))
4987 mfpr r0, ev5__pmctr
4988 and r17, r8, r25 // isolate pmctr mode bits
4989 bic r0, r8, r0 // clear old mode bits
4990 or r0, r25, r25 // or in new mode bits
4991 mtpr r25, ev5__pmctr
4992
4993//;the following code will only be used in pass2, but should not hurt anything if run in pass1.
4994 mfpr r8, icsr
4995 lda r25, 1<<icsr_v_pma(r31) // set icsr<pma> if r17<0>=0
4996 bic r8, r25, r8 // clear old pma bit
4997 cmovlbs r17, r31, r25 // and clear icsr<pma> if r17<0>=1
4998 or r8, r25, r8
4999 ev5_pass2 mtpr r8, icsr // 4 bubbles to hw_rei
5000 mfpr r31, pt0 // pad icsr write
5001 mfpr r31, pt0 // pad icsr write
5002
5003//;the following code not needed for pass2 and later, but should work anyway.
5004 bis r14, 1, r14 // set for select processes
5005 blbs r17, perfmon_sp // branch if select processes
5006 bic r14, 1, r14 // all processes
5007perfmon_sp:
5008//orig store_reg1 pmctr_ctl, r14, r12, ipr=1 // update pmctr_ctl register
5009 SAVE_SHADOW(r14,CNS_Q_PM_CTL,r12); // r12 still has the adjusted impure ptr
5010 br r31, perfmon_success
5011
5012// counter frequency select
5013perfmon_freq:
5014//orig get_pmctr_ctl r14, r12 // read shadow pmctr ctl; r12=adjusted impure pointer
5015 mfpr r12, pt_impure
5016 lda r12, CNS_Q_IPR(r12)
5017 RESTORE_SHADOW(r14,CNS_Q_PM_CTL,r12);
5018
5019 lda r8, 0x3F(r31)
5020//orig sll r8, pmctr_ctl_v_frq2, r8 // build mask for frequency select field
5021// I guess this should be a shift of 4 bits from the above control register structure .. pb
5022#define pmctr_ctl_v_frq2_SHIFT 4
5023 sll r8, pmctr_ctl_v_frq2_SHIFT, r8 // build mask for frequency select field
5024
5025 and r8, r17, r17
5026 bic r14, r8, r14 // clear out old frequency select bits
5027
5028 or r17, r14, r14 // or in new frequency select info
5029//orig store_reg1 pmctr_ctl, r14, r12, ipr=1 // update pmctr_ctl register
5030 SAVE_SHADOW(r14,CNS_Q_PM_CTL,r12); // r12 still has the adjusted impure ptr
5031
5032 br r31, perfmon_success
5033
5034// read counters
5035perfmon_rd:
5036 mfpr r0, ev5__pmctr
5037 or r0, 1, r0 // or in return status
5038 hw_rei // back to user
5039
5040// write counters
5041perfmon_wr:
5042 mfpr r14, ev5__pmctr
5043 lda r8, 0x3FFF(r31) // ctr2<13:0> mask
5044 sll r8, pmctr_v_ctr2, r8
5045
5046//orig get_addr r9, 0xFFFFFFFF, r31, verify=0 // ctr2<15:0>,ctr1<15:0> mask
5047 LDLI(r9, (0xFFFFFFFF))
5048 sll r9, pmctr_v_ctr1, r9
5049 or r8, r9, r8 // or ctr2, ctr1, ctr0 mask
5050 bic r14, r8, r14 // clear ctr fields
5051 and r17, r8, r25 // clear all but ctr fields
5052 or r25, r14, r14 // write ctr fields
5053 mtpr r14, ev5__pmctr // update pmctr ipr
5054
5055 mfpr r31, pt0 // pad pmctr write (needed only to keep PVC happy)
5056
5057perfmon_success:
5058 or r31, 1, r0 // set success
5059 hw_rei // back to user
5060
5061perfmon_unknown:
5062 or r31, r31, r0 // set fail
5063 hw_rei // back to user
5064
5065#else
5066
5067// end of "real code", start of debug code
5068
5069//+
5070// Debug environment:
5071// (in pass2, always set icsr<pma> to ensure master counter enable is on)
5072// R16 = 0 Write to on-chip performance monitor ipr
5073// r17 = on-chip ipr
5074// r0 = return value of read of on-chip performance monitor ipr
5075// R16 = 1 Setup Cbox mux selects
5076// r17 = Cbox mux selects in same position as in bc_ctl ipr.
5077// r0 = return value of read of on-chip performance monitor ipr
5078//
5079//-
5080pal_perfmon_debug:
5081 mfpr r8, icsr
5082 lda r9, 1<<icsr_v_pma(r31)
5083 bis r8, r9, r8
5084 mtpr r8, icsr
5085
5086 mfpr r0, ev5__pmctr // read old value
5087 bne r16, cbox_mux_sel
5088
5089 mtpr r17, ev5__pmctr // update pmctr ipr
5090 br r31, end_pm
5091
5092cbox_mux_sel:
5093 // ok, now tackle cbox mux selects
5094 ldah r14, 0xfff0(r31)
5095 zap r14, 0xE0, r14 // Get Cbox IPR base
5096//orig get_bc_ctl_shadow r16 // bc_ctl returned
5097 mfpr r16, pt_impure
5098 lda r16, CNS_Q_IPR(r16)
5099 RESTORE_SHADOW(r16,CNS_Q_BC_CTL,r16);
5100
5101 lda r8, 0x3F(r31) // build mux select mask
5102 sll r8, BC_CTL_V_PM_MUX_SEL, r8
5103
5104 and r17, r8, r25 // isolate bc_ctl mux select bits
5105 bic r16, r8, r16 // isolate old mux select bits
5106 or r16, r25, r25 // create new bc_ctl
5107 mb // clear out cbox for future ipr write
5108 stqp r25, ev5__bc_ctl(r14) // store to cbox ipr
5109 mb // clear out cbox for future ipr write
5110//orig update_bc_ctl_shadow r25, r16 // r25=value, r16-overwritten with adjusted impure ptr
5111 mfpr r16, pt_impure
5112 lda r16, CNS_Q_IPR(r16)
5113 SAVE_SHADOW(r25,CNS_Q_BC_CTL,r16);
5114
5115end_pm: hw_rei
5116
5117#endif
5118
5119
5120//;The following code is a workaround for a cpu bug where Istream prefetches to
5121//;super-page address space in user mode may escape off-chip.
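//+
// Added note: hw_rei_update_spe below boils down to the C sketch here. The
// helper names (set_icsr_spe, record_mode, hw_rei, pal_ic_flush) and bit
// names are illustrative.
//
//      unsigned cur  = (ps >> OSFPS_V_MODE) & 1;      // mode being returned to
//      unsigned prev = (pt_misc >> PT_MISC_V_CM) & 1; // mode recorded last time
//      if (cur == prev) {
//          hw_rei();                   // same mode: nothing to update
//      } else {
//          set_icsr_spe(cur == 0);     // super-page enable only in kernel mode
//          record_mode(cur);           // remember the new mode in pt_misc
//          pal_ic_flush();             // ICSR<SPE> change requires an Icache flush
//      }
//-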
5122#if spe_fix != 0
5123
5124 ALIGN_BLOCK
5125hw_rei_update_spe:
5126 mfpr r12, pt_misc // get previous mode
5127 srl r11, osfps_v_mode, r10 // isolate current mode bit
5128 and r10, 1, r10
5129 extbl r12, 7, r8 // get previous mode field
5130 and r8, 1, r8 // isolate previous mode bit
5131 cmpeq r10, r8, r8 // compare previous and current modes
5132 beq r8, hw_rei_update_spe_5_
5133 hw_rei // if same, just return
5134
5135hw_rei_update_spe_5_:
5136
5137#if fill_err_hack != 0
5138
5139 fill_error_hack
5140#endif
5141
5142 mfpr r8, icsr // get current icsr value
5143 ldah r9, (2<<(icsr_v_spe-16))(r31) // get spe bit mask
5144 bic r8, r9, r8 // disable spe
5145 xor r10, 1, r9 // flip mode for new spe bit
5146 sll r9, icsr_v_spe+1, r9 // shift into position
5147 bis r8, r9, r8 // enable/disable spe
5148 lda r9, 1(r31) // now update our flag
5149 sll r9, pt_misc_v_cm, r9 // previous mode saved bit mask
5150 bic r12, r9, r12 // clear saved previous mode
5151 sll r10, pt_misc_v_cm, r9 // current mode saved bit mask
5152 bis r12, r9, r12 // set saved current mode
5153 mtpr r12, pt_misc // update pt_misc
5154 mtpr r8, icsr // update icsr
5155
5156#if osf_chm_fix != 0
5157
5158
5159 blbc r10, hw_rei_update_spe_10_ // branch if not user mode
5160
5161 mb // ensure no outstanding fills
5162 lda r12, 1<<dc_mode_v_dc_ena(r31) // User mode
5163 mtpr r12, dc_mode // Turn on dcache
5164 mtpr r31, dc_flush // and flush it
5165 br r31, pal_ic_flush
5166
5167hw_rei_update_spe_10_: mfpr r9, pt_pcbb // Kernel mode
5168	ldqp	r9, osfpcb_q_fen(r9)	// get FEN
5169 blbc r9, pal_ic_flush // return if FP disabled
5170 mb // ensure no outstanding fills
5171 mtpr r31, dc_mode // turn off dcache
5172#endif
5173
5174
5175 br r31, pal_ic_flush // Pal restriction - must flush Icache if changing ICSR<SPE>
5176#endif
5177
5178
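//+
// Added note: copypal_impl is the body behind CALL_PAL 0xBF (see the branch at
// CallPal_OpcDecBF above): an 8-bytes-at-a-time copy with separate paths for
// aligned and unaligned source/destination. Semantically it behaves like the C
// routine below; the byte loop is only a sketch of the behaviour, not of the
// quadword implementation.
//
//      static void *copypal_sketch(void *dst /* r16 */, const void *src /* r17 */,
//                                  long len /* r18 */)
//      {
//          unsigned char *d = dst;
//          const unsigned char *s = src;
//          while (len-- > 0)
//              *d++ = *s++;
//          return dst;                 // the original r16 comes back in r0
//      }
//-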
5179copypal_impl:
5180 mov r16, r0
5181	ble	r18, finished		// if len <= 0, we are finished
5182 ldq_u r8, 0(r17)
5183 xor r17, r16, r9
5184 and r9, 7, r9
5185 and r16, 7, r10
5186 bne r9, unaligned
5187 beq r10, aligned
5188 ldq_u r9, 0(r16)
5189 addq r18, r10, r18
5190 mskqh r8, r17, r8
5191 mskql r9, r17, r9
5192 bis r8, r9, r8
5193aligned:
5194 subq r18, 1, r10
5195 bic r10, 7, r10
5196 and r18, 7, r18
5197 beq r10, aligned_done
5198loop:
5199 stq_u r8, 0(r16)
5200 ldq_u r8, 8(r17)
5201 subq r10, 8, r10
5202 lda r16,8(r16)
5203 lda r17,8(r17)
5204 bne r10, loop
5205aligned_done:
5206 bne r18, few_left
5207 stq_u r8, 0(r16)
5208 br r31, finished
5209 few_left:
5210 mskql r8, r18, r10
5211 ldq_u r9, 0(r16)
5212 mskqh r9, r18, r9
5213 bis r10, r9, r10
5214 stq_u r10, 0(r16)
5215 br r31, finished
5216unaligned:
5217 addq r17, r18, r25
5218 cmpule r18, 8, r9
5219 bne r9, unaligned_few_left
5220 beq r10, unaligned_dest_aligned
5221 and r16, 7, r10
5222 subq r31, r10, r10
5223 addq r10, 8, r10
5224 ldq_u r9, 7(r17)
5225 extql r8, r17, r8
5226 extqh r9, r17, r9
5227 bis r8, r9, r12
5228 insql r12, r16, r12
5229 ldq_u r13, 0(r16)
5230 mskql r13, r16, r13
5231 bis r12, r13, r12
5232 stq_u r12, 0(r16)
5233 addq r16, r10, r16
5234 addq r17, r10, r17
5235 subq r18, r10, r18
5236 ldq_u r8, 0(r17)
5237unaligned_dest_aligned:
5238 subq r18, 1, r10
5239 bic r10, 7, r10
5240 and r18, 7, r18
5241 beq r10, unaligned_partial_left
5242unaligned_loop:
5243 ldq_u r9, 7(r17)
5244 lda r17, 8(r17)
5245 extql r8, r17, r12
5246 extqh r9, r17, r13
5247 subq r10, 8, r10
5248 bis r12, r13, r13
5249 stq r13, 0(r16)
5250 lda r16, 8(r16)
5251 beq r10, unaligned_second_partial_left
5252 ldq_u r8, 7(r17)
5253 lda r17, 8(r17)
5254 extql r9, r17, r12
5255 extqh r8, r17, r13
5256 bis r12, r13, r13
5257 subq r10, 8, r10
5258 stq r13, 0(r16)
5259 lda r16, 8(r16)
5260 bne r10, unaligned_loop
5261unaligned_partial_left:
5262 mov r8, r9
5263unaligned_second_partial_left:
5264 ldq_u r8, -1(r25)
5265 extql r9, r17, r9
5266 extqh r8, r17, r8
5267 bis r8, r9, r8
5268 bne r18, few_left
5269 stq_u r8, 0(r16)
5270 br r31, finished
5271unaligned_few_left:
5272 ldq_u r9, -1(r25)
5273 extql r8, r17, r8
5274 extqh r9, r17, r9
5275 bis r8, r9, r8
5276 insqh r8, r16, r9
5277 insql r8, r16, r8
5278 lda r12, -1(r31)
5279 mskql r12, r18, r13
5280 cmovne r13, r13, r12
5281 insqh r12, r16, r13
5282 insql r12, r16, r12
5283 addq r16, r18, r10
5284 ldq_u r14, 0(r16)
5285 ldq_u r25, -1(r10)
5286 bic r14, r12, r14
5287 bic r25, r13, r25
5288 and r8, r12, r8
5289 and r9, r13, r9
5290 bis r8, r14, r8
5291 bis r9, r25, r9
5292 stq_u r9, -1(r10)
5293 stq_u r8, 0(r16)
5294finished:
5295 hw_rei