osfpal.S
/*
 * Copyright (c) 2003, 2004, 2005, 2006
 * The Regents of The University of Michigan
 * All Rights Reserved
 *
 * This code is part of the M5 simulator.
 *
 * Permission is granted to use, copy, create derivative works and
 * redistribute this software and such derivative works for any
 * purpose, so long as the copyright notice above, this grant of
 * permission, and the disclaimer below appear in all copies made; and
 * so long as the name of The University of Michigan is not used in
 * any advertising or publicity pertaining to the use or distribution
 * of this software without specific, written prior authorization.
 *
 * THIS SOFTWARE IS PROVIDED AS IS, WITHOUT REPRESENTATION FROM THE
 * UNIVERSITY OF MICHIGAN AS TO ITS FITNESS FOR ANY PURPOSE, AND
 * WITHOUT WARRANTY BY THE UNIVERSITY OF MICHIGAN OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE. THE REGENTS OF THE UNIVERSITY OF MICHIGAN SHALL NOT BE
 * LIABLE FOR ANY DAMAGES, INCLUDING DIRECT, SPECIAL, INDIRECT,
 * INCIDENTAL, OR CONSEQUENTIAL DAMAGES, WITH RESPECT TO ANY CLAIM
 * ARISING OUT OF OR IN CONNECTION WITH THE USE OF THE SOFTWARE, EVEN
 * IF IT HAS BEEN OR IS HEREAFTER ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGES.
 *
 * Modified for M5 by: Ali G. Saidi
 *                     Nathan L. Binkert
 */
31
32/*
33 * Copyright 1992, 1993, 1994, 1995 Hewlett-Packard Development
34 * Company, L.P.
35 *
36 * Permission is hereby granted, free of charge, to any person
37 * obtaining a copy of this software and associated documentation
38 * files (the "Software"), to deal in the Software without
39 * restriction, including without limitation the rights to use, copy,
40 * modify, merge, publish, distribute, sublicense, and/or sell copies
41 * of the Software, and to permit persons to whom the Software is
42 * furnished to do so, subject to the following conditions:
43 *
44 * The above copyright notice and this permission notice shall be
45 * included in all copies or substantial portions of the Software.
46 *
47 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
48 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
49 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
50 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
51 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
52 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
53 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
54 * SOFTWARE.
55 */
56
57// modified to use the Hudson style "impure.h" instead of ev5_impure.sdl
58// since we don't have a mechanism to expand the data structures.... pb Nov/95
59#include "ev5_defs.h"
60#include "ev5_impure.h"
61#include "ev5_alpha_defs.h"
62#include "ev5_paldef.h"
63#include "ev5_osfalpha_defs.h"
64#include "fromHudsonMacros.h"
65#include "fromHudsonOsf.h"
66#include "dc21164FromGasSources.h"
67
// Debug-output macro: stubbed to a nop in this build (a real-console PAL
// build would emit character 'c' to a debug port).
 68#define DEBUGSTORE(c) nop
 69
// Print the exception address followed by CR/LF.
// NOTE: clobbers r25 (bsr return linkage into put_exc_addr).
 70#define DEBUG_EXC_ADDR()\
 71 bsr r25, put_exc_addr; \
 72 DEBUGSTORE(13) ; \
 73 DEBUGSTORE(10)
 74
 75// This is the fix for the user-mode super page references causing the
 76// machine to crash.
 77#define hw_rei_spe hw_rei
 78
// PAL version identification word, placed at reset+8 (see osfpal_version_l
// use below): <pal_type@16> ! <vmaj@8> ! <vmin@0>.
 79#define vmaj 1
 80#define vmin 18
 81#define vms_pal 1
 82#define osf_pal 2
 83#define pal_type osf_pal
 84#define osfpal_version_l ((pal_type<<16) | (vmaj<<8) | (vmin<<0))
85
86
87///////////////////////////
88// PALtemp register usage
89///////////////////////////
90
91// The EV5 Ibox holds 24 PALtemp registers. This maps the OSF PAL usage
92// for these PALtemps:
93//
94// pt0 local scratch
95// pt1 local scratch
96// pt2 entUna pt_entUna
97// pt3 CPU specific impure area pointer pt_impure
98// pt4 memory management temp
99// pt5 memory management temp
100// pt6 memory management temp
101// pt7 entIF pt_entIF
102// pt8 intmask pt_intmask
103// pt9 entSys pt_entSys
104// pt10
105// pt11 entInt pt_entInt
106// pt12 entArith pt_entArith
107// pt13 reserved for system specific PAL
108// pt14 reserved for system specific PAL
109// pt15 reserved for system specific PAL
110// pt16 MISC: scratch ! WHAMI<7:0> ! 0 0 0 MCES<4:0> pt_misc, pt_whami,
111// pt_mces
112// pt17 sysval pt_sysval
113// pt18 usp pt_usp
114// pt19 ksp pt_ksp
115// pt20 PTBR pt_ptbr
116// pt21 entMM pt_entMM
117// pt22 kgp pt_kgp
118// pt23 PCBB pt_pcbb
119//
120//
121
122
123/////////////////////////////
124// PALshadow register usage
125/////////////////////////////
126
127//
128// EV5 shadows R8-R14 and R25 when in PALmode and ICSR<shadow_enable> = 1.
129// This maps the OSF PAL usage of R8 - R14 and R25:
130//
131// r8 ITBmiss/DTBmiss scratch
132// r9 ITBmiss/DTBmiss scratch
133// r10 ITBmiss/DTBmiss scratch
134// r11 PS
135// r12 local scratch
136// r13 local scratch
137// r14 local scratch
138// r25 local scratch
139//
140
141
142
143// .sbttl "PALcode configuration options"
144
145// There are a number of options that may be assembled into this version of
146// PALcode. They should be adjusted in a prefix assembly file (i.e. do not edit
147// the following). The options that can be adjusted cause the resultant PALcode
148// to reflect the desired target system.
149
150// multiprocessor support can be enabled for a max of n processors by
151// setting the following to the number of processors on the system.
152// Note that this is really the max cpuid.
153
// Maximum cpuid supported by this PAL image (multiprocessor support).
// Per the configuration-options comment above, this is meant to be
// adjustable from a prefix assembly file; guard the default with #ifndef
// so such an override actually takes effect.  (The original code defined
// max_cpuid unconditionally and then tested #ifndef, which was dead.)
// Default preserved: 1 (uniprocessor).
#ifndef max_cpuid
#define max_cpuid 1
#endif

#define osf_svmin 1
// High PAL version word, placed at reset+12: <max_cpuid@16> ! <osf_svmin@0>.
#define osfpal_version_h ((max_cpuid<<16) | (osf_svmin<<0))
161
162//
163// RESET - Reset Trap Entry Point
164//
165// RESET - offset 0000
166// Entry:
167// Vectored into via hardware trap on reset, or branched to
168// on swppal.
169//
170// r0 = whami
171// r1 = pal_base
172// r2 = base of scratch area
173// r3 = halt code
174//
175//
176// Function:
177//
178//
179
// Reset entry point (offset 0x0000).  Hardware vectors here on reset, or
// swppal branches here.  All real work is done in the system-specific
// sys_reset flow; the br leaves the link PC in r1.  The two .long words
// at reset+8 publish the PAL version at a known location.
 180 .text 0
 181 . = 0x0000
 182 .globl _start
 183 .globl Pal_Base
 184_start:
 185Pal_Base:
 186 HDW_VECTOR(PAL_RESET_ENTRY)
 187Trap_Reset:
 188 nop
 189 /*
 190 * store into r1
 191 */
 192 br r1,sys_reset
 193
 194 // Specify PAL version info as a constant
 195 // at a known location (reset + 8).
 196
 197 .long osfpal_version_l // <pal_type@16> ! <vmaj@8> ! <vmin@0>
 198 .long osfpal_version_h // <max_cpuid@16> ! <osf_svmin@0>
 199 .long 0
 200 .long 0
pal_impure_start:
// Per-CPU impure (scratch/save) area pointer slot.
 202 .quad 0
pal_debug_ptr:
 204 .quad 0 // reserved for debug pointer ; 20
205
206
207//
208// IACCVIO - Istream Access Violation Trap Entry Point
209//
210// IACCVIO - offset 0080
211// Entry:
212// Vectored into via hardware trap on Istream access violation or sign check error on PC.
213//
214// Function:
215// Build stack frame
216// a0 <- Faulting VA
217// a1 <- MMCSR (1 for ACV)
218// a2 <- -1 (for ifetch fault)
219// vector via entMM
220//
221
// Istream access violation (offset 0080): enter kernel mode, swap to the
// kernel stack if we trapped from user mode, build an OSF stack frame,
// and dispatch through entMM with a0=faulting PC, a1=ACV, a2=-1 (istream).
// r25 holds the old mode sign-extended into the MS bit (bge => kernel).
 222 HDW_VECTOR(PAL_IACCVIO_ENTRY)
Trap_Iaccvio:
 224 DEBUGSTORE(0x42)
 225 sll r11, 63-osfps_v_mode, r25 // Shift mode up to MS bit
 226 mtpr r31, ev5__ps // Set Ibox current mode to kernel
 227
 228 bis r11, r31, r12 // Save PS
 229 bge r25, TRAP_IACCVIO_10_ // no stack swap needed if cm=kern
 230
 231
 232 mtpr r31, ev5__dtb_cm // Set Mbox current mode to kernel -
 233 // no virt ref for next 2 cycles
 234 mtpr r30, pt_usp // save user stack
 235
 236 bis r31, r31, r12 // Set new PS
 237 mfpr r30, pt_ksp
 238
TRAP_IACCVIO_10_:
 240 lda sp, 0-osfsf_c_size(sp)// allocate stack space
 241 mfpr r14, exc_addr // get pc
 242
 243 stq r16, osfsf_a0(sp) // save regs
 244 bic r14, 3, r16 // pass pc/va as a0 (strip PAL-mode bits)
 245
 246 stq r17, osfsf_a1(sp) // a1
 247 or r31, mmcsr_c_acv, r17 // pass mm_csr as a1
 248
 249 stq r18, osfsf_a2(sp) // a2
 250 mfpr r13, pt_entmm // get entry point
 251
 252 stq r11, osfsf_ps(sp) // save old ps
 253 bis r12, r31, r11 // update ps
 254
 255 stq r16, osfsf_pc(sp) // save pc
 256 stq r29, osfsf_gp(sp) // save gp
 257
 258 mtpr r13, exc_addr // load exc_addr with entMM
 259 // 1 cycle to hw_rei
 260 mfpr r29, pt_kgp // get the kgp
 261
 262 subq r31, 1, r18 // pass flag of istream, as a2
 263 hw_rei_spe
264
265
266//
267// INTERRUPT - Interrupt Trap Entry Point
268//
269// INTERRUPT - offset 0100
270// Entry:
271// Vectored into via trap on hardware interrupt
272//
273// Function:
274// check for halt interrupt
275// check for passive release (current ipl geq requestor)
276// if necessary, switch to kernel mode push stack frame,
277// update ps (including current mode and ipl copies), sp, and gp
278// pass the interrupt info to the system module
279//
280//
// Hardware interrupt entry (offset 0100).  Checks for halt interrupt and
// passive release, swaps to the kernel stack if needed, builds a stack
// frame, translates the EV5 interrupt level to an OSF IPL (and back via
// pt_intmask), then branches to the system module's sys_interrupt.
 281 HDW_VECTOR(PAL_INTERRUPT_ENTRY)
Trap_Interrupt:
 283 mfpr r13, ev5__intid // Fetch level of interruptor
 284 mfpr r25, ev5__isr // Fetch interrupt summary register
 285
 286 srl r25, isr_v_hlt, r9 // Get HLT bit
 287 mfpr r14, ev5__ipl
 288
 289 mtpr r31, ev5__dtb_cm // Set Mbox current mode to kern
 290 blbs r9, sys_halt_interrupt // halt_interrupt if HLT bit set
 291
 292 cmple r13, r14, r8 // R8 = 1 if intid .less than or eql. ipl
 293 bne r8, sys_passive_release // Passive release if current interrupt is lt or eq ipl
 294
 295 and r11, osfps_m_mode, r10 // get mode bit
 296 beq r10, TRAP_INTERRUPT_10_ // Skip stack swap in kernel
 297
 298 mtpr r30, pt_usp // save user stack
 299 mfpr r30, pt_ksp // get kern stack
 300
TRAP_INTERRUPT_10_:
 302 lda sp, (0-osfsf_c_size)(sp)// allocate stack space
 303 mfpr r14, exc_addr // get pc
 304
 305 stq r11, osfsf_ps(sp) // save ps
 306 stq r14, osfsf_pc(sp) // save pc
 307
 308 stq r29, osfsf_gp(sp) // push gp
 309 stq r16, osfsf_a0(sp) // a0
 310
// pvc_violate 354 // ps is cleared anyway, if store to stack faults.
 312 mtpr r31, ev5__ps // Set Ibox current mode to kernel
 313 stq r17, osfsf_a1(sp) // a1
 314
 315 stq r18, osfsf_a2(sp) // a2
 316 subq r13, 0x11, r12 // Start to translate from EV5IPL->OSFIPL
 317
 318 srl r12, 1, r8 // 1d, 1e: ipl 6. 1f: ipl 7.
 319 subq r13, 0x1d, r9 // Check for 1d, 1e, 1f
 320
 321 cmovge r9, r8, r12 // if .ge. 1d, then take shifted value
 322 bis r12, r31, r11 // set new ps
 323
 324 mfpr r12, pt_intmask
 325 and r11, osfps_m_ipl, r14 // Isolate just new ipl (not really needed, since all non-ipl bits zeroed already)
 326
 327 /*
 328 * Lance had space problems. We don't.
 329 */
 330 extbl r12, r14, r14 // Translate new OSFIPL->EV5IPL via pt_intmask byte table
 331 mfpr r29, pt_kgp // update gp
 332 mtpr r14, ev5__ipl // load the new IPL into Ibox
 333 br r31, sys_interrupt // Go handle interrupt
334
335
336
337//
338// ITBMISS - Istream TBmiss Trap Entry Point
339//
340// ITBMISS - offset 0180
341// Entry:
342// Vectored into via hardware trap on Istream translation buffer miss.
343//
344// Function:
345// Do a virtual fetch of the PTE, and fill the ITB if the PTE is valid.
346// Can trap into DTBMISS_DOUBLE.
347// This routine can use the PALshadow registers r8, r9, and r10
348//
349//
350
// ITB miss (offset 0180): virtual fetch of the PTE via ifault_va_form;
// the ld_vpte may itself TB-miss, trapping to DTBMISS_DOUBLE (hence the
// exc_addr save/restore through r10).  Valid PTE -> load ITB; invalid or
// FOE set -> go to the respective handler.  Uses PALshadow r8/r9/r10.
 351 HDW_VECTOR(PAL_ITB_MISS_ENTRY)
Trap_Itbmiss:
 353 // Real MM mapping
 354 nop
 355 mfpr r8, ev5__ifault_va_form // Get virtual address of PTE.
 356
 357 nop
 358 mfpr r10, exc_addr // Get PC of faulting instruction in case of DTBmiss.
 359
pal_itb_ldq:
 361 ld_vpte r8, 0(r8) // Get PTE, traps to DTBMISS_DOUBLE in case of TBmiss
 362 mtpr r10, exc_addr // Restore exc_address if there was a trap.
 363
 364 mfpr r31, ev5__va // Unlock VA in case there was a double miss
 365 nop
 366
 367 and r8, osfpte_m_foe, r25 // Look for FOE set.
 368 blbc r8, invalid_ipte_handler // PTE not valid.
 369
 370 nop
 371 bne r25, foe_ipte_handler // FOE is set
 372
 373 nop
 374 mtpr r8, ev5__itb_pte // Ibox remembers the VA, load the PTE into the ITB.
 375
 376 hw_rei_stall //
377
378
379//
380// DTBMISS_SINGLE - Dstream Single TBmiss Trap Entry Point
381//
382// DTBMISS_SINGLE - offset 0200
383// Entry:
384// Vectored into via hardware trap on Dstream single translation
385// buffer miss.
386//
387// Function:
388// Do a virtual fetch of the PTE, and fill the DTB if the PTE is valid.
389// Can trap into DTBMISS_DOUBLE.
390// This routine can use the PALshadow registers r8, r9, and r10
391//
392
// DTB single miss (offset 0200): virtual fetch of the PTE via va_form;
// the ld_vpte can re-miss into DTBMISS_DOUBLE, so exc_addr is stashed in
// pt6 and restored on the way out.  Invalid PTE -> invalid_dpte_handler.
// Uses PALshadow r8/r9/r10 (r9 keeps mm_stat for the double-miss flow).
 393 HDW_VECTOR(PAL_DTB_MISS_ENTRY)
Trap_Dtbmiss_Single:
 395 mfpr r8, ev5__va_form // Get virtual address of PTE - 1 cycle delay. E0.
 396 mfpr r10, exc_addr // Get PC of faulting instruction in case of error. E1.
 397
// DEBUGSTORE(0x45)
// DEBUG_EXC_ADDR()
 400 // Real MM mapping
 401 mfpr r9, ev5__mm_stat // Get read/write bit. E0.
 402 mtpr r10, pt6 // Stash exc_addr away
 403
pal_dtb_ldq:
 405 ld_vpte r8, 0(r8) // Get PTE, traps to DTBMISS_DOUBLE in case of TBmiss
 406 nop // Pad MF VA
 407
 408 mfpr r10, ev5__va // Get original faulting VA for TB load. E0.
 409 nop
 410
 411 mtpr r8, ev5__dtb_pte // Write DTB PTE part. E0.
 412 blbc r8, invalid_dpte_handler // Handle invalid PTE
 413
 414 mtpr r10, ev5__dtb_tag // Write DTB TAG part, completes DTB load. No virt ref for 3 cycles.
 415 mfpr r10, pt6
 416
 417 // Following 2 instructions take 2 cycles
 418 mtpr r10, exc_addr // Return linkage in case we trapped. E1.
 419 mfpr r31, pt0 // Pad the write to dtb_tag
 420
 421 hw_rei // Done, return
422
423
424//
425// DTBMISS_DOUBLE - Dstream Double TBmiss Trap Entry Point
426//
427//
428// DTBMISS_DOUBLE - offset 0280
429// Entry:
430// Vectored into via hardware trap on Double TBmiss from single
431// miss flows.
432//
433// r8 - faulting VA
434// r9 - original MMstat
435// r10 - original exc_addr (both itb,dtb miss)
436// pt6 - original exc_addr (dtb miss flow only)
437// VA IPR - locked with original faulting VA
438//
439// Function:
440// Get PTE, if valid load TB and return.
441// If not valid then take TNV/ACV exception.
442//
443// pt4 and pt5 are reserved for this flow.
444//
445//
446//
447
// DTB double miss (offset 0280): the virtual PTE fetch in a single-miss
// flow itself missed.  Walks the page table physically (levels 2 and 3;
// level 1 is skipped thanks to the virtual PTE scheme), loads the DTB,
// and returns.  Invalid PTE at either level -> double_pte_inv.
// On entry: r8=faulting VA, r9=orig mm_stat, r10/pt6=orig exc_addr,
// VA IPR locked.  pt4/pt5 are scratch reserved for this flow.
 448 HDW_VECTOR(PAL_DOUBLE_MISS_ENTRY)
Trap_Dtbmiss_double:
 450 mtpr r8, pt4 // save r8 to do exc_addr check
 451 mfpr r8, exc_addr
 452 blbc r8, Trap_Dtbmiss_Single //if not in palmode, should be in the single routine, dummy!
 453 mfpr r8, pt4 // restore r8
 454 nop
 455 mtpr r22, pt5 // Get some scratch space. E1.
 456 // Due to virtual scheme, we can skip the first lookup and go
 457 // right to fetch of level 2 PTE
 458 sll r8, (64-((2*page_seg_size_bits)+page_offset_size_bits)), r22 // Clean off upper bits of VA
 459 mtpr r21, pt4 // Get some scratch space. E1.
 460
 461 srl r22, 61-page_seg_size_bits, r22 // Get Va<seg1>*8
 462 mfpr r21, pt_ptbr // Get physical address of the page table.
 463
 464 nop
 465 addq r21, r22, r21 // Index into page table for level 2 PTE.
 466
 467 sll r8, (64-((1*page_seg_size_bits)+page_offset_size_bits)), r22 // Clean off upper bits of VA
 468 ldq_p r21, 0(r21) // Get level 2 PTE (addr<2:0> ignored)
 469
 470 srl r22, 61-page_seg_size_bits, r22 // Get Va<seg1>*8
 471 blbc r21, double_pte_inv // Check for Invalid PTE.
 472
 473 srl r21, 32, r21 // extract PFN from PTE
 474 sll r21, page_offset_size_bits, r21 // get PFN * 2^13 for add to <seg3>*8
 475
 476 addq r21, r22, r21 // Index into page table for level 3 PTE.
 477 nop
 478
 479 ldq_p r21, 0(r21) // Get level 3 PTE (addr<2:0> ignored)
 480 blbc r21, double_pte_inv // Check for invalid PTE.
 481
 482 mtpr r21, ev5__dtb_pte // Write the PTE. E0.
 483 mfpr r22, pt5 // Restore scratch register
 484
 485 mtpr r8, ev5__dtb_tag // Write the TAG. E0. No virtual references in subsequent 3 cycles.
 486 mfpr r21, pt4 // Restore scratch register
 487
 488 nop // Pad write to tag.
 489 nop
 490
 491 nop // Pad write to tag.
 492 nop
 493
 494 hw_rei
495
496
497
498//
499// UNALIGN -- Dstream unalign trap
500//
501// UNALIGN - offset 0300
502// Entry:
503// Vectored into via hardware trap on unaligned Dstream reference.
504//
505// Function:
506// Build stack frame
507// a0 <- Faulting VA
508// a1 <- Opcode
509// a2 <- src/dst register number
510// vector via entUna
511//
512
// Unaligned Dstream reference (offset 0300).  Loads targeting r31/f31
// are dismissed outright; otherwise build a stack frame with a0=VA,
// a1=opcode, a2=Ra, and finish via unalign_trap_cont -> entUna.
// Bugchecks if the unaligned reference came from PAL mode itself.
 513 HDW_VECTOR(PAL_UNALIGN_ENTRY)
Trap_Unalign:
/* DEBUGSTORE(0x47)*/
 516 sll r11, 63-osfps_v_mode, r25 // Shift mode up to MS bit
 517 mtpr r31, ev5__ps // Set Ibox current mode to kernel
 518
 519 mfpr r8, ev5__mm_stat // Get mmstat --ok to use r8, no tbmiss
 520 mfpr r14, exc_addr // get pc
 521
 522 srl r8, mm_stat_v_ra, r13 // Shift Ra field to ls bits
 523 blbs r14, pal_pal_bug_check // Bugcheck if unaligned in PAL
 524
 525 blbs r8, UNALIGN_NO_DISMISS // lsb only set on store or fetch_m
 526 // not set, must be a load
 527 and r13, 0x1F, r8 // isolate ra
 528
 529 cmpeq r8, 0x1F, r8 // check for r31/F31
 530 bne r8, dfault_fetch_ldr31_err // if its a load to r31 or f31 -- dismiss the fault
 531
UNALIGN_NO_DISMISS:
 533 bis r11, r31, r12 // Save PS
 534 bge r25, UNALIGN_NO_DISMISS_10_ // no stack swap needed if cm=kern
 535
 536
 537 mtpr r31, ev5__dtb_cm // Set Mbox current mode to kernel -
 538 // no virt ref for next 2 cycles
 539 mtpr r30, pt_usp // save user stack
 540
 541 bis r31, r31, r12 // Set new PS
 542 mfpr r30, pt_ksp
 543
UNALIGN_NO_DISMISS_10_:
 545 mfpr r25, ev5__va // Unlock VA
 546 lda sp, 0-osfsf_c_size(sp)// allocate stack space
 547
 548 mtpr r25, pt0 // Stash VA
 549 stq r18, osfsf_a2(sp) // a2
 550
 551 stq r11, osfsf_ps(sp) // save old ps
 552 srl r13, mm_stat_v_opcode-mm_stat_v_ra, r25// Isolate opcode
 553
 554 stq r29, osfsf_gp(sp) // save gp
 555 addq r14, 4, r14 // inc PC past the ld/st
 556
 557 stq r17, osfsf_a1(sp) // a1
 558 and r25, mm_stat_m_opcode, r17// Clean opcode for a1
 559
 560 stq r16, osfsf_a0(sp) // save regs
 561 mfpr r16, pt0 // a0 <- va/unlock
 562
 563 stq r14, osfsf_pc(sp) // save pc
 564 mfpr r25, pt_entuna // get entry point
 565
 566
 567 bis r12, r31, r11 // update ps
 568 br r31, unalign_trap_cont
569
570
571//
572// DFAULT - Dstream Fault Trap Entry Point
573//
574// DFAULT - offset 0380
575// Entry:
576// Vectored into via hardware trap on dstream fault or sign check
577// error on DVA.
578//
579// Function:
580// Ignore faults on FETCH/FETCH_M
581// Check for DFAULT in PAL
582// Build stack frame
583// a0 <- Faulting VA
584// a1 <- MMCSR (1 for ACV, 2 for FOR, 4 for FOW)
585// a2 <- R/W
586// vector via entMM
587//
588//
// Dstream fault (offset 0380).  Dismisses faults on FETCH/FETCH_M and on
// loads to r31/f31; faults taken while in PAL go to dfault_in_pal.
// Otherwise swap stacks if needed and finish in dfault_trap_cont, which
// vectors via entMM with a0=VA, a1=MMCSR (ACV/FOR/FOW), a2=R/W.
 589 HDW_VECTOR(PAL_D_FAULT_ENTRY)
Trap_Dfault:
// DEBUGSTORE(0x48)
 592 sll r11, 63-osfps_v_mode, r25 // Shift mode up to MS bit
 593 mtpr r31, ev5__ps // Set Ibox current mode to kernel
 594
 595 mfpr r13, ev5__mm_stat // Get mmstat
 596 mfpr r8, exc_addr // get pc, preserve r14
 597
 598 srl r13, mm_stat_v_opcode, r9 // Shift opcode field to ls bits
 599 blbs r8, dfault_in_pal
 600
 601 bis r8, r31, r14 // move exc_addr to correct place
 602 bis r11, r31, r12 // Save PS
 603
 604 mtpr r31, ev5__dtb_cm // Set Mbox current mode to kernel -
 605 // no virt ref for next 2 cycles
 606 and r9, mm_stat_m_opcode, r9 // Clean all but opcode
 607
 608 cmpeq r9, evx_opc_sync, r9 // Is the opcode fetch/fetchm?
 609 bne r9, dfault_fetch_ldr31_err // Yes, dismiss the fault
 610
 611 //dismiss exception if load to r31/f31
 612 blbs r13, dfault_no_dismiss // mm_stat<0> set on store or fetchm
 613
 614 // not a store or fetch, must be a load
 615 srl r13, mm_stat_v_ra, r9 // Shift rnum to low bits
 616
 617 and r9, 0x1F, r9 // isolate rnum
 618 nop
 619
 620 cmpeq r9, 0x1F, r9 // Is the rnum r31 or f31?
 621 bne r9, dfault_fetch_ldr31_err // Yes, dismiss the fault
 622
dfault_no_dismiss:
 624 and r13, 0xf, r13 // Clean extra bits in mm_stat
 625 bge r25, dfault_trap_cont // no stack swap needed if cm=kern
 626
 627
 628 mtpr r30, pt_usp // save user stack
 629 bis r31, r31, r12 // Set new PS
 630
 631 mfpr r30, pt_ksp
 632 br r31, dfault_trap_cont
633
634
635//
636// MCHK - Machine Check Trap Entry Point
637//
638// MCHK - offset 0400
639// Entry:
640// Vectored into via hardware trap on machine check.
641//
642// Function:
643//
644//
645
// Machine check (offset 0400): flush the Icache, then hand off to the
// system-specific machine-check flow.
 646 HDW_VECTOR(PAL_MCHK_ENTRY)
Trap_Mchk:
 648 DEBUGSTORE(0x49)
 649 mtpr r31, ic_flush_ctl // Flush the Icache
 650 br r31, sys_machine_check
651
652
653//
654// OPCDEC - Illegal Opcode Trap Entry Point
655//
656// OPCDEC - offset 0480
657// Entry:
658// Vectored into via hardware trap on illegal opcode.
659//
660// Build stack frame
661// a0 <- code
662// a1 <- unpred
663// a2 <- unpred
664// vector via entIF
665//
666//
667
// Illegal opcode (offset 0480).  Bugchecks if taken in PAL mode; else
// switch to kernel, swap stacks if needed, push a frame with the PC
// advanced past the faulting instruction, and vector via entIF with
// a0=osf_a0_opdec (a1/a2 unpredictable).
 668 HDW_VECTOR(PAL_OPCDEC_ENTRY)
Trap_Opcdec:
 670 DEBUGSTORE(0x4a)
//simos DEBUG_EXC_ADDR()
 672 sll r11, 63-osfps_v_mode, r25 // Shift mode up to MS bit
 673 mtpr r31, ev5__ps // Set Ibox current mode to kernel
 674
 675 mfpr r14, exc_addr // get pc
 676 blbs r14, pal_pal_bug_check // check opcdec in palmode
 677
 678 bis r11, r31, r12 // Save PS
 679 bge r25, TRAP_OPCDEC_10_ // no stack swap needed if cm=kern
 680
 681
 682 mtpr r31, ev5__dtb_cm // Set Mbox current mode to kernel -
 683 // no virt ref for next 2 cycles
 684 mtpr r30, pt_usp // save user stack
 685
 686 bis r31, r31, r12 // Set new PS
 687 mfpr r30, pt_ksp
 688
TRAP_OPCDEC_10_:
 690 lda sp, 0-osfsf_c_size(sp)// allocate stack space
 691 addq r14, 4, r14 // inc pc
 692
 693 stq r16, osfsf_a0(sp) // save regs
 694 bis r31, osf_a0_opdec, r16 // set a0
 695
 696 stq r11, osfsf_ps(sp) // save old ps
 697 mfpr r13, pt_entif // get entry point
 698
 699 stq r18, osfsf_a2(sp) // a2
 700 stq r17, osfsf_a1(sp) // a1
 701
 702 stq r29, osfsf_gp(sp) // save gp
 703 stq r14, osfsf_pc(sp) // save pc
 704
 705 bis r12, r31, r11 // update ps
 706 mtpr r13, exc_addr // load exc_addr with entIF
 707 // 1 cycle to hw_rei, E1
 708
 709 mfpr r29, pt_kgp // get the kgp, E1
 710
 711 hw_rei_spe // done, E1
712
713
714//
715// ARITH - Arithmetic Exception Trap Entry Point
716//
717// ARITH - offset 0500
718// Entry:
 719//	Vectored into via hardware trap on arithmetic exception.
720//
721// Function:
722// Build stack frame
723// a0 <- exc_sum
724// a1 <- exc_mask
725// a2 <- unpred
726// vector via entArith
727//
728//
// Arithmetic exception (offset 0500).  Bugchecks if taken in PAL mode;
// else build a frame with a0=exc_sum (shifted past SWC), a1=exc_mask,
// unlock exc_sum/exc_mask, and vector via entArith.
 729 HDW_VECTOR(PAL_ARITH_ENTRY)
Trap_Arith:
 731 DEBUGSTORE(0x4b)
 732 and r11, osfps_m_mode, r12 // get mode bit
 733 mfpr r31, ev5__va // unlock mbox
 734
 735 bis r11, r31, r25 // save ps
 736 mfpr r14, exc_addr // get pc
 737
 738 nop
 739 blbs r14, pal_pal_bug_check // arith trap from PAL
 740
 741 mtpr r31, ev5__dtb_cm // Set Mbox current mode to kernel -
 742 // no virt ref for next 2 cycles
 743 beq r12, TRAP_ARITH_10_ // if zero we are in kern now
 744
 745 bis r31, r31, r25 // set the new ps
 746 mtpr r30, pt_usp // save user stack
 747
 748 nop
 749 mfpr r30, pt_ksp // get kern stack
 750
TRAP_ARITH_10_: lda sp, 0-osfsf_c_size(sp) // allocate stack space
 752 mtpr r31, ev5__ps // Set Ibox current mode to kernel
 753
 754 nop // Pad current mode write and stq
 755 mfpr r13, ev5__exc_sum // get the exc_sum
 756
 757 mfpr r12, pt_entarith
 758 stq r14, osfsf_pc(sp) // save pc
 759
 760 stq r17, osfsf_a1(sp)
 761 mfpr r17, ev5__exc_mask // Get exception register mask IPR - no mtpr exc_sum in next cycle
 762
 763 stq r11, osfsf_ps(sp) // save ps
 764 bis r25, r31, r11 // set new ps
 765
 766 stq r16, osfsf_a0(sp) // save regs
 767 srl r13, exc_sum_v_swc, r16 // shift data to correct position
 768
 769 stq r18, osfsf_a2(sp)
// pvc_violate 354 // ok, but make sure reads of exc_mask/sum are not in same trap shadow
 771 mtpr r31, ev5__exc_sum // Unlock exc_sum and exc_mask
 772
 773 stq r29, osfsf_gp(sp)
 774 mtpr r12, exc_addr // Set new PC - 1 bubble to hw_rei - E1
 775
 776 mfpr r29, pt_kgp // get the kern gp - E1
 777 hw_rei_spe // done - E1
778
779
780//
781// FEN - Illegal Floating Point Operation Trap Entry Point
782//
783// FEN - offset 0580
784// Entry:
785// Vectored into via hardware trap on illegal FP op.
786//
787// Function:
788// Build stack frame
789// a0 <- code
790// a1 <- unpred
791// a2 <- unpred
792// vector via entIF
793//
794//
795
// FP-disabled fault (offset 0580).  Bugchecks if taken in PAL mode.
// Builds a frame and vectors via entIF with a0=osf_a0_fen.  If ICSR
// shows FP actually enabled, the trap is really an OPCDEC: branch to
// fen_to_opcdec, which advances the PC and substitutes a0=osf_a0_opdec.
 796 HDW_VECTOR(PAL_FEN_ENTRY)
Trap_Fen:
 798 sll r11, 63-osfps_v_mode, r25 // Shift mode up to MS bit
 799 mtpr r31, ev5__ps // Set Ibox current mode to kernel
 800
 801 mfpr r14, exc_addr // get pc
 802 blbs r14, pal_pal_bug_check // check opcdec in palmode
 803
 804 mfpr r13, ev5__icsr
 805 nop
 806
 807 bis r11, r31, r12 // Save PS
 808 bge r25, TRAP_FEN_10_ // no stack swap needed if cm=kern
 809
 810 mtpr r31, ev5__dtb_cm // Set Mbox current mode to kernel -
 811 // no virt ref for next 2 cycles
 812 mtpr r30, pt_usp // save user stack
 813
 814 bis r31, r31, r12 // Set new PS
 815 mfpr r30, pt_ksp
 816
TRAP_FEN_10_:
 818 lda sp, 0-osfsf_c_size(sp)// allocate stack space
 819 srl r13, icsr_v_fpe, r25 // Shift FP enable to bit 0
 820
 821
 822 stq r16, osfsf_a0(sp) // save regs
 823 mfpr r13, pt_entif // get entry point
 824
 825 stq r18, osfsf_a2(sp) // a2
 826 stq r11, osfsf_ps(sp) // save old ps
 827
 828 stq r29, osfsf_gp(sp) // save gp
 829 bis r12, r31, r11 // set new ps
 830
 831 stq r17, osfsf_a1(sp) // a1
 832 blbs r25,fen_to_opcdec // If FP is enabled, this is really OPCDEC.
 833
 834 bis r31, osf_a0_fen, r16 // set a0
 835 stq r14, osfsf_pc(sp) // save pc
 836
 837 mtpr r13, exc_addr // load exc_addr with entIF
 838 // 1 cycle to hw_rei -E1
 839
 840 mfpr r29, pt_kgp // get the kgp -E1
 841
 842 hw_rei_spe // done -E1
 843
// FEN trap was taken, but the fault is really opcdec.
 845 ALIGN_BRANCH
fen_to_opcdec:
 847 addq r14, 4, r14 // save PC+4
 848 bis r31, osf_a0_opdec, r16 // set a0
 849
 850 stq r14, osfsf_pc(sp) // save pc
 851 mtpr r13, exc_addr // load exc_addr with entIF
 852 // 1 cycle to hw_rei
 853
 854 mfpr r29, pt_kgp // get the kgp
 855 hw_rei_spe // done
856
857
858
859//////////////////////////////////////////////////////////////////////////////
860// Misc handlers - Start area for misc code.
861//////////////////////////////////////////////////////////////////////////////
862
863//
864// dfault_trap_cont
865// A dfault trap has been taken. The sp has been updated if necessary.
866// Push a stack frame a vector via entMM.
867//
868// Current state:
869// r12 - new PS
870// r13 - MMstat
871// VA - locked
872//
873//
// dfault_trap_cont: finish a dstream fault.  sp already swapped if
// needed.  Push the frame (a0=VA, a1=fault bits with ACV dominating,
// a2=R/W) and vector via entMM.  In: r12=new PS, r13=MMstat, VA locked.
 874 ALIGN_BLOCK
dfault_trap_cont:
 876 lda sp, 0-osfsf_c_size(sp)// allocate stack space
 877 mfpr r25, ev5__va // Fetch VA/unlock
 878
 879 stq r18, osfsf_a2(sp) // a2
 880 and r13, 1, r18 // Clean r/w bit for a2
 881
 882 stq r16, osfsf_a0(sp) // save regs
 883 bis r25, r31, r16 // a0 <- va
 884
 885 stq r17, osfsf_a1(sp) // a1
 886 srl r13, 1, r17 // shift fault bits to right position
 887
 888 stq r11, osfsf_ps(sp) // save old ps
 889 bis r12, r31, r11 // update ps
 890
 891 stq r14, osfsf_pc(sp) // save pc
 892 mfpr r25, pt_entmm // get entry point
 893
 894 stq r29, osfsf_gp(sp) // save gp
 895 cmovlbs r17, 1, r17 // a2. acv overrides fox.
 896
 897 mtpr r25, exc_addr // load exc_addr with entMM
 898 // 1 cycle to hw_rei
 899 mfpr r29, pt_kgp // get the kgp
 900
 901 hw_rei_spe // done
902
903//
904//unalign_trap_cont
905// An unalign trap has been taken. Just need to finish up a few things.
906//
907// Current state:
908// r25 - entUna
909// r13 - shifted MMstat
910//
911//
// unalign_trap_cont: finish the unalign trap -- frame already built by
// Trap_Unalign.  Set exc_addr to entUna, load kgp, and extract Ra into
// a2.  In: r25=entUna, r13=shifted MMstat.
 912 ALIGN_BLOCK
unalign_trap_cont:
 914 mtpr r25, exc_addr // load exc_addr with entUna
 915 // 1 cycle to hw_rei
 916
 917
 918 mfpr r29, pt_kgp // get the kgp
 919 and r13, mm_stat_m_ra, r18 // Clean Ra for a2
 921 hw_rei_spe // done
922
923
924
925//
926// dfault_in_pal
927// Dfault trap was taken, exc_addr points to a PAL PC.
928// r9 - mmstat<opcode> right justified
929// r8 - exception address
930//
931// These are the cases:
932// opcode was STQ -- from a stack builder, KSP not valid halt
933// r14 - original exc_addr
934// r11 - original PS
935// opcode was STL_C -- rti or retsys clear lock_flag by stack write,
936// KSP not valid halt
937// r11 - original PS
938// r14 - original exc_addr
939// opcode was LDQ -- retsys or rti stack read, KSP not valid halt
940// r11 - original PS
941// r14 - original exc_addr
942// opcode was HW_LD -- itbmiss or dtbmiss, bugcheck due to fault on page tables
943// r10 - original exc_addr
944// r11 - original PS
945//
946//
947//
// dfault_in_pal: a dstream fault hit while executing PAL code.
// If the faulting PC is one of the two virtual-PTE loads (pal_itb_ldq /
// pal_dtb_ldq), this is a fault on the page tables -> bugcheck with the
// original exc_addr (r10).  Otherwise fall through into ksp_inval_halt:
// the fault came from a stack read/write (STQ/STL_C/LDQ in rti/retsys/
// stack-builder flows), meaning the kernel stack pointer is invalid.
// In: r8=exception address, r9=mmstat<opcode>, r10/r14=orig exc_addr,
// r11=orig PS.
 948 ALIGN_BLOCK
dfault_in_pal:
 950 DEBUGSTORE(0x50)
 951 bic r8, 3, r8 // Clean PC
 952 mfpr r9, pal_base
 953
 954 mfpr r31, va // unlock VA
 955
 956 // if not real_mm, should never get here from miss flows
 957
 958 subq r9, r8, r8 // pal_base - offset
 959
 960 lda r9, pal_itb_ldq-pal_base(r8)
 961 nop
 962
 963 beq r9, dfault_do_bugcheck
 964 lda r9, pal_dtb_ldq-pal_base(r8)
 965
 966 beq r9, dfault_do_bugcheck
 967
//
// KSP invalid halt case -- (reached by fall-through from above)
ksp_inval_halt:
 971 DEBUGSTORE(76)
 972 bic r11, osfps_m_mode, r11 // set ps to kernel mode
 973 mtpr r0, pt0
 974
 975 mtpr r31, dtb_cm // Make sure that the CM IPRs are all kernel mode
 976 mtpr r31, ips
 977
 978 mtpr r14, exc_addr // Set PC to instruction that caused trouble
 979 bsr r0, pal_update_pcb // update the pcb
 980
 981 lda r0, hlt_c_ksp_inval(r31) // set halt code to hw halt
 982 br r31, sys_enter_console // enter the console
 983
 984 ALIGN_BRANCH
dfault_do_bugcheck:
 986 bis r10, r31, r14 // bugcheck expects exc_addr in r14
 987 br r31, pal_pal_bug_check
988
989
990//
991// dfault_fetch_ldr31_err - ignore faults on fetch(m) and loads to r31/f31
992// On entry -
993// r14 - exc_addr
994// VA is locked
995//
996//
// dfault_fetch_ldr31_err: dismiss faults on FETCH/FETCH_M and loads to
// r31/f31 -- restore the original mode, unlock the Mbox, and resume at
// the instruction after the faulting one.  In: r14=exc_addr, VA locked.
 997 ALIGN_BLOCK
dfault_fetch_ldr31_err:
 999 mtpr r11, ev5__dtb_cm
 1000 mtpr r11, ev5__ps // Make sure ps hasn't changed
 1001
 1002 mfpr r31, va // unlock the mbox
 1003 addq r14, 4, r14 // inc the pc to skip the fetch
 1004
 1005 mtpr r14, exc_addr // give ibox new PC
 1006 mfpr r31, pt0 // pad exc_addr write
 1007
 1008 hw_rei
1009
1010
1011
 1012 ALIGN_BLOCK
//
// sys_from_kern
// callsys from kernel mode - OS bugcheck machine check.  Backs the PC
// up to the call_pal instruction and raises an OS-bugcheck mchk.
//
sys_from_kern:
 1019 mfpr r14, exc_addr // PC points to call_pal
 1020 subq r14, 4, r14
 1021
 1022 lda r25, mchk_c_os_bugcheck(r31) // fetch mchk code
 1023 br r31, pal_pal_mchk
1024
1025
1026// Continuation of long call_pal flows
1027//
1028// wrent_tbl
1029// Table to write *int in paltemps.
1030// 4 instructions/entry
1031// r16 has new value
1032//
1033//
// wrent_tbl: dispatch table for the wrent call_pal -- one 4-instruction
// entry per entry-point kind (Int, Arith, MM, IF, Una, Sys), each
// storing r16 into the corresponding PALtemp.  The mfpr pt0 pads the
// EV5 mt->mf PALtemp timing rule before hw_rei.
 1034 ALIGN_BLOCK
wrent_tbl:
//orig pvc_jsr wrent, dest=1
 1037 nop
 1038 mtpr r16, pt_entint

 1040 mfpr r31, pt0 // Pad for mt->mf paltemp rule
 1041 hw_rei


//orig pvc_jsr wrent, dest=1
 1045 nop
 1046 mtpr r16, pt_entarith

 1048 mfpr r31, pt0 // Pad for mt->mf paltemp rule
 1049 hw_rei


//orig pvc_jsr wrent, dest=1
 1053 nop
 1054 mtpr r16, pt_entmm

 1056 mfpr r31, pt0 // Pad for mt->mf paltemp rule
 1057 hw_rei


//orig pvc_jsr wrent, dest=1
 1061 nop
 1062 mtpr r16, pt_entif

 1064 mfpr r31, pt0 // Pad for mt->mf paltemp rule
 1065 hw_rei


//orig pvc_jsr wrent, dest=1
 1069 nop
 1070 mtpr r16, pt_entuna

 1072 mfpr r31, pt0 // Pad for mt->mf paltemp rule
 1073 hw_rei


//orig pvc_jsr wrent, dest=1
 1077 nop
 1078 mtpr r16, pt_entsys

 1080 mfpr r31, pt0 // Pad for mt->mf paltemp rule
 1081 hw_rei
1082
// tbi_tbl: dispatch table for the tbi call_pal -- one 4-instruction
// entry per tbi type, indexed by a0 (-2 tbia, -1 tbiap, 0 unused,
// 1 tbisi, 2 tbisd, 3 tbis).  r17 carries the VA for the single-entry
// invalidates.  hw_rei_stall is required after ITB/DTB writes.
 1083 ALIGN_BLOCK
tbi_tbl:
 1090 // -2 tbia
//orig pvc_jsr tbi, dest=1
 1092 mtpr r31, ev5__dtb_ia // Flush DTB
 1093 mtpr r31, ev5__itb_ia // Flush ITB

 1095 hw_rei_stall

 1097 nop // Pad table

 1099 // -1 tbiap
//orig pvc_jsr tbi, dest=1
 1101 mtpr r31, ev5__dtb_iap // Flush DTB
 1102 mtpr r31, ev5__itb_iap // Flush ITB

 1104 hw_rei_stall

 1106 nop // Pad table


 1109 // 0 unused
//orig pvc_jsr tbi, dest=1
 1111 hw_rei // Pad table
 1112 nop
 1113 nop
 1114 nop


 1117 // 1 tbisi
//orig pvc_jsr tbi, dest=1

 1120 nop
 1121 nop
 1122 mtpr r17, ev5__itb_is // Flush ITB
 1123 hw_rei_stall

 1125 // 2 tbisd
//orig pvc_jsr tbi, dest=1
 1127 mtpr r17, ev5__dtb_is // Flush DTB.
 1128 nop

 1130 nop
 1131 hw_rei_stall


 1134 // 3 tbis
//orig pvc_jsr tbi, dest=1
 1136 mtpr r17, ev5__dtb_is // Flush DTB
 1137 br r31, tbi_finish
 1138 ALIGN_BRANCH
tbi_finish:
 1140 mtpr r17, ev5__itb_is // Flush ITB
 1141 hw_rei_stall
1142
1143
1144
	ALIGN_BLOCK
//
// bpt_bchk_common:
//	Finish up the bpt/bchk instructions
//
//	On entry the stack frame is partially built (a0/a1 already stored);
//	r12 = old PS, r14 = PC, sp = kernel stack frame.
//	Completes the frame and vectors through entIF.
//
bpt_bchk_common:
	stq	r18, osfsf_a2(sp) 	// a2
	mfpr	r13, pt_entif		// get entry point

	stq	r12, osfsf_ps(sp)	// save old ps
	stq	r14, osfsf_pc(sp)	// save pc

	stq	r29, osfsf_gp(sp)	// save gp
	mtpr	r13, exc_addr		// load exc_addr with entIF
					// 1 cycle to hw_rei

	mfpr	r29, pt_kgp		// get the kgp

	hw_rei_spe			// done - rei with stack-pointer/superpage enable
1165
1166
	ALIGN_BLOCK
//
// rti_to_user
//	Finish up the rti instruction
//
//	r11 = new (user) PS, r25 = kernel SP to bank away. Swaps to the
//	saved user stack pointer before returning.
//
rti_to_user:
	mtpr	r11, ev5__dtb_cm	// set Mbox current mode - no virt ref for 2 cycles
	mtpr	r11, ev5__ps		// set Ibox current mode - 2 bubble to hw_rei

	mtpr	r31, ev5__ipl		// set the ipl. No hw_rei for 2 cycles
	mtpr	r25, pt_ksp		// save off incase RTI to user

	mfpr	r30, pt_usp		// sp <- saved user stack pointer
	hw_rei_spe			// and back
1181
1182
	ALIGN_BLOCK
//
// rti_to_kern
//	Finish up the rti instruction
//
//	r12 = saved PS from the frame, r25 = popped SP. Extracts the IPL
//	field, translates it through pt_intmask, and returns at that IPL.
//
rti_to_kern:
	and	r12, osfps_m_ipl, r11	// clean ps - keep only the IPL field
	mfpr	r12, pt_intmask		// get int mask

	extbl	r12, r11, r12		// get mask for this ipl (byte-indexed table in pt_intmask)
	mtpr	r25, pt_ksp		// save off incase RTI to user

	mtpr	r12, ev5__ipl		// set the new ipl.
	or	r25, r31, sp		// sp

//	pvc_violate 217			// possible hidden mt->mf ipl not a problem in callpals
	hw_rei
1200
	ALIGN_BLOCK
//
// swpctx_cont
//	Finish up the swpctx instruction
//
//	On entry: r16 = new PCB (physical), r22 = new PME field, r23 = new
//	ASN/CC value, r24 = icsr<FPE,PMP> mask, r25 = icsr, r12 = new FEN,
//	r13 = current cycle counter.  Instruction pairing and the distance
//	between the ASN/ICSR writes and hw_rei_stall are deliberate.
//
swpctx_cont:

	bic	r25, r24, r25		// clean icsr<FPE,PMP>
	sll	r12, icsr_v_fpe, r12	// shift new fen to pos

	ldq_p	r14, osfpcb_q_mmptr(r16)// get new mmptr
	srl	r22, osfpcb_v_pme, r22	// get pme down to bit 0

	or	r25, r12, r25		// icsr with new fen
	srl	r23, 32, r24		// move asn to low asn pos

	and	r22, 1, r22		// isolate the PME bit
	sll	r24, itb_asn_v_asn, r12	// position ASN for the ITB

	sll	r22, icsr_v_pmp, r22	// position PME for icsr
	nop

	or	r25, r22, r25		// icsr with new pme

	sll	r24, dtb_asn_v_asn, r24	// position ASN for the DTB

	subl	r23, r13, r13		// gen new cc offset
	mtpr	r12, itb_asn		// no hw_rei_stall in 0,1,2,3,4

	mtpr	r24, dtb_asn		// Load up new ASN
	mtpr	r25, icsr		// write the icsr

	sll	r14, page_offset_size_bits, r14	// Move PTBR into internal position.
	ldq_p	r25, osfpcb_q_usp(r16)	// get new usp

	insll	r13, 4, r13		// >> 32 - position cc offset in the high half
//	pvc_violate 379			// ldq_p can't trap except replay.  only problem if mf same ipr in same shadow
	mtpr	r14, pt_ptbr		// load the new ptbr

	mtpr	r13, cc			// set new offset
	ldq_p	r30, osfpcb_q_ksp(r16)	// get new ksp

//	pvc_violate 379			// ldq_p can't trap except replay. only problem if mf same ipr in same shadow
	mtpr	r25, pt_usp		// save usp

no_pm_change_10_:	hw_rei_stall	// back we go
1248
1249 ALIGN_BLOCK
1250//
1251// swppal_cont - finish up the swppal call_pal
1252//
1253
1254swppal_cont:
1255 mfpr r2, pt_misc // get misc bits
1256 sll r0, pt_misc_v_switch, r0 // get the "I've switched" bit
1257 or r2, r0, r2 // set the bit
1258 mtpr r31, ev5__alt_mode // ensure alt_mode set to 0 (kernel)
1259 mtpr r2, pt_misc // update the chip
1260
1261 or r3, r31, r4
1262 mfpr r3, pt_impure // pass pointer to the impure area in r3
1263//orig fix_impure_ipr r3 // adjust impure pointer for ipr read
1264//orig restore_reg1 bc_ctl, r1, r3, ipr=1 // pass cns_bc_ctl in r1
1265//orig restore_reg1 bc_config, r2, r3, ipr=1 // pass cns_bc_config in r2
1266//orig unfix_impure_ipr r3 // restore impure pointer
1267 lda r3, CNS_Q_IPR(r3)
1268 RESTORE_SHADOW(r1,CNS_Q_BC_CTL,r3);
1269 RESTORE_SHADOW(r1,CNS_Q_BC_CFG,r3);
1270 lda r3, -CNS_Q_IPR(r3)
1271
1272 or r31, r31, r0 // set status to success
1273// pvc_violate 1007
1274 jmp r31, (r4) // and call our friend, it's her problem now
1275
1276
swppal_fail:
	// swppal could not be performed: return a nonzero status to the caller.
	addq	r0, 1, r0		// set unknown pal or not loaded
	hw_rei				// and return
1280
1281
// .sbttl	"Memory management"

	ALIGN_BLOCK
//
//foe_ipte_handler
// IFOE detected on level 3 pte, sort out FOE vs ACV
//
// on entry:
//	with
//	R8	 = pte
//	R10	 = pc
//
// Function
//	Determine TNV vs ACV vs FOE. Build stack and dispatch
//	Will not be here if TNV.
//	Decision: if the (mode-adjusted) KRE bit of the PTE is set the fault
//	is FOE, otherwise it is an access violation (ACV).
//

foe_ipte_handler:
	sll	r11, 63-osfps_v_mode, r25 // Shift mode up to MS bit
	mtpr	r31, ev5__ps		// Set Ibox current mode to kernel

	bis	r11, r31, r12		// Save PS for stack write
	bge	r25, foe_ipte_handler_10_ // no stack swap needed if cm=kern


	mtpr	r31, ev5__dtb_cm	// Set Mbox current mode to kernel -
					//     no virt ref for next 2 cycles
	mtpr	r30, pt_usp		// save user stack

	bis	r31, r31, r11		// Set new PS
	mfpr	r30, pt_ksp		// sp <- kernel stack

	srl	r8, osfpte_v_ure-osfpte_v_kre, r8 // move pte user bits to kern
	nop

foe_ipte_handler_10_:	srl	r8, osfpte_v_kre, r25	// get kre to <0>
	lda	sp, 0-osfsf_c_size(sp)// allocate stack space

	or	r10, r31, r14		// Save pc/va in case TBmiss or fault on stack
	mfpr	r13, pt_entmm		// get entry point

	stq	r16, osfsf_a0(sp)	// a0
	or	r14, r31, r16		// pass pc/va as a0

	stq	r17, osfsf_a1(sp)	// a1
	nop

	stq	r18, osfsf_a2(sp) 	// a2
	lda	r17, mmcsr_c_acv(r31)	// assume ACV

	stq	r16, osfsf_pc(sp)	// save pc
	cmovlbs r25, mmcsr_c_foe, r17	// otherwise FOE (pte<kre> set => readable => FOE)

	stq	r12, osfsf_ps(sp)	// save ps
	subq	r31, 1, r18		// pass flag of istream as a2 (-1 = istream)

	stq	r29, osfsf_gp(sp)	// save gp
	mtpr	r13, exc_addr		// set vector address

	mfpr	r29, pt_kgp		// load kgp
	hw_rei_spe			// out to exec
1343
	ALIGN_BLOCK
//
//invalid_ipte_handler
// TNV detected on level 3 pte, sort out TNV vs ACV
//
// on entry:
//	with
//	R8	 = pte
//	R10	 = pc
//
// Function
//	Determine TNV vs ACV. Build stack and dispatch.
//	Decision: the (mode-adjusted) KRE bit of the PTE distinguishes a
//	plain translation-not-valid fault from an access violation.
//

invalid_ipte_handler:
	sll	r11, 63-osfps_v_mode, r25 // Shift mode up to MS bit
	mtpr	r31, ev5__ps		// Set Ibox current mode to kernel

	bis	r11, r31, r12		// Save PS for stack write
	bge	r25, invalid_ipte_handler_10_ // no stack swap needed if cm=kern


	mtpr	r31, ev5__dtb_cm	// Set Mbox current mode to kernel -
					//     no virt ref for next 2 cycles
	mtpr	r30, pt_usp		// save user stack

	bis	r31, r31, r11		// Set new PS
	mfpr	r30, pt_ksp		// sp <- kernel stack

	srl	r8, osfpte_v_ure-osfpte_v_kre, r8 // move pte user bits to kern
	nop

invalid_ipte_handler_10_:	srl	r8, osfpte_v_kre, r25	// get kre to <0>
	lda	sp, 0-osfsf_c_size(sp)// allocate stack space

	or	r10, r31, r14		// Save pc/va in case TBmiss on stack
	mfpr	r13, pt_entmm		// get entry point

	stq	r16, osfsf_a0(sp)	// a0
	or	r14, r31, r16		// pass pc/va as a0

	stq	r17, osfsf_a1(sp)	// a1
	nop

	stq	r18, osfsf_a2(sp) 	// a2
	and	r25, 1, r17		// Isolate kre

	stq	r16, osfsf_pc(sp)	// save pc
	xor	r17, 1, r17		// map to acv/tnv as a1 (kre=1 -> 0=tnv, kre=0 -> 1=acv)

	stq	r12, osfsf_ps(sp)	// save ps
	subq	r31, 1, r18		// pass flag of istream as a2 (-1 = istream)

	stq	r29, osfsf_gp(sp)	// save gp
	mtpr	r13, exc_addr		// set vector address

	mfpr	r29, pt_kgp		// load kgp
	hw_rei_spe			// out to exec
1402
1403
1404
1405
	ALIGN_BLOCK
//
//invalid_dpte_handler
// INVALID detected on level 3 pte, sort out TNV vs ACV
//
// on entry:
//	with
//	R10	 = va
//	R8	 = pte
//	R9	 = mm_stat
//	PT6	 = pc
//
// Function
//	Determine TNV vs ACV. Build stack and dispatch
//	First dismisses the fault entirely for FETCH/FETCH_M and for loads
//	targeting r31/f31 (architecturally no-ops); a fault while already in
//	PALmode means a bad kernel stack and goes to tnv_in_pal.
//


invalid_dpte_handler:
	mfpr	r12, pt6		// recover the faulting PC
	blbs	r12, tnv_in_pal		// Special handler if original faulting reference was in PALmode

	bis	r12, r31, r14		// save PC in case of tbmiss or fault
	srl	r9, mm_stat_v_opcode, r25 // shift opc to <0>

	mtpr	r11, pt0		// Save PS for stack write
	and	r25, mm_stat_m_opcode, r25 // isolate opcode

	cmpeq	r25, evx_opc_sync, r25	// is it FETCH/FETCH_M?
	blbs	r25, nmiss_fetch_ldr31_err // yes

	//dismiss exception if load to r31/f31
	blbs	r9, invalid_dpte_no_dismiss // mm_stat<0> set on store or fetchm

	// not a store or fetch, must be a load
	srl	r9, mm_stat_v_ra, r25	// Shift rnum to low bits

	and	r25, 0x1F, r25		// isolate rnum
	nop

	cmpeq	r25, 0x1F, r25		// Is the rnum r31 or f31?
	bne	r25, nmiss_fetch_ldr31_err // Yes, dismiss the fault

invalid_dpte_no_dismiss:
	sll	r11, 63-osfps_v_mode, r25 // Shift mode up to MS bit
	mtpr	r31, ev5__ps		// Set Ibox current mode to kernel

	mtpr	r31, ev5__dtb_cm	// Set Mbox current mode to kernel -
					//     no virt ref for next 2 cycles
	bge	r25, invalid_dpte_no_dismiss_10_ // no stack swap needed if cm=kern

	srl	r8, osfpte_v_ure-osfpte_v_kre, r8 // move pte user bits to kern
	mtpr	r30, pt_usp		// save user stack

	bis	r31, r31, r11		// Set new PS
	mfpr	r30, pt_ksp		// sp <- kernel stack

invalid_dpte_no_dismiss_10_:	srl	r8, osfpte_v_kre, r12	// get kre to <0>
	lda	sp, 0-osfsf_c_size(sp)// allocate stack space

	or	r10, r31, r25		// Save va in case TBmiss on stack
	and	r9, 1, r13		// save r/w flag (mm_stat<0>: 1 = write)

	stq	r16, osfsf_a0(sp)	// a0
	or	r25, r31, r16		// pass va as a0

	stq	r17, osfsf_a1(sp)	// a1
	or	r31, mmcsr_c_acv, r17	// assume acv

	srl	r12, osfpte_v_kwe-osfpte_v_kre, r25 // get write enable to <0>
	stq	r29, osfsf_gp(sp)	// save gp

	stq	r18, osfsf_a2(sp)	// a2
	cmovlbs r13, r25, r12		// if write access move acv based on write enable

	or	r13, r31, r18		// pass flag of dstream access and read vs write
	mfpr	r25, pt0		// get ps

	stq	r14, osfsf_pc(sp)	// save pc
	mfpr	r13, pt_entmm		// get entry point

	stq	r25, osfsf_ps(sp)	// save ps
	mtpr	r13, exc_addr		// set vector address

	mfpr	r29, pt_kgp		// load kgp
	cmovlbs	r12, mmcsr_c_tnv, r17	// make p2 be tnv if access ok else acv

	hw_rei_spe			// out to exec
1493
1494//
1495//
1496// We come here if we are erring on a dtb_miss, and the instr is a
1497// fetch, fetch_m, of load to r31/f31.
1498// The PC is incremented, and we return to the program.
1499// essentially ignoring the instruction and error.
1500//
1501//
1502 ALIGN_BLOCK
1503nmiss_fetch_ldr31_err:
1504 mfpr r12, pt6
1505 addq r12, 4, r12 // bump pc to pc+4
1506
1507 mtpr r12, exc_addr // and set entry point
1508 mfpr r31, pt0 // pad exc_addr write
1509
1510 hw_rei //
1511
	ALIGN_BLOCK
//
// double_pte_inv
//	We had a single tbmiss which turned into a double tbmiss which found
//	an invalid PTE.  Return to single miss with a fake pte, and the invalid
//	single miss flow will report the error.
//
// on entry:
//	r21  	PTE
//	r22	available
//	VA IPR locked with original fault VA
//	pt4  	saved r21
//	pt5  	saved r22
//	pt6	original exc_addr
//
// on return to tbmiss flow:
//	r8	fake PTE
//
//	The fake PTE has all protection bits set when the level-2 PTE was
//	kernel-readable (so the retry reports TNV), or all clear otherwise
//	(so the retry reports ACV).
//
double_pte_inv:
	srl	r21, osfpte_v_kre, r21	// get the kre bit to <0>
	mfpr	r22, exc_addr		// get the pc

	lda	r22, 4(r22)		// inc the pc
	lda	r8, osfpte_m_prot(r31)	// make a fake pte with xre and xwe set

	cmovlbc r21, r31, r8		// set to all 0 for acv if pte<kre> is 0
	mtpr	r22, exc_addr		// set for rei

	mfpr	r21, pt4		// restore regs
	mfpr	r22, pt5		// restore regs

	hw_rei				// back to tb miss
1546
	ALIGN_BLOCK
//
// tnv_in_pal
//	The only places in pal that ld or store are the
//	stack builders, rti or retsys.  Any of these mean we
//	need to take a ksp not valid halt.
//
//
tnv_in_pal:


	br	r31, ksp_inval_halt	// kernel stack is bad - halt the machine
1559
1560
// .sbttl	"Icache flush routines"

	ALIGN_BLOCK
//
// Common Icache flush routine.
//
//	The nops below are required timing padding: the Icache flush takes
//	effect only after the already-fetched instructions drain, so exactly
//	44 nops cover the prefetch/issue pipeline before the hw_rei_stall.
//
pal_ic_flush:
	nop
	mtpr	r31, ev5__ic_flush_ctl		// Icache flush - E1
	nop
	nop

// Now, do 44 NOPs.  3RFB prefetches (24) + IC buffer,IB,slot,issue (20)
	nop
	nop
	nop
	nop

	nop
	nop
	nop
	nop

	nop
	nop		// 10

	nop
	nop
	nop
	nop

	nop
	nop
	nop
	nop

	nop
	nop		// 20

	nop
	nop
	nop
	nop

	nop
	nop
	nop
	nop

	nop
	nop		// 30
	nop
	nop
	nop
	nop

	nop
	nop
	nop
	nop

	nop
	nop		// 40

	nop
	nop

one_cycle_and_hw_rei:			// also used as a two-nop delayed rei entry point
	nop
	nop

	hw_rei_stall
1635
	ALIGN_BLOCK
//
// osfpal_calpal_opcdec
//	Here for all opcdec CALL_PALs
//
//	Build stack frame
//	a0 <- code
//	a1 <- unpred
//	a2 <- unpred
//	vector via entIF
//
//	Swaps to the kernel stack when entered from user mode, then builds
//	the standard OSF exception frame and dispatches to the OS entIF
//	handler with a0 = osf_a0_opdec.
//

osfpal_calpal_opcdec:
	sll	r11, 63-osfps_v_mode, r25 // Shift mode up to MS bit
	mtpr	r31, ev5__ps		// Set Ibox current mode to kernel

	mfpr	r14, exc_addr		// get pc
	nop

	bis	r11, r31, r12		// Save PS for stack write
	bge	r25, osfpal_calpal_opcdec_10_ // no stack swap needed if cm=kern


	mtpr	r31, ev5__dtb_cm	// Set Mbox current mode to kernel -
					//     no virt ref for next 2 cycles
	mtpr	r30, pt_usp		// save user stack

	bis	r31, r31, r11		// Set new PS
	mfpr	r30, pt_ksp		// sp <- kernel stack

osfpal_calpal_opcdec_10_:
	lda	sp, 0-osfsf_c_size(sp)// allocate stack space
	nop

	stq	r16, osfsf_a0(sp)	// save regs
	bis	r31, osf_a0_opdec, r16	// set a0

	stq	r18, osfsf_a2(sp) 	// a2
	mfpr	r13, pt_entif		// get entry point

	stq	r12, osfsf_ps(sp)	// save old ps
	stq	r17, osfsf_a1(sp)	// a1

	stq	r14, osfsf_pc(sp)	// save pc
	nop

	stq	r29, osfsf_gp(sp) 	// save gp
	mtpr	r13, exc_addr		// load exc_addr with entIF
					// 1 cycle to hw_rei

	mfpr	r29, pt_kgp		// get the kgp


	hw_rei_spe			// done
1691
1692
1693
1694
1695
1696//
1697//pal_update_pcb
1698// Update the PCB with the current SP, AST, and CC info
1699//
1700// r0 - return linkage
1701//
1702 ALIGN_BLOCK
1703
1704pal_update_pcb:
1705 mfpr r12, pt_pcbb // get pcbb
1706 and r11, osfps_m_mode, r25 // get mode
1707 beq r25, pal_update_pcb_10_ // in kern? no need to update user sp
1708 mtpr r30, pt_usp // save user stack
1709 stq_p r30, osfpcb_q_usp(r12) // store usp
1710 br r31, pal_update_pcb_20_ // join common
1711pal_update_pcb_10_: stq_p r30, osfpcb_q_ksp(r12) // store ksp
1712pal_update_pcb_20_: rpcc r13 // get cyccounter
1713 srl r13, 32, r14 // move offset
1714 addl r13, r14, r14 // merge for new time
1715 stl_p r14, osfpcb_l_cc(r12) // save time
1716
1717//orig pvc_jsr updpcb, bsr=1, dest=1
1718 ret r31, (r0)
1719
1720
1721//
1722// pal_save_state
1723//
1724// Function
1725// All chip state saved, all PT's, SR's FR's, IPR's
1726//
1727//
1728// Regs' on entry...
1729//
1730// R0 = halt code
1731// pt0 = r0
1732// R1 = pointer to impure
1733// pt4 = r1
1734// R3 = return addr
1735// pt5 = r3
1736//
1737// register usage:
1738// r0 = halt_code
1739// r1 = addr of impure area
1740// r3 = return_address
1741// r4 = scratch
1742//
1743//
1744
1745 ALIGN_BLOCK
1746 .globl pal_save_state
1747pal_save_state:
1748//
1749//
1750// start of implementation independent save routine
1751//
1752// the impure area is larger than the addressibility of hw_ld and hw_st
1753// therefore, we need to play some games: The impure area
1754// is informally divided into the "machine independent" part and the
1755// "machine dependent" part. The state that will be saved in the
1756// "machine independent" part are gpr's, fpr's, hlt, flag, mchkflag (use (un)fix_impure_gpr macros).
1757// All others will be in the "machine dependent" part (use (un)fix_impure_ipr macros).
1758// The impure pointer will need to be adjusted by a different offset for each. The store/restore_reg
1759// macros will automagically adjust the offset correctly.
1760//
1761
1762// The distributed code is commented out and followed by corresponding SRC code.
1763// Beware: SAVE_IPR and RESTORE_IPR blow away r0(v0)
1764
1765//orig fix_impure_gpr r1 // adjust impure area pointer for stores to "gpr" part of impure area
1766 lda r1, 0x200(r1) // Point to center of CPU segment
1767//orig store_reg1 flag, r31, r1, ipr=1 // clear dump area flag
1768 SAVE_GPR(r31,CNS_Q_FLAG,r1) // Clear the valid flag
1769//orig store_reg1 hlt, r0, r1, ipr=1
1770 SAVE_GPR(r0,CNS_Q_HALT,r1) // Save the halt code
1771
1772 mfpr r0, pt0 // get r0 back //orig
1773//orig store_reg1 0, r0, r1 // save r0
1774 SAVE_GPR(r0,CNS_Q_GPR+0x00,r1) // Save r0
1775
1776 mfpr r0, pt4 // get r1 back //orig
1777//orig store_reg1 1, r0, r1 // save r1
1778 SAVE_GPR(r0,CNS_Q_GPR+0x08,r1) // Save r1
1779
1780//orig store_reg 2 // save r2
1781 SAVE_GPR(r2,CNS_Q_GPR+0x10,r1) // Save r2
1782
1783 mfpr r0, pt5 // get r3 back //orig
1784//orig store_reg1 3, r0, r1 // save r3
1785 SAVE_GPR(r0,CNS_Q_GPR+0x18,r1) // Save r3
1786
1787 // reason code has been saved
1788 // r0 has been saved
1789 // r1 has been saved
1790 // r2 has been saved
1791 // r3 has been saved
1792 // pt0, pt4, pt5 have been lost
1793
1794 //
1795 // Get out of shadow mode
1796 //
1797
1798 mfpr r2, icsr // Get icsr
1799 ldah r0, (1<<(icsr_v_sde-16))(r31)
1800 bic r2, r0, r0 // ICSR with SDE clear
1801 mtpr r0, icsr // Turn off SDE
1802
1803 mfpr r31, pt0 // SDE bubble cycle 1
1804 mfpr r31, pt0 // SDE bubble cycle 2
1805 mfpr r31, pt0 // SDE bubble cycle 3
1806 nop
1807
1808
1809 // save integer regs R4-r31
1810 SAVE_GPR(r4,CNS_Q_GPR+0x20,r1)
1811 SAVE_GPR(r5,CNS_Q_GPR+0x28,r1)
1812 SAVE_GPR(r6,CNS_Q_GPR+0x30,r1)
1813 SAVE_GPR(r7,CNS_Q_GPR+0x38,r1)
1814 SAVE_GPR(r8,CNS_Q_GPR+0x40,r1)
1815 SAVE_GPR(r9,CNS_Q_GPR+0x48,r1)
1816 SAVE_GPR(r10,CNS_Q_GPR+0x50,r1)
1817 SAVE_GPR(r11,CNS_Q_GPR+0x58,r1)
1818 SAVE_GPR(r12,CNS_Q_GPR+0x60,r1)
1819 SAVE_GPR(r13,CNS_Q_GPR+0x68,r1)
1820 SAVE_GPR(r14,CNS_Q_GPR+0x70,r1)
1821 SAVE_GPR(r15,CNS_Q_GPR+0x78,r1)
1822 SAVE_GPR(r16,CNS_Q_GPR+0x80,r1)
1823 SAVE_GPR(r17,CNS_Q_GPR+0x88,r1)
1824 SAVE_GPR(r18,CNS_Q_GPR+0x90,r1)
1825 SAVE_GPR(r19,CNS_Q_GPR+0x98,r1)
1826 SAVE_GPR(r20,CNS_Q_GPR+0xA0,r1)
1827 SAVE_GPR(r21,CNS_Q_GPR+0xA8,r1)
1828 SAVE_GPR(r22,CNS_Q_GPR+0xB0,r1)
1829 SAVE_GPR(r23,CNS_Q_GPR+0xB8,r1)
1830 SAVE_GPR(r24,CNS_Q_GPR+0xC0,r1)
1831 SAVE_GPR(r25,CNS_Q_GPR+0xC8,r1)
1832 SAVE_GPR(r26,CNS_Q_GPR+0xD0,r1)
1833 SAVE_GPR(r27,CNS_Q_GPR+0xD8,r1)
1834 SAVE_GPR(r28,CNS_Q_GPR+0xE0,r1)
1835 SAVE_GPR(r29,CNS_Q_GPR+0xE8,r1)
1836 SAVE_GPR(r30,CNS_Q_GPR+0xF0,r1)
1837 SAVE_GPR(r31,CNS_Q_GPR+0xF8,r1)
1838
1839 // save all paltemp regs except pt0
1840
1841//orig unfix_impure_gpr r1 // adjust impure area pointer for gpr stores
1842//orig fix_impure_ipr r1 // adjust impure area pointer for pt stores
1843
1844 lda r1, -0x200(r1) // Restore the impure base address.
1845 lda r1, CNS_Q_IPR(r1) // Point to the base of IPR area.
1846 SAVE_IPR(pt0,CNS_Q_PT+0x00,r1) // the osf code didn't save/restore palTemp 0 ?? pboyle
1847 SAVE_IPR(pt1,CNS_Q_PT+0x08,r1)
1848 SAVE_IPR(pt2,CNS_Q_PT+0x10,r1)
1849 SAVE_IPR(pt3,CNS_Q_PT+0x18,r1)
1850 SAVE_IPR(pt4,CNS_Q_PT+0x20,r1)
1851 SAVE_IPR(pt5,CNS_Q_PT+0x28,r1)
1852 SAVE_IPR(pt6,CNS_Q_PT+0x30,r1)
1853 SAVE_IPR(pt7,CNS_Q_PT+0x38,r1)
1854 SAVE_IPR(pt8,CNS_Q_PT+0x40,r1)
1855 SAVE_IPR(pt9,CNS_Q_PT+0x48,r1)
1856 SAVE_IPR(pt10,CNS_Q_PT+0x50,r1)
1857 SAVE_IPR(pt11,CNS_Q_PT+0x58,r1)
1858 SAVE_IPR(pt12,CNS_Q_PT+0x60,r1)
1859 SAVE_IPR(pt13,CNS_Q_PT+0x68,r1)
1860 SAVE_IPR(pt14,CNS_Q_PT+0x70,r1)
1861 SAVE_IPR(pt15,CNS_Q_PT+0x78,r1)
1862 SAVE_IPR(pt16,CNS_Q_PT+0x80,r1)
1863 SAVE_IPR(pt17,CNS_Q_PT+0x88,r1)
1864 SAVE_IPR(pt18,CNS_Q_PT+0x90,r1)
1865 SAVE_IPR(pt19,CNS_Q_PT+0x98,r1)
1866 SAVE_IPR(pt20,CNS_Q_PT+0xA0,r1)
1867 SAVE_IPR(pt21,CNS_Q_PT+0xA8,r1)
1868 SAVE_IPR(pt22,CNS_Q_PT+0xB0,r1)
1869 SAVE_IPR(pt23,CNS_Q_PT+0xB8,r1)
1870
1871 // Restore shadow mode
1872 mfpr r31, pt0 // pad write to icsr out of shadow of store (trap does not abort write)
1873 mfpr r31, pt0
1874 mtpr r2, icsr // Restore original ICSR
1875
1876 mfpr r31, pt0 // SDE bubble cycle 1
1877 mfpr r31, pt0 // SDE bubble cycle 2
1878 mfpr r31, pt0 // SDE bubble cycle 3
1879 nop
1880
1881 // save all integer shadow regs
1882 SAVE_SHADOW( r8,CNS_Q_SHADOW+0x00,r1) // also called p0...p7 in the Hudson code
1883 SAVE_SHADOW( r9,CNS_Q_SHADOW+0x08,r1)
1884 SAVE_SHADOW(r10,CNS_Q_SHADOW+0x10,r1)
1885 SAVE_SHADOW(r11,CNS_Q_SHADOW+0x18,r1)
1886 SAVE_SHADOW(r12,CNS_Q_SHADOW+0x20,r1)
1887 SAVE_SHADOW(r13,CNS_Q_SHADOW+0x28,r1)
1888 SAVE_SHADOW(r14,CNS_Q_SHADOW+0x30,r1)
1889 SAVE_SHADOW(r25,CNS_Q_SHADOW+0x38,r1)
1890
1891 SAVE_IPR(excAddr,CNS_Q_EXC_ADDR,r1)
1892 SAVE_IPR(palBase,CNS_Q_PAL_BASE,r1)
1893 SAVE_IPR(mmStat,CNS_Q_MM_STAT,r1)
1894 SAVE_IPR(va,CNS_Q_VA,r1)
1895 SAVE_IPR(icsr,CNS_Q_ICSR,r1)
1896 SAVE_IPR(ipl,CNS_Q_IPL,r1)
1897 SAVE_IPR(ips,CNS_Q_IPS,r1)
1898 SAVE_IPR(itbAsn,CNS_Q_ITB_ASN,r1)
1899 SAVE_IPR(aster,CNS_Q_ASTER,r1)
1900 SAVE_IPR(astrr,CNS_Q_ASTRR,r1)
1901 SAVE_IPR(sirr,CNS_Q_SIRR,r1)
1902 SAVE_IPR(isr,CNS_Q_ISR,r1)
1903 SAVE_IPR(iVptBr,CNS_Q_IVPTBR,r1)
1904 SAVE_IPR(mcsr,CNS_Q_MCSR,r1)
1905 SAVE_IPR(dcMode,CNS_Q_DC_MODE,r1)
1906
1907//orig pvc_violate 379 // mf maf_mode after a store ok (pvc doesn't distinguish ld from st)
1908//orig store_reg maf_mode, ipr=1 // save ipr -- no mbox instructions for
1909//orig // PVC violation applies only to
1910pvc$osf35$379: // loads. HW_ST ok here, so ignore
1911 SAVE_IPR(mafMode,CNS_Q_MAF_MODE,r1) // MBOX INST->MF MAF_MODE IN 0,1,2
1912
1913
1914 //the following iprs are informational only -- will not be restored
1915
1916 SAVE_IPR(icPerr,CNS_Q_ICPERR_STAT,r1)
1917 SAVE_IPR(PmCtr,CNS_Q_PM_CTR,r1)
1918 SAVE_IPR(intId,CNS_Q_INT_ID,r1)
1919 SAVE_IPR(excSum,CNS_Q_EXC_SUM,r1)
1920 SAVE_IPR(excMask,CNS_Q_EXC_MASK,r1)
1921 ldah r14, 0xFFF0(zero)
1922 zap r14, 0xE0, r14 // Get base address of CBOX IPRs
1923 NOP // Pad mfpr dcPerr out of shadow of
1924 NOP // last store
1925 NOP
1926 SAVE_IPR(dcPerr,CNS_Q_DCPERR_STAT,r1)
1927
1928 // read cbox ipr state
1929
1930 mb
1931 ldq_p r2, scCtl(r14)
1932 ldq_p r13, ldLock(r14)
1933 ldq_p r4, scAddr(r14)
1934 ldq_p r5, eiAddr(r14)
1935 ldq_p r6, bcTagAddr(r14)
1936 ldq_p r7, fillSyn(r14)
1937 bis r5, r4, zero // Make sure all loads complete before
1938 bis r7, r6, zero // reading registers that unlock them.
1939 ldq_p r8, scStat(r14) // Unlocks scAddr.
1940 ldq_p r9, eiStat(r14) // Unlocks eiAddr, bcTagAddr, fillSyn.
1941 ldq_p zero, eiStat(r14) // Make sure it is really unlocked.
1942 mb
1943
1944 // save cbox ipr state
1945 SAVE_SHADOW(r2,CNS_Q_SC_CTL,r1);
1946 SAVE_SHADOW(r13,CNS_Q_LD_LOCK,r1);
1947 SAVE_SHADOW(r4,CNS_Q_SC_ADDR,r1);
1948 SAVE_SHADOW(r5,CNS_Q_EI_ADDR,r1);
1949 SAVE_SHADOW(r6,CNS_Q_BC_TAG_ADDR,r1);
1950 SAVE_SHADOW(r7,CNS_Q_FILL_SYN,r1);
1951 SAVE_SHADOW(r8,CNS_Q_SC_STAT,r1);
1952 SAVE_SHADOW(r9,CNS_Q_EI_STAT,r1);
1953 //bc_config? sl_rcv?
1954
1955// restore impure base
1956//orig unfix_impure_ipr r1
1957 lda r1, -CNS_Q_IPR(r1)
1958
1959// save all floating regs
1960 mfpr r0, icsr // get icsr
1961 or r31, 1, r2 // get a one
1962 sll r2, icsr_v_fpe, r2 // Shift it into ICSR<FPE> position
1963 or r2, r0, r0 // set FEN on
1964 mtpr r0, icsr // write to icsr, enabling FEN
1965
1966// map the save area virtually
1967 mtpr r31, dtbIa // Clear all DTB entries
1968 srl r1, va_s_off, r0 // Clean off byte-within-page offset
1969 sll r0, pte_v_pfn, r0 // Shift to form PFN
1970 lda r0, pte_m_prot(r0) // Set all read/write enable bits
1971 mtpr r0, dtbPte // Load the PTE and set valid
1972 mtpr r1, dtbTag // Write the PTE and tag into the DTB
1973
1974
1975// map the next page too - in case the impure area crosses a page boundary
1976 lda r4, (1<<va_s_off)(r1) // Generate address for next page
1977 srl r4, va_s_off, r0 // Clean off byte-within-page offset
1978 sll r0, pte_v_pfn, r0 // Shift to form PFN
1979 lda r0, pte_m_prot(r0) // Set all read/write enable bits
1980 mtpr r0, dtbPte // Load the PTE and set valid
1981 mtpr r4, dtbTag // Write the PTE and tag into the DTB
1982
1983 sll r31, 0, r31 // stall cycle 1
1984 sll r31, 0, r31 // stall cycle 2
1985 sll r31, 0, r31 // stall cycle 3
1986 nop
1987
1988// add offset for saving fpr regs
1989//orig fix_impure_gpr r1
1990 lda r1, 0x200(r1) // Point to center of CPU segment
1991
1992// now save the regs - F0-F31
1993 mf_fpcr f0 // original
1994
1995 SAVE_FPR(f0,CNS_Q_FPR+0x00,r1)
1996 SAVE_FPR(f1,CNS_Q_FPR+0x08,r1)
1997 SAVE_FPR(f2,CNS_Q_FPR+0x10,r1)
1998 SAVE_FPR(f3,CNS_Q_FPR+0x18,r1)
1999 SAVE_FPR(f4,CNS_Q_FPR+0x20,r1)
2000 SAVE_FPR(f5,CNS_Q_FPR+0x28,r1)
2001 SAVE_FPR(f6,CNS_Q_FPR+0x30,r1)
2002 SAVE_FPR(f7,CNS_Q_FPR+0x38,r1)
2003 SAVE_FPR(f8,CNS_Q_FPR+0x40,r1)
2004 SAVE_FPR(f9,CNS_Q_FPR+0x48,r1)
2005 SAVE_FPR(f10,CNS_Q_FPR+0x50,r1)
2006 SAVE_FPR(f11,CNS_Q_FPR+0x58,r1)
2007 SAVE_FPR(f12,CNS_Q_FPR+0x60,r1)
2008 SAVE_FPR(f13,CNS_Q_FPR+0x68,r1)
2009 SAVE_FPR(f14,CNS_Q_FPR+0x70,r1)
2010 SAVE_FPR(f15,CNS_Q_FPR+0x78,r1)
2011 SAVE_FPR(f16,CNS_Q_FPR+0x80,r1)
2012 SAVE_FPR(f17,CNS_Q_FPR+0x88,r1)
2013 SAVE_FPR(f18,CNS_Q_FPR+0x90,r1)
2014 SAVE_FPR(f19,CNS_Q_FPR+0x98,r1)
2015 SAVE_FPR(f20,CNS_Q_FPR+0xA0,r1)
2016 SAVE_FPR(f21,CNS_Q_FPR+0xA8,r1)
2017 SAVE_FPR(f22,CNS_Q_FPR+0xB0,r1)
2018 SAVE_FPR(f23,CNS_Q_FPR+0xB8,r1)
2019 SAVE_FPR(f24,CNS_Q_FPR+0xC0,r1)
2020 SAVE_FPR(f25,CNS_Q_FPR+0xC8,r1)
2021 SAVE_FPR(f26,CNS_Q_FPR+0xD0,r1)
2022 SAVE_FPR(f27,CNS_Q_FPR+0xD8,r1)
2023 SAVE_FPR(f28,CNS_Q_FPR+0xE0,r1)
2024 SAVE_FPR(f29,CNS_Q_FPR+0xE8,r1)
2025 SAVE_FPR(f30,CNS_Q_FPR+0xF0,r1)
2026 SAVE_FPR(f31,CNS_Q_FPR+0xF8,r1)
2027
2028//switch impure offset from gpr to ipr---
2029//orig unfix_impure_gpr r1
2030//orig fix_impure_ipr r1
2031//orig store_reg1 fpcsr, f0, r1, fpcsr=1
2032
2033 SAVE_FPR(f0,CNS_Q_FPCSR,r1) // fpcsr loaded above into f0 -- can it reach
2034 lda r1, -0x200(r1) // Restore the impure base address
2035
2036// and back to gpr ---
2037//orig unfix_impure_ipr r1
2038//orig fix_impure_gpr r1
2039
2040//orig lda r0, cns_mchksize(r31) // get size of mchk area
2041//orig store_reg1 mchkflag, r0, r1, ipr=1
2042//orig mb
2043
2044 lda r1, CNS_Q_IPR(r1) // Point to base of IPR area again
2045 // save this using the IPR base (it is closer) not the GRP base as they used...pb
2046 lda r0, MACHINE_CHECK_SIZE(r31) // get size of mchk area
2047 SAVE_SHADOW(r0,CNS_Q_MCHK,r1);
2048 mb
2049
2050//orig or r31, 1, r0 // get a one
2051//orig store_reg1 flag, r0, r1, ipr=1 // set dump area flag
2052//orig mb
2053
2054 lda r1, -CNS_Q_IPR(r1) // back to the base
2055 lda r1, 0x200(r1) // Point to center of CPU segment
2056 or r31, 1, r0 // get a one
2057 SAVE_GPR(r0,CNS_Q_FLAG,r1) // // set dump area valid flag
2058 mb
2059
2060 // restore impure area base
2061//orig unfix_impure_gpr r1
2062 lda r1, -0x200(r1) // Point to center of CPU segment
2063
2064 mtpr r31, dtb_ia // clear the dtb
2065 mtpr r31, itb_ia // clear the itb
2066
2067//orig pvc_jsr savsta, bsr=1, dest=1
2068 ret r31, (r3) // and back we go
2069
2070
2071
2072// .sbttl "PAL_RESTORE_STATE"
2073//
2074//
2075// Pal_restore_state
2076//
2077//
2078// register usage:
2079// r1 = addr of impure area
2080// r3 = return_address
2081// all other regs are scratchable, as they are about to
2082// be reloaded from ram.
2083//
2084// Function:
2085// All chip state restored, all SRs, FRs, PTs, IPRs
2086// *** except R1, R3, PT0, PT4, PT5 ***
2087//
2088//
2089 ALIGN_BLOCK
2090pal_restore_state:
2091
2092//need to restore sc_ctl,bc_ctl,bc_config??? if so, need to figure out a safe way to do so.
2093
2094// map the console io area virtually
2095 mtpr r31, dtbIa // Clear all DTB entries
2096 srl r1, va_s_off, r0 // Clean off byte-within-page offset
2097 sll r0, pte_v_pfn, r0 // Shift to form PFN
2098 lda r0, pte_m_prot(r0) // Set all read/write enable bits
2099 mtpr r0, dtbPte // Load the PTE and set valid
2100 mtpr r1, dtbTag // Write the PTE and tag into the DTB
2101
2102
2103// map the next page too, in case impure area crosses page boundary
2104 lda r4, (1<<VA_S_OFF)(r1) // Generate address for next page
2105 srl r4, va_s_off, r0 // Clean off byte-within-page offset
2106 sll r0, pte_v_pfn, r0 // Shift to form PFN
2107 lda r0, pte_m_prot(r0) // Set all read/write enable bits
2108 mtpr r0, dtbPte // Load the PTE and set valid
2109 mtpr r4, dtbTag // Write the PTE and tag into the DTB
2110
2111// save all floating regs
2112 mfpr r0, icsr // Get current ICSR
2113 bis zero, 1, r2 // Get a '1'
2114 or r2, (1<<(icsr_v_sde-icsr_v_fpe)), r2
2115 sll r2, icsr_v_fpe, r2 // Shift bits into position
2116 bis r2, r2, r0 // Set ICSR<SDE> and ICSR<FPE>
2117 mtpr r0, icsr // Update the chip
2118
2119 mfpr r31, pt0 // FPE bubble cycle 1 //orig
2120 mfpr r31, pt0 // FPE bubble cycle 2 //orig
2121 mfpr r31, pt0 // FPE bubble cycle 3 //orig
2122
2123//orig fix_impure_ipr r1
2124//orig restore_reg1 fpcsr, f0, r1, fpcsr=1
2125//orig mt_fpcr f0
2126//orig
2127//orig unfix_impure_ipr r1
2128//orig fix_impure_gpr r1 // adjust impure pointer offset for gpr access
2129 lda r1, 200(r1) // Point to base of IPR area again
2130 RESTORE_FPR(f0,CNS_Q_FPCSR,r1) // can it reach?? pb
2131 mt_fpcr f0 // original
2132
2133 lda r1, 0x200(r1) // point to center of CPU segment
2134
2135// restore all floating regs
2136 RESTORE_FPR(f0,CNS_Q_FPR+0x00,r1)
2137 RESTORE_FPR(f1,CNS_Q_FPR+0x08,r1)
2138 RESTORE_FPR(f2,CNS_Q_FPR+0x10,r1)
2139 RESTORE_FPR(f3,CNS_Q_FPR+0x18,r1)
2140 RESTORE_FPR(f4,CNS_Q_FPR+0x20,r1)
2141 RESTORE_FPR(f5,CNS_Q_FPR+0x28,r1)
2142 RESTORE_FPR(f6,CNS_Q_FPR+0x30,r1)
2143 RESTORE_FPR(f7,CNS_Q_FPR+0x38,r1)
2144 RESTORE_FPR(f8,CNS_Q_FPR+0x40,r1)
2145 RESTORE_FPR(f9,CNS_Q_FPR+0x48,r1)
2146 RESTORE_FPR(f10,CNS_Q_FPR+0x50,r1)
2147 RESTORE_FPR(f11,CNS_Q_FPR+0x58,r1)
2148 RESTORE_FPR(f12,CNS_Q_FPR+0x60,r1)
2149 RESTORE_FPR(f13,CNS_Q_FPR+0x68,r1)
2150 RESTORE_FPR(f14,CNS_Q_FPR+0x70,r1)
2151 RESTORE_FPR(f15,CNS_Q_FPR+0x78,r1)
2152 RESTORE_FPR(f16,CNS_Q_FPR+0x80,r1)
2153 RESTORE_FPR(f17,CNS_Q_FPR+0x88,r1)
2154 RESTORE_FPR(f18,CNS_Q_FPR+0x90,r1)
2155 RESTORE_FPR(f19,CNS_Q_FPR+0x98,r1)
2156 RESTORE_FPR(f20,CNS_Q_FPR+0xA0,r1)
2157 RESTORE_FPR(f21,CNS_Q_FPR+0xA8,r1)
2158 RESTORE_FPR(f22,CNS_Q_FPR+0xB0,r1)
2159 RESTORE_FPR(f23,CNS_Q_FPR+0xB8,r1)
2160 RESTORE_FPR(f24,CNS_Q_FPR+0xC0,r1)
2161 RESTORE_FPR(f25,CNS_Q_FPR+0xC8,r1)
2162 RESTORE_FPR(f26,CNS_Q_FPR+0xD0,r1)
2163 RESTORE_FPR(f27,CNS_Q_FPR+0xD8,r1)
2164 RESTORE_FPR(f28,CNS_Q_FPR+0xE0,r1)
2165 RESTORE_FPR(f29,CNS_Q_FPR+0xE8,r1)
2166 RESTORE_FPR(f30,CNS_Q_FPR+0xF0,r1)
2167 RESTORE_FPR(f31,CNS_Q_FPR+0xF8,r1)
2168
2169// switch impure pointer from gpr to ipr area --
2170//orig unfix_impure_gpr r1
2171//orig fix_impure_ipr r1
2172 lda r1, -0x200(r1) // Restore base address of impure area.
2173 lda r1, CNS_Q_IPR(r1) // Point to base of IPR area.
2174
2175// restore all pal regs
2176 RESTORE_IPR(pt0,CNS_Q_PT+0x00,r1) // the osf code didn't save/restore palTemp 0 ?? pboyle
2177 RESTORE_IPR(pt1,CNS_Q_PT+0x08,r1)
2178 RESTORE_IPR(pt2,CNS_Q_PT+0x10,r1)
2179 RESTORE_IPR(pt3,CNS_Q_PT+0x18,r1)
2180 RESTORE_IPR(pt4,CNS_Q_PT+0x20,r1)
2181 RESTORE_IPR(pt5,CNS_Q_PT+0x28,r1)
2182 RESTORE_IPR(pt6,CNS_Q_PT+0x30,r1)
2183 RESTORE_IPR(pt7,CNS_Q_PT+0x38,r1)
2184 RESTORE_IPR(pt8,CNS_Q_PT+0x40,r1)
2185 RESTORE_IPR(pt9,CNS_Q_PT+0x48,r1)
2186 RESTORE_IPR(pt10,CNS_Q_PT+0x50,r1)
2187 RESTORE_IPR(pt11,CNS_Q_PT+0x58,r1)
2188 RESTORE_IPR(pt12,CNS_Q_PT+0x60,r1)
2189 RESTORE_IPR(pt13,CNS_Q_PT+0x68,r1)
2190 RESTORE_IPR(pt14,CNS_Q_PT+0x70,r1)
2191 RESTORE_IPR(pt15,CNS_Q_PT+0x78,r1)
2192 RESTORE_IPR(pt16,CNS_Q_PT+0x80,r1)
2193 RESTORE_IPR(pt17,CNS_Q_PT+0x88,r1)
2194 RESTORE_IPR(pt18,CNS_Q_PT+0x90,r1)
2195 RESTORE_IPR(pt19,CNS_Q_PT+0x98,r1)
2196 RESTORE_IPR(pt20,CNS_Q_PT+0xA0,r1)
2197 RESTORE_IPR(pt21,CNS_Q_PT+0xA8,r1)
2198 RESTORE_IPR(pt22,CNS_Q_PT+0xB0,r1)
2199 RESTORE_IPR(pt23,CNS_Q_PT+0xB8,r1)
2200
2201
2202//orig restore_reg exc_addr, ipr=1 // restore ipr
2203//orig restore_reg pal_base, ipr=1 // restore ipr
2204//orig restore_reg ipl, ipr=1 // restore ipr
2205//orig restore_reg ps, ipr=1 // restore ipr
2206//orig mtpr r0, dtb_cm // set current mode in mbox too
2207//orig restore_reg itb_asn, ipr=1
2208//orig srl r0, itb_asn_v_asn, r0
2209//orig sll r0, dtb_asn_v_asn, r0
2210//orig mtpr r0, dtb_asn // set ASN in Mbox too
2211//orig restore_reg ivptbr, ipr=1
2212//orig mtpr r0, mvptbr // use ivptbr value to restore mvptbr
2213//orig restore_reg mcsr, ipr=1
2214//orig restore_reg aster, ipr=1
2215//orig restore_reg astrr, ipr=1
2216//orig restore_reg sirr, ipr=1
2217//orig restore_reg maf_mode, ipr=1 // no mbox instruction for 3 cycles
2218//orig mfpr r31, pt0 // (may issue with mt maf_mode)
2219//orig mfpr r31, pt0 // bubble cycle 1
2220//orig mfpr r31, pt0 // bubble cycle 2
2221//orig mfpr r31, pt0 // bubble cycle 3
2222//orig mfpr r31, pt0 // (may issue with following ld)
2223
2224 // r0 gets the value of RESTORE_IPR in the macro and this code uses this side effect (gag)
2225 RESTORE_IPR(excAddr,CNS_Q_EXC_ADDR,r1)
2226 RESTORE_IPR(palBase,CNS_Q_PAL_BASE,r1)
2227 RESTORE_IPR(ipl,CNS_Q_IPL,r1)
2228 RESTORE_IPR(ips,CNS_Q_IPS,r1)
2229 mtpr r0, dtbCm // Set Mbox current mode too.
2230 RESTORE_IPR(itbAsn,CNS_Q_ITB_ASN,r1)
2231 srl r0, 4, r0
2232 sll r0, 57, r0
2233 mtpr r0, dtbAsn // Set Mbox ASN too
2234 RESTORE_IPR(iVptBr,CNS_Q_IVPTBR,r1)
2235 mtpr r0, mVptBr // Set Mbox VptBr too
2236 RESTORE_IPR(mcsr,CNS_Q_MCSR,r1)
2237 RESTORE_IPR(aster,CNS_Q_ASTER,r1)
2238 RESTORE_IPR(astrr,CNS_Q_ASTRR,r1)
2239 RESTORE_IPR(sirr,CNS_Q_SIRR,r1)
2240 RESTORE_IPR(mafMode,CNS_Q_MAF_MODE,r1)
2241 STALL
2242 STALL
2243 STALL
2244 STALL
2245 STALL
2246
2247
2248 // restore all integer shadow regs
2249 RESTORE_SHADOW( r8,CNS_Q_SHADOW+0x00,r1) // also called p0...p7 in the Hudson code
2250 RESTORE_SHADOW( r9,CNS_Q_SHADOW+0x08,r1)
2251 RESTORE_SHADOW(r10,CNS_Q_SHADOW+0x10,r1)
2252 RESTORE_SHADOW(r11,CNS_Q_SHADOW+0x18,r1)
2253 RESTORE_SHADOW(r12,CNS_Q_SHADOW+0x20,r1)
2254 RESTORE_SHADOW(r13,CNS_Q_SHADOW+0x28,r1)
2255 RESTORE_SHADOW(r14,CNS_Q_SHADOW+0x30,r1)
2256 RESTORE_SHADOW(r25,CNS_Q_SHADOW+0x38,r1)
2257 RESTORE_IPR(dcMode,CNS_Q_DC_MODE,r1)
2258
2259 //
2260 // Get out of shadow mode
2261 //
2262
2263 mfpr r31, pt0 // pad last load to icsr write (in case of replay, icsr will be written anyway)
2264 mfpr r31, pt0 // ""
2265 mfpr r0, icsr // Get icsr
2266 ldah r2, (1<<(ICSR_V_SDE-16))(r31) // Get a one in SHADOW_ENABLE bit location
2267 bic r0, r2, r2 // ICSR with SDE clear
2268 mtpr r2, icsr // Turn off SDE - no palshadow rd/wr for 3 bubble cycles
2269
2270 mfpr r31, pt0 // SDE bubble cycle 1
2271 mfpr r31, pt0 // SDE bubble cycle 2
2272 mfpr r31, pt0 // SDE bubble cycle 3
2273 nop
2274
2275// switch impure pointer from ipr to gpr area --
2276//orig unfix_impure_ipr r1
2277//orig fix_impure_gpr r1
2278
2279// Restore GPRs (r0, r2 are restored later, r1 and r3 are trashed) ...
2280
2281 lda r1, -CNS_Q_IPR(r1) // Restore base address of impure area
2282 lda r1, 0x200(r1) // Point to center of CPU segment
2283
2284 // restore all integer regs
2285 RESTORE_GPR(r4,CNS_Q_GPR+0x20,r1)
2286 RESTORE_GPR(r5,CNS_Q_GPR+0x28,r1)
2287 RESTORE_GPR(r6,CNS_Q_GPR+0x30,r1)
2288 RESTORE_GPR(r7,CNS_Q_GPR+0x38,r1)
2289 RESTORE_GPR(r8,CNS_Q_GPR+0x40,r1)
2290 RESTORE_GPR(r9,CNS_Q_GPR+0x48,r1)
2291 RESTORE_GPR(r10,CNS_Q_GPR+0x50,r1)
2292 RESTORE_GPR(r11,CNS_Q_GPR+0x58,r1)
2293 RESTORE_GPR(r12,CNS_Q_GPR+0x60,r1)
2294 RESTORE_GPR(r13,CNS_Q_GPR+0x68,r1)
2295 RESTORE_GPR(r14,CNS_Q_GPR+0x70,r1)
2296 RESTORE_GPR(r15,CNS_Q_GPR+0x78,r1)
2297 RESTORE_GPR(r16,CNS_Q_GPR+0x80,r1)
2298 RESTORE_GPR(r17,CNS_Q_GPR+0x88,r1)
2299 RESTORE_GPR(r18,CNS_Q_GPR+0x90,r1)
2300 RESTORE_GPR(r19,CNS_Q_GPR+0x98,r1)
2301 RESTORE_GPR(r20,CNS_Q_GPR+0xA0,r1)
2302 RESTORE_GPR(r21,CNS_Q_GPR+0xA8,r1)
2303 RESTORE_GPR(r22,CNS_Q_GPR+0xB0,r1)
2304 RESTORE_GPR(r23,CNS_Q_GPR+0xB8,r1)
2305 RESTORE_GPR(r24,CNS_Q_GPR+0xC0,r1)
2306 RESTORE_GPR(r25,CNS_Q_GPR+0xC8,r1)
2307 RESTORE_GPR(r26,CNS_Q_GPR+0xD0,r1)
2308 RESTORE_GPR(r27,CNS_Q_GPR+0xD8,r1)
2309 RESTORE_GPR(r28,CNS_Q_GPR+0xE0,r1)
2310 RESTORE_GPR(r29,CNS_Q_GPR+0xE8,r1)
2311 RESTORE_GPR(r30,CNS_Q_GPR+0xF0,r1)
2312 RESTORE_GPR(r31,CNS_Q_GPR+0xF8,r1)
2313
2314//orig // switch impure pointer from gpr to ipr area --
2315//orig unfix_impure_gpr r1
2316//orig fix_impure_ipr r1
2317//orig restore_reg icsr, ipr=1 // restore original icsr- 4 bubbles to hw_rei
2318
2319 lda t0, -0x200(t0) // Restore base address of impure area.
2320 lda t0, CNS_Q_IPR(t0) // Point to base of IPR area again.
2321 RESTORE_IPR(icsr,CNS_Q_ICSR,r1)
2322
2323//orig // and back again --
2324//orig unfix_impure_ipr r1
2325//orig fix_impure_gpr r1
2326//orig store_reg1 flag, r31, r1, ipr=1 // clear dump area valid flag
2327//orig mb
2328
2329 lda t0, -CNS_Q_IPR(t0) // Back to base of impure area again,
2330 lda t0, 0x200(t0) // and back to center of CPU segment
2331 SAVE_GPR(r31,CNS_Q_FLAG,r1) // Clear the dump area valid flag
2332 mb
2333
2334//orig // and back we go
2335//orig// restore_reg 3
2336//orig restore_reg 2
2337//orig// restore_reg 1
2338//orig restore_reg 0
2339//orig // restore impure area base
2340//orig unfix_impure_gpr r1
2341
2342 RESTORE_GPR(r2,CNS_Q_GPR+0x10,r1)
2343 RESTORE_GPR(r0,CNS_Q_GPR+0x00,r1)
2344 lda r1, -0x200(r1) // Restore impure base address
2345
2346 mfpr r31, pt0 // stall for ldq_p above //orig
2347
2348 mtpr r31, dtb_ia // clear the tb //orig
2349 mtpr r31, itb_ia // clear the itb //orig
2350
2351//orig pvc_jsr rststa, bsr=1, dest=1
2352 ret r31, (r3) // back we go //orig
2353
2354
2355//
2356// pal_pal_bug_check -- code has found a bugcheck situation.
2357// Set things up and join common machine check flow.
2358//
2359// Input:
2360// r14 - exc_addr
2361//
2362// On exit:
2363// pt0 - saved r0
2364// pt1 - saved r1
2365// pt4 - saved r4
2366// pt5 - saved r5
2367// pt6 - saved r6
2368// pt10 - saved exc_addr
2369// pt_misc<47:32> - mchk code
2370// pt_misc<31:16> - scb vector
2371// r14 - base of Cbox IPRs in IO space
2372// MCES<mchk> is set
2373//
2374
	ALIGN_BLOCK
// Bugcheck entry used when already inside interrupt flow: the stack
// frame has been pushed, so mchk code is incremented by 1 as a flag
// for the common machine-check path.
	.globl pal_pal_bug_check_from_int
pal_pal_bug_check_from_int:
	DEBUGSTORE(0x79)
//simos	DEBUG_EXC_ADDR()
	DEBUGSTORE(0x20)
//simos	bsr r25, put_hex
	lda	r25, mchk_c_bugcheck(r31)
	addq	r25, 1, r25		// set flag indicating we came from interrupt and stack is already pushed
	br	r31, pal_pal_mchk
	nop

// Plain bugcheck entry: stack frame not yet pushed.
pal_pal_bug_check:
	lda	r25, mchk_c_bugcheck(r31)

// Common flow: pack mchk code and SCB vector into pt_misc, stash
// scratch registers in PALtemps, then join the machine-check path.
pal_pal_mchk:
	sll	r25, 32, r25		// Move mchk code to position

	mtpr	r14, pt10		// Stash exc_addr
	mtpr	r14, exc_addr

	mfpr	r12, pt_misc		// Get MCES and scratch
	zap	r12, 0x3c, r12		// clear old mchk-code and SCB-vector fields (bits 47:16)

	or	r12, r25, r12		// Combine mchk code
	lda	r25, scb_v_procmchk(r31) // Get SCB vector

	sll	r25, 16, r25		// Move SCBv to position
	or	r12, r25, r25		// Combine SCBv

	mtpr	r0, pt0			// Stash for scratch
	bis	r25, mces_m_mchk, r25	// Set MCES<MCHK> bit

	mtpr	r25, pt_misc		// Save mchk code!scbv!whami!mces
	ldah	r14, 0xfff0(r31)

	mtpr	r1, pt1			// Stash for scratch
	zap	r14, 0xE0, r14		// Get Cbox IPR base

	mtpr	r4, pt4
	mtpr	r5, pt5

	mtpr	r6, pt6
	blbs	r12, sys_double_machine_check // MCHK halt if double machine check

	br	r31, sys_mchk_collect_iprs	// Join common machine check flow
2421
2422
2423
2424// align_to_call_pal_section
2425// Align to address of first call_pal entry point - 2000
2426
2427//
2428// HALT - PALcode for HALT instruction
2429//
2430// Entry:
2431// Vectored into via hardware PALcode instruction dispatch.
2432//
2433// Function:
2434// GO to console code
2435//
2436//
2437
2438 .text 1
2439// . = 0x2000
	CALL_PAL_PRIV(PAL_HALT_ENTRY)
// HALT: rewind exc_addr to the HALT instruction itself, update the
// PCB, and enter the console with a software-halt code.
call_pal_halt:
	mfpr	r31, pt0		// Pad exc_addr read
	mfpr	r31, pt0

	mfpr	r12, exc_addr		// get PC
	subq	r12, 4, r12		// Point to the HALT

	mtpr	r12, exc_addr
	mtpr	r0, pt0			// stash r0 (clobbered as the bsr link register below)

//orig	pvc_jsr	updpcb, bsr=1
	bsr	r0, pal_update_pcb	// update the pcb
	lda	r0, hlt_c_sw_halt(r31)	// set halt code to sw halt
	br	r31, sys_enter_console	// enter the console
2455
2456//
2457// CFLUSH - PALcode for CFLUSH instruction
2458//
2459// Entry:
2460// Vectored into via hardware PALcode instruction dispatch.
2461//
2462// R16 - contains the PFN of the page to be flushed
2463//
2464// Function:
2465// Flush all Dstream caches of 1 entire page
2466// The CFLUSH routine is in the system specific module.
2467//
2468//
2469
	CALL_PAL_PRIV(PAL_CFLUSH_ENTRY)
// CFLUSH: r16 = PFN of page to flush; implementation is system-specific.
Call_Pal_Cflush:
	br	r31, sys_cflush
2473
2474//
2475// DRAINA - PALcode for DRAINA instruction
2476//
2477// Entry:
2478// Vectored into via hardware PALcode instruction dispatch.
2479// Implicit TRAPB performed by hardware.
2480//
2481// Function:
2482// Stall instruction issue until all prior instructions are guaranteed to
2483// complete without incurring aborts. For the EV5 implementation, this
2484// means waiting until all pending DREADS are returned.
2485//
2486//
2487
	CALL_PAL_PRIV(PAL_DRAINA_ENTRY)
// DRAINA: spin until maf_mode reports no pending DREADs, bounded by a
// countdown so a stuck Mbox falls through to HALT instead of hanging.
Call_Pal_Draina:
	ldah	r14, 0x100(r31)		// Init counter. Value?
	nop

DRAINA_LOOP:
	subq	r14, 1, r14		// Decrement counter
	mfpr	r13, ev5__maf_mode	// Fetch status bit

	srl	r13, maf_mode_v_dread_pending, r13 // move DREAD-pending bit to bit 0
	ble	r14, DRAINA_LOOP_TOO_LONG

	nop
	blbs	r13, DRAINA_LOOP	// Wait until all DREADS clear

	hw_rei

DRAINA_LOOP_TOO_LONG:
	br	r31, call_pal_halt	// counter expired: give up and halt
2507
2508// CALL_PAL OPCDECs
2509
// CALL_PAL function codes 0x03-0x08 are not implemented by this
// PALcode; each entry vectors to the common OPCDEC handler.
	CALL_PAL_PRIV(0x0003)
CallPal_OpcDec03:
	br	r31, osfpal_calpal_opcdec

	CALL_PAL_PRIV(0x0004)
CallPal_OpcDec04:
	br	r31, osfpal_calpal_opcdec

	CALL_PAL_PRIV(0x0005)
CallPal_OpcDec05:
	br	r31, osfpal_calpal_opcdec

	CALL_PAL_PRIV(0x0006)
CallPal_OpcDec06:
	br	r31, osfpal_calpal_opcdec

	CALL_PAL_PRIV(0x0007)
CallPal_OpcDec07:
	br	r31, osfpal_calpal_opcdec

	CALL_PAL_PRIV(0x0008)
CallPal_OpcDec08:
	br	r31, osfpal_calpal_opcdec
2533
2534//
2535// CSERVE - PALcode for CSERVE instruction
2536//
2537// Entry:
2538// Vectored into via hardware PALcode instruction dispatch.
2539//
2540// Function:
2541// Various functions for private use of console software
2542//
2543// option selector in r0
2544// arguments in r16....
2545// The CSERVE routine is in the system specific module.
2546//
2547//
2548
	CALL_PAL_PRIV(PAL_CSERVE_ENTRY)
// CSERVE: console-private services; selector in r0, args in r16...;
// implementation is system-specific.
Call_Pal_Cserve:
	br	r31, sys_cserve
2552
2553//
2554// swppal - PALcode for swppal instruction
2555//
2556// Entry:
// Vectored into via hardware PALcode instruction dispatch.
2559// R16 contains the new PAL identifier
2560// R17:R21 contain implementation-specific entry parameters
2561//
2562// R0 receives status:
2563// 0 success (PAL was switched)
2564// 1 unknown PAL variant
2565// 2 known PAL variant, but PAL not loaded
2566//
2567//
2568// Function:
2569// Swap control to another PAL.
2570//
2571
	CALL_PAL_PRIV(PAL_SWPPAL_ENTRY)
// SWPPAL: r16 is either a small PAL-variant id (<= 255) or a physical
// base address of a new PAL image. Only variant 2 (OSF) is recognized.
Call_Pal_Swppal:
	cmpule	r16, 255, r0		// see if a kibble was passed
	cmoveq	r16, r16, r0		// if r16=0 then a valid address (ECO 59)

	or	r16, r31, r3		// set r3 incase this is a address
	blbc	r0, swppal_cont		// nope, try it as an address

	cmpeq	r16, 2, r0		// is it our friend OSF?
	blbc	r0, swppal_fail		// nope, don't know this fellow

	br	r2, CALL_PAL_SWPPAL_10_	// tis our buddy OSF; r2 = addr of literal below

// .global	osfpal_hw_entry_reset
// .weak	osfpal_hw_entry_reset
// .long	<osfpal_hw_entry_reset-pal_start>
//orig	halt	// don't know how to get the address here - kludge ok, load pal at 0
	.long	0			// ?? hack upon hack...pb

CALL_PAL_SWPPAL_10_:	ldl_p	r3, 0(r2)	// fetch target addr
//	ble	r3, swppal_fail		; if OSF not linked in say not loaded.
	mfpr	r2, pal_base		// fetch pal base

	addq	r2, r3, r3		// add pal base
	lda	r2, 0x3FFF(r31)		// get pal base checker mask

	and	r3, r2, r2		// any funky bits set? (pal base must be 16KB aligned)
	cmpeq	r2, 0, r0		//

	blbc	r0, swppal_fail		// return unknown if bad bit set.
	br	r31, swppal_cont
2603
2604// .sbttl "CALL_PAL OPCDECs"
2605
// CALL_PAL function codes 0x0B-0x0C: unimplemented, vector to OPCDEC.
	CALL_PAL_PRIV(0x000B)
CallPal_OpcDec0B:
	br	r31, osfpal_calpal_opcdec

	CALL_PAL_PRIV(0x000C)
CallPal_OpcDec0C:
	br	r31, osfpal_calpal_opcdec
2613
2614//
2615// wripir - PALcode for wripir instruction
2616//
2617// Entry:
2618// Vectored into via hardware PALcode instruction dispatch.
2619// r16 = processor number to interrupt
2620//
2621// Function:
2622// IPIR <- R16
2623// Handled in system-specific code
2624//
2625// Exit:
2626// interprocessor interrupt is recorded on the target processor
2627// and is initiated when the proper enabling conditions are present.
2628//
2629
	CALL_PAL_PRIV(PAL_WRIPIR_ENTRY)
// WRIPIR: r16 = target processor number; handled in system-specific code.
Call_Pal_Wrpir:
	br	r31, sys_wripir
2633
2634// .sbttl "CALL_PAL OPCDECs"
2635
// CALL_PAL function codes 0x0E-0x0F: unimplemented, vector to OPCDEC.
	CALL_PAL_PRIV(0x000E)
CallPal_OpcDec0E:
	br	r31, osfpal_calpal_opcdec

	CALL_PAL_PRIV(0x000F)
CallPal_OpcDec0F:
	br	r31, osfpal_calpal_opcdec
2643
2644//
2645// rdmces - PALcode for rdmces instruction
2646//
2647// Entry:
2648// Vectored into via hardware PALcode instruction dispatch.
2649//
2650// Function:
2651// R0 <- ZEXT(MCES)
2652//
2653
	CALL_PAL_PRIV(PAL_RDMCES_ENTRY)
// RDMCES: r0 <- zero-extended MCES field of pt_mces.
Call_Pal_Rdmces:
	mfpr	r0, pt_mces		// Read from PALtemp
	and	r0, mces_m_all, r0	// Clear other bits

	hw_rei
2660
2661//
2662// wrmces - PALcode for wrmces instruction
2663//
2664// Entry:
2665// Vectored into via hardware PALcode instruction dispatch.
2666//
2667// Function:
2668// If {R16<0> EQ 1} then MCES<0> <- 0 (MCHK)
2669// If {R16<1> EQ 1} then MCES<1> <- 0 (SCE)
2670// If {R16<2> EQ 1} then MCES<2> <- 0 (PCE)
2671// MCES<3> <- R16<3> (DPC)
2672// MCES<4> <- R16<4> (DSC)
2673//
2674//
2675
	CALL_PAL_PRIV(PAL_WRMCES_ENTRY)
// WRMCES: MCHK/SCE/PCE are write-1-to-clear; DPC/DSC are written
// directly from r16 (see header above).
Call_Pal_Wrmces:
	and	r16, ((1<<mces_v_mchk) | (1<<mces_v_sce) | (1<<mces_v_pce)), r13 // Isolate MCHK, SCE, PCE
	mfpr	r14, pt_mces		// Get current value

	ornot	r31, r13, r13		// Flip all the bits
	and	r16, ((1<<mces_v_dpc) | (1<<mces_v_dsc)), r17 // Isolate new DPC, DSC

	and	r14, r13, r1		// Update MCHK, SCE, PCE (clear where a0 bit was set)
	bic	r1, ((1<<mces_v_dpc) | (1<<mces_v_dsc)), r1 // Clear old DPC, DSC

	or	r1, r17, r1		// Update DPC and DSC
	mtpr	r1, pt_mces		// Write MCES back

	nop				// Pad to fix PT write->read restriction

	nop
	hw_rei
2694
2695
2696
2697// CALL_PAL OPCDECs
2698
// CALL_PAL function codes 0x12-0x2A are not implemented by this
// PALcode; each entry vectors to the common OPCDEC handler.
	CALL_PAL_PRIV(0x0012)
CallPal_OpcDec12:
	br	r31, osfpal_calpal_opcdec

	CALL_PAL_PRIV(0x0013)
CallPal_OpcDec13:
	br	r31, osfpal_calpal_opcdec

	CALL_PAL_PRIV(0x0014)
CallPal_OpcDec14:
	br	r31, osfpal_calpal_opcdec

	CALL_PAL_PRIV(0x0015)
CallPal_OpcDec15:
	br	r31, osfpal_calpal_opcdec

	CALL_PAL_PRIV(0x0016)
CallPal_OpcDec16:
	br	r31, osfpal_calpal_opcdec

	CALL_PAL_PRIV(0x0017)
CallPal_OpcDec17:
	br	r31, osfpal_calpal_opcdec

	CALL_PAL_PRIV(0x0018)
CallPal_OpcDec18:
	br	r31, osfpal_calpal_opcdec

	CALL_PAL_PRIV(0x0019)
CallPal_OpcDec19:
	br	r31, osfpal_calpal_opcdec

	CALL_PAL_PRIV(0x001A)
CallPal_OpcDec1A:
	br	r31, osfpal_calpal_opcdec

	CALL_PAL_PRIV(0x001B)
CallPal_OpcDec1B:
	br	r31, osfpal_calpal_opcdec

	CALL_PAL_PRIV(0x001C)
CallPal_OpcDec1C:
	br	r31, osfpal_calpal_opcdec

	CALL_PAL_PRIV(0x001D)
CallPal_OpcDec1D:
	br	r31, osfpal_calpal_opcdec

	CALL_PAL_PRIV(0x001E)
CallPal_OpcDec1E:
	br	r31, osfpal_calpal_opcdec

	CALL_PAL_PRIV(0x001F)
CallPal_OpcDec1F:
	br	r31, osfpal_calpal_opcdec

	CALL_PAL_PRIV(0x0020)
CallPal_OpcDec20:
	br	r31, osfpal_calpal_opcdec

	CALL_PAL_PRIV(0x0021)
CallPal_OpcDec21:
	br	r31, osfpal_calpal_opcdec

	CALL_PAL_PRIV(0x0022)
CallPal_OpcDec22:
	br	r31, osfpal_calpal_opcdec

	CALL_PAL_PRIV(0x0023)
CallPal_OpcDec23:
	br	r31, osfpal_calpal_opcdec

	CALL_PAL_PRIV(0x0024)
CallPal_OpcDec24:
	br	r31, osfpal_calpal_opcdec

	CALL_PAL_PRIV(0x0025)
CallPal_OpcDec25:
	br	r31, osfpal_calpal_opcdec

	CALL_PAL_PRIV(0x0026)
CallPal_OpcDec26:
	br	r31, osfpal_calpal_opcdec

	CALL_PAL_PRIV(0x0027)
CallPal_OpcDec27:
	br	r31, osfpal_calpal_opcdec

	CALL_PAL_PRIV(0x0028)
CallPal_OpcDec28:
	br	r31, osfpal_calpal_opcdec

	CALL_PAL_PRIV(0x0029)
CallPal_OpcDec29:
	br	r31, osfpal_calpal_opcdec

	CALL_PAL_PRIV(0x002A)
CallPal_OpcDec2A:
	br	r31, osfpal_calpal_opcdec
2798
2799//
2800// wrfen - PALcode for wrfen instruction
2801//
2802// Entry:
2803// Vectored into via hardware PALcode instruction dispatch.
2804//
2805// Function:
2806// a0<0> -> ICSR<FPE>
2807// Store new FEN in PCB
2808// Final value of t0 (r1), t8..t10 (r22..r24) and a0 (r16)
2809// are UNPREDICTABLE
2810//
2811// Issue: What about pending FP loads when FEN goes from on->off????
2812//
2813
	CALL_PAL_PRIV(PAL_WRFEN_ENTRY)
// WRFEN: copy a0<0> into ICSR<FPE> and store the new FEN into the PCB.
Call_Pal_Wrfen:
	or	r31, 1, r13		// Get a one
	mfpr	r1, ev5__icsr		// Get current FPE

	sll	r13, icsr_v_fpe, r13	// shift 1 to icsr<fpe> spot, e0
	and	r16, 1, r16		// clean new fen

	sll	r16, icsr_v_fpe, r12	// shift new fen to correct bit position
	bic	r1, r13, r1		// zero icsr<fpe>

	or	r1, r12, r1		// Or new FEN into ICSR
	mfpr	r12, pt_pcbb		// Get PCBB - E1

	mtpr	r1, ev5__icsr		// write new ICSR.  3 Bubble cycles to HW_REI
	stl_p	r16, osfpcb_q_fen(r12)	// Store FEN in PCB.

	mfpr	r31, pt0		// Pad ICSR<FPE> write.
	mfpr	r31, pt0

	mfpr	r31, pt0
// pvc_violate 225 // cuz PVC can't distinguish which bits changed
	hw_rei
2837
2838
// CALL_PAL function code 0x2C: unimplemented, vector to OPCDEC.
	CALL_PAL_PRIV(0x002C)
CallPal_OpcDec2C:
	br	r31, osfpal_calpal_opcdec
2842
2843//
2844// wrvptpr - PALcode for wrvptpr instruction
2845//
2846// Entry:
2847// Vectored into via hardware PALcode instruction dispatch.
2848//
2849// Function:
2850// vptptr <- a0 (r16)
2851//
2852
	CALL_PAL_PRIV(PAL_WRVPTPTR_ENTRY)
// WRVPTPTR: write a0 into both the Mbox and Ibox VPT base registers.
Call_Pal_Wrvptptr:
	mtpr	r16, ev5__mvptbr	// Load Mbox copy
	mtpr	r16, ev5__ivptbr	// Load Ibox copy
	nop				// Pad IPR write
	nop
	hw_rei
2860
// CALL_PAL function codes 0x2E-0x2F: unimplemented, vector to OPCDEC.
	CALL_PAL_PRIV(0x002E)
CallPal_OpcDec2E:
	br	r31, osfpal_calpal_opcdec

	CALL_PAL_PRIV(0x002F)
CallPal_OpcDec2F:
	br	r31, osfpal_calpal_opcdec
2868
2869
2870//
2871// swpctx - PALcode for swpctx instruction
2872//
2873// Entry:
2874// hardware dispatch via callPal instruction
2875// R16 -> new pcb
2876//
2877// Function:
2878// dynamic state moved to old pcb
2879// new state loaded from new pcb
2880// pcbb pointer set
2881// old pcbb returned in R0
2882//
2883// Note: need to add perf monitor stuff
2884//
2885
	CALL_PAL_PRIV(PAL_SWPCTX_ENTRY)
// SWPCTX: save dynamic state (ksp, usp, cycle count) into the old PCB,
// point pt_pcbb at the new PCB, return the old PCBB in r0, then
// continue in swpctx_cont (out of call_pal space).
Call_Pal_Swpctx:
	rpcc	r13			// get cyccounter
	mfpr	r0, pt_pcbb		// get pcbb (old PCB; also the return value)

	ldq_p	r22, osfpcb_q_fen(r16)	// get new fen/pme
	ldq_p	r23, osfpcb_l_cc(r16)	// quad load of cc field (presumably asn shares this quad -- confirm PCB layout)

	srl	r13, 32, r25		// move offset
	mfpr	r24, pt_usp		// get usp

	stq_p	r30, osfpcb_q_ksp(r0)	// store old ksp
// pvc_violate 379 // stq_p can't trap except replay.  only problem if mf same ipr in same shadow.
	mtpr	r16, pt_pcbb		// set new pcbb

	stq_p	r24, osfpcb_q_usp(r0)	// store usp
	addl	r13, r25, r25		// merge for new time

	stl_p	r25, osfpcb_l_cc(r0)	// save time
	ldah	r24, (1<<(icsr_v_fpe-16))(r31)

	and	r22, 1, r12		// isolate fen
	mfpr	r25, icsr		// get current icsr

	lda	r24, (1<<icsr_v_pmp)(r24) // r24 = FPE | PMP mask
	br	r31, swpctx_cont
2912
2913//
2914// wrval - PALcode for wrval instruction
2915//
2916// Entry:
2917// Vectored into via hardware PALcode instruction dispatch.
2918//
2919// Function:
2920// sysvalue <- a0 (r16)
2921//
2922
	CALL_PAL_PRIV(PAL_WRVAL_ENTRY)
// WRVAL: pt_sysval <- a0.
Call_Pal_Wrval:
	nop
	mtpr	r16, pt_sysval		// Pad paltemp write
	nop
	nop
	hw_rei
2930
2931//
2932// rdval - PALcode for rdval instruction
2933//
2934// Entry:
2935// Vectored into via hardware PALcode instruction dispatch.
2936//
2937// Function:
2938// v0 (r0) <- sysvalue
2939//
2940
	CALL_PAL_PRIV(PAL_RDVAL_ENTRY)
// RDVAL: v0 <- pt_sysval.
Call_Pal_Rdval:
	nop
	mfpr	r0, pt_sysval
	nop
	hw_rei
2947
2948//
2949// tbi - PALcode for tbi instruction
2950//
2951// Entry:
2952// Vectored into via hardware PALcode instruction dispatch.
2953//
2954// Function:
2955// TB invalidate
2956// r16/a0 = TBI type
2957// r17/a1 = Va for TBISx instructions
2958//
2959
	CALL_PAL_PRIV(PAL_TBI_ENTRY)
// TBI: dispatch through tbi_tbl, indexed by the TBI type in a0.
Call_Pal_Tbi:
	addq	r16, 2, r16		// bias TBI type so the valid range maps to table index 0..5
	br	r23, CALL_PAL_tbi_10_	// get our address

CALL_PAL_tbi_10_: cmpult r16, 6, r22	// see if in range
	lda	r23, tbi_tbl-CALL_PAL_tbi_10_(r23) // set base to start of table
	sll	r16, 4, r16		// * 16 (table entries are 16 bytes)
	blbc	r22, CALL_PAL_tbi_30_	// go rei, if not

	addq	r23, r16, r23		// addr of our code
//orig	pvc_jsr	tbi
	jmp	r31, (r23)		// and go do it

CALL_PAL_tbi_30_:
	hw_rei
	nop
2977
2978//
2979// wrent - PALcode for wrent instruction
2980//
2981// Entry:
2982// Vectored into via hardware PALcode instruction dispatch.
2983//
2984// Function:
2985// Update ent* in paltemps
2986// r16/a0 = Address of entry routine
2987// r17/a1 = Entry Number 0..5
2988//
2989// r22, r23 trashed
2990//
2991
	CALL_PAL_PRIV(PAL_WRENT_ENTRY)
// WRENT: store entry-point address a0 via wrent_tbl, indexed by entry
// number a1 (0..5); r22/r23 are trashed.
Call_Pal_Wrent:
	cmpult	r17, 6, r22		// see if in range
	br	r23, CALL_PAL_wrent_10_	// get our address

CALL_PAL_wrent_10_: bic r16, 3, r16	// clean pc
	blbc	r22, CALL_PAL_wrent_30_	// go rei, if not in range

	lda	r23, wrent_tbl-CALL_PAL_wrent_10_(r23) // set base to start of table
	sll	r17, 4, r17		// *16 (table entries are 16 bytes)

	addq	r17, r23, r23		// Get address in table
//orig	pvc_jsr	wrent
	jmp	r31, (r23)		// and go do it

CALL_PAL_wrent_30_:
	hw_rei				// out of range, just return
3009
3010//
3011// swpipl - PALcode for swpipl instruction
3012//
3013// Entry:
3014// Vectored into via hardware PALcode instruction dispatch.
3015//
3016// Function:
3017// v0 (r0) <- PS<IPL>
3018// PS<IPL> <- a0<2:0> (r16)
3019//
3020// t8 (r22) is scratch
3021//
3022
	CALL_PAL_PRIV(PAL_SWPIPL_ENTRY)
// SWPIPL: v0 <- old PS<IPL> (PALshadow r11); PS<IPL> <- a0<2:0>.
Call_Pal_Swpipl:
	and	r16, osfps_m_ipl, r16	// clean New ipl
	mfpr	r22, pt_intmask		// get int mask

	extbl	r22, r16, r22		// get mask for this ipl
	bis	r11, r31, r0		// return old ipl

	bis	r16, r31, r11		// set new ps
	mtpr	r22, ev5__ipl		// set new mask

	mfpr	r31, pt0		// pad ipl write
	mfpr	r31, pt0		// pad ipl write

	hw_rei				// back
3038
3039//
3040// rdps - PALcode for rdps instruction
3041//
3042// Entry:
3043// Vectored into via hardware PALcode instruction dispatch.
3044//
3045// Function:
3046// v0 (r0) <- ps
3047//
3048
	CALL_PAL_PRIV(PAL_RDPS_ENTRY)
// RDPS: v0 <- PS (kept in PALshadow r11).
Call_Pal_Rdps:
	bis	r11, r31, r0		// Fetch PALshadow PS
	nop				// Must be 2 cycles long
	hw_rei
3054
3055//
3056// wrkgp - PALcode for wrkgp instruction
3057//
3058// Entry:
3059// Vectored into via hardware PALcode instruction dispatch.
3060//
3061// Function:
3062// kgp <- a0 (r16)
3063//
3064
	CALL_PAL_PRIV(PAL_WRKGP_ENTRY)
// WRKGP: pt_kgp <- a0.
Call_Pal_Wrkgp:
	nop
	mtpr	r16, pt_kgp
	nop				// Pad for pt write->read restriction
	nop
	hw_rei
3072
3073//
3074// wrusp - PALcode for wrusp instruction
3075//
3076// Entry:
3077// Vectored into via hardware PALcode instruction dispatch.
3078//
3079// Function:
3080// usp <- a0 (r16)
3081//
3082
	CALL_PAL_PRIV(PAL_WRUSP_ENTRY)
// WRUSP: pt_usp <- a0.
Call_Pal_Wrusp:
	nop
	mtpr	r16, pt_usp
	nop				// Pad possible pt write->read restriction
	nop
	hw_rei
3090
3091//
3092// wrperfmon - PALcode for wrperfmon instruction
3093//
3094// Entry:
3095// Vectored into via hardware PALcode instruction dispatch.
3096//
3097//
3098// Function:
3099// Various control functions for the onchip performance counters
3100//
3101// option selector in r16
3102// option argument in r17
3103// returned status in r0
3104//
3105//
3106// r16 = 0 Disable performance monitoring for one or more cpu's
3107// r17 = 0 disable no counters
3108// r17 = bitmask disable counters specified in bit mask (1=disable)
3109//
3110// r16 = 1 Enable performance monitoring for one or more cpu's
3111// r17 = 0 enable no counters
3112// r17 = bitmask enable counters specified in bit mask (1=enable)
3113//
3114// r16 = 2 Mux select for one or more cpu's
3115// r17 = Mux selection (cpu specific)
3116// <24:19> bc_ctl<pm_mux_sel> field (see spec)
3117// <31>,<7:4>,<3:0> pmctr <sel0>,<sel1>,<sel2> fields (see spec)
3118//
3119// r16 = 3 Options
3120// r17 = (cpu specific)
3121// <0> = 0 log all processes
3122// <0> = 1 log only selected processes
3123// <30,9,8> mode select - ku,kp,kk
3124//
3125// r16 = 4 Interrupt frequency select
3126// r17 = (cpu specific) indicates interrupt frequencies desired for each
3127// counter, with "zero interrupts" being an option
3128// frequency info in r17 bits as defined by PMCTR_CTL<FRQx> below
3129//
3130// r16 = 5 Read Counters
3131// r17 = na
3132// r0 = value (same format as ev5 pmctr)
3133// <0> = 0 Read failed
3134// <0> = 1 Read succeeded
3135//
3136// r16 = 6 Write Counters
3137// r17 = value (same format as ev5 pmctr; all counters written simultaneously)
3138//
3139// r16 = 7 Enable performance monitoring for one or more cpu's and reset counter to 0
3140// r17 = 0 enable no counters
3141// r17 = bitmask enable & clear counters specified in bit mask (1=enable & clear)
3142//
3143//=============================================================================
3144//Assumptions:
3145//PMCTR_CTL:
3146//
3147// <15:14> CTL0 -- encoded frequency select and enable - CTR0
3148// <13:12> CTL1 -- " - CTR1
3149// <11:10> CTL2 -- " - CTR2
3150//
3151// <9:8> FRQ0 -- frequency select for CTR0 (no enable info)
3152// <7:6> FRQ1 -- frequency select for CTR1
3153// <5:4> FRQ2 -- frequency select for CTR2
3154//
3155// <0> all vs. select processes (0=all,1=select)
3156//
3157// where
3158// FRQx<1:0>
3159// 0 1 disable interrupt
3160// 1 0 frequency = 65536 (16384 for ctr2)
3161// 1 1 frequency = 256
3162// note: FRQx<1:0> = 00 will keep counters from ever being enabled.
3163//
3164//=============================================================================
3165//
	CALL_PAL_PRIV(0x0039)
// unsupported in Hudson code .. pboyle Nov/95
CALL_PAL_Wrperfmon:
	// "real" performance monitoring code
	// Dispatch on the function selector in r16 (see header above for codes 0..7).
	cmpeq	r16, 1, r0		// check for enable
	bne	r0, perfmon_en		// br if requested to enable

	cmpeq	r16, 2, r0		// check for mux ctl
	bne	r0, perfmon_muxctl	// br if request to set mux controls

	cmpeq	r16, 3, r0		// check for options
	bne	r0, perfmon_ctl		// br if request to set options

	cmpeq	r16, 4, r0		// check for interrupt frequency select
	bne	r0, perfmon_freq	// br if request to change frequency select

	cmpeq	r16, 5, r0		// check for counter read request
	bne	r0, perfmon_rd		// br if request to read counters

	cmpeq	r16, 6, r0		// check for counter write request
	bne	r0, perfmon_wr		// br if request to write counters

	cmpeq	r16, 7, r0		// check for counter clear/enable request
	bne	r0, perfmon_enclr	// br if request to clear/enable counters

	beq	r16, perfmon_dis	// br if requested to disable (r16=0)
	br	r31, perfmon_unknown	// br if unknown request
3193
3194//
3195// rdusp - PALcode for rdusp instruction
3196//
3197// Entry:
3198// Vectored into via hardware PALcode instruction dispatch.
3199//
3200// Function:
3201// v0 (r0) <- usp
3202//
3203
	CALL_PAL_PRIV(PAL_RDUSP_ENTRY)
// RDUSP: v0 <- pt_usp.
Call_Pal_Rdusp:
	nop
	mfpr	r0, pt_usp
	hw_rei
3209
3210
// CALL_PAL function code 0x3B: unimplemented, vector to OPCDEC.
	CALL_PAL_PRIV(0x003B)
CallPal_OpcDec3B:
	br	r31, osfpal_calpal_opcdec
3214
3215//
3216// whami - PALcode for whami instruction
3217//
3218// Entry:
3219// Vectored into via hardware PALcode instruction dispatch.
3220//
3221// Function:
3222// v0 (r0) <- whami
3223//
	CALL_PAL_PRIV(PAL_WHAMI_ENTRY)
// WHAMI: v0 <- processor id (byte 1 of pt_whami).
Call_Pal_Whami:
	nop
	mfpr	r0, pt_whami		// Get Whami
	extbl	r0, 1, r0		// Isolate just whami bits
	hw_rei
3230
3231//
3232// retsys - PALcode for retsys instruction
3233//
3234// Entry:
3235// Vectored into via hardware PALcode instruction dispatch.
3236// 00(sp) contains return pc
3237// 08(sp) contains r29
3238//
3239// Function:
3240// Return from system call.
3241// mode switched from kern to user.
3242// stacks swapped, ugp, upc restored.
3243// r23, r25 junked
3244//
3245
	CALL_PAL_PRIV(PAL_RETSYS_ENTRY)
// RETSYS: pop the kernel syscall frame, switch mode to user, swap to
// the user stack, and hw_rei to the saved PC; r23/r25 are junked.
Call_Pal_Retsys:
	lda	r25, osfsf_c_size(sp)	// pop stack
	bis	r25, r31, r14		// touch r25 & r14 to stall mf exc_addr

	mfpr	r14, exc_addr		// save exc_addr in case of fault
	ldq	r23, osfsf_pc(sp)	// get pc

	ldq	r29, osfsf_gp(sp)	// get gp
	stl_c	r31, -4(sp)		// clear lock_flag

	lda	r11, 1<<osfps_v_mode(r31)// new PS:mode=user
	mfpr	r30, pt_usp		// get users stack

	bic	r23, 3, r23		// clean return pc
	mtpr	r31, ev5__ipl		// zero ibox IPL - 2 bubbles to hw_rei

	mtpr	r11, ev5__dtb_cm	// set Mbox current mode - no virt ref for 2 cycles
	mtpr	r11, ev5__ps		// set Ibox current mode - 2 bubble to hw_rei

	mtpr	r23, exc_addr		// set return address - 1 bubble to hw_rei
	mtpr	r25, pt_ksp		// save kern stack

	rc	r31			// clear inter_flag
// pvc_violate 248 // possible hidden mt->mf pt violation ok in callpal
	hw_rei_spe			// and back
3272
3273
// CALL_PAL function code 0x3E: unimplemented, vector to OPCDEC.
	CALL_PAL_PRIV(0x003E)
CallPal_OpcDec3E:
	br	r31, osfpal_calpal_opcdec
3277
3278//
3279// rti - PALcode for rti instruction
3280//
3281// Entry:
3282// Vectored into via hardware PALcode instruction dispatch.
3283//
3284// Function:
3285// 00(sp) -> ps
3286// 08(sp) -> pc
3287// 16(sp) -> r29 (gp)
3288// 24(sp) -> r16 (a0)
3289// 32(sp) -> r17 (a1)
// 40(sp) -> r18 (a2)
3291//
3292
	CALL_PAL_PRIV(PAL_RTI_ENTRY)
	/* called once by platform_tlaser */
// RTI: unwind the interrupt stack frame (ps, pc, gp, a0-a2), then
// branch to the kernel- or user-mode return path based on saved PS.
	.globl Call_Pal_Rti
Call_Pal_Rti:
	lda	r25, osfsf_c_size(sp)	// get updated sp
	bis	r25, r31, r14		// touch r14,r25 to stall mf exc_addr

	mfpr	r14, exc_addr		// save PC in case of fault
	rc	r31			// clear intr_flag

	ldq	r12, -6*8(r25)		// get ps
	ldq	r13, -5*8(r25)		// pc

	ldq	r18, -1*8(r25)		// a2
	ldq	r17, -2*8(r25)		// a1

	ldq	r16, -3*8(r25)		// a0
	ldq	r29, -4*8(r25)		// gp

	bic	r13, 3, r13		// clean return pc
	stl_c	r31, -4(r25)		// clear lock_flag

	and	r12, osfps_m_mode, r11	// get mode
	mtpr	r13, exc_addr		// set return address

	beq	r11, rti_to_kern	// br if rti to Kern
	br	r31, rti_to_user	// out of call_pal space
3320
3321
3322///////////////////////////////////////////////////
3323// Start the Unprivileged CALL_PAL Entry Points
3324///////////////////////////////////////////////////
3325
3326//
3327// bpt - PALcode for bpt instruction
3328//
3329// Entry:
3330// Vectored into via hardware PALcode instruction dispatch.
3331//
3332// Function:
3333// Build stack frame
3334// a0 <- code
3335// a1 <- unpred
3336// a2 <- unpred
3337// vector via entIF
3338//
3339//
3340//
// Breakpoint instruction handler. If taken from user mode, swaps to the
// kernel stack (saving usp in pt_usp) and sets PS to kernel; builds the
// start of an OSF stack frame (a0 = osf_a0_bpt), then joins the common
// bpt/bugchk/gentrap tail (bpt_bchk_common) outside the dispatch region.
3341 .text 1
3342// . = 0x3000
3343 CALL_PAL_UNPRIV(PAL_BPT_ENTRY)
3344Call_Pal_Bpt:
3345 sll r11, 63-osfps_v_mode, r25 // Shift mode up to MS bit
3346 mtpr r31, ev5__ps // Set Ibox current mode to kernel
3347
3348 bis r11, r31, r12 // Save PS for stack write
3349 bge r25, CALL_PAL_bpt_10_ // no stack swap needed if cm=kern
3350
3351 mtpr r31, ev5__dtb_cm // Set Mbox current mode to kernel -
3352 // no virt ref for next 2 cycles
3353 mtpr r30, pt_usp // save user stack
3354
3355 bis r31, r31, r11 // Set new PS
3356 mfpr r30, pt_ksp
3357
3358CALL_PAL_bpt_10_:
3359 lda sp, 0-osfsf_c_size(sp)// allocate stack space
3360 mfpr r14, exc_addr // get pc
3361
3362 stq r16, osfsf_a0(sp) // save regs
3363 bis r31, osf_a0_bpt, r16 // set a0
3364
3365 stq r17, osfsf_a1(sp) // a1
3366 br r31, bpt_bchk_common // out of call_pal space
3367
3368
3369//
3370// bugchk - PALcode for bugchk instruction
3371//
3372// Entry:
3373// Vectored into via hardware PALcode instruction dispatch.
3374//
3375// Function:
3376// Build stack frame
3377// a0 <- code
3378// a1 <- unpred
3379// a2 <- unpred
3380// vector via entIF
3381//
3382//
3383//
// Bugcheck instruction handler. Identical structure to Call_Pal_Bpt except
// a0 is loaded with osf_a0_bugchk; shares the common tail bpt_bchk_common.
3384 CALL_PAL_UNPRIV(PAL_BUGCHK_ENTRY)
3385Call_Pal_Bugchk:
3386 sll r11, 63-osfps_v_mode, r25 // Shift mode up to MS bit
3387 mtpr r31, ev5__ps // Set Ibox current mode to kernel
3388
3389 bis r11, r31, r12 // Save PS for stack write
3390 bge r25, CALL_PAL_bugchk_10_ // no stack swap needed if cm=kern
3391
3392 mtpr r31, ev5__dtb_cm // Set Mbox current mode to kernel -
3393 // no virt ref for next 2 cycles
3394 mtpr r30, pt_usp // save user stack
3395
3396 bis r31, r31, r11 // Set new PS
3397 mfpr r30, pt_ksp
3398
3399CALL_PAL_bugchk_10_:
3400 lda sp, 0-osfsf_c_size(sp)// allocate stack space
3401 mfpr r14, exc_addr // get pc
3402
3403 stq r16, osfsf_a0(sp) // save regs
3404 bis r31, osf_a0_bugchk, r16 // set a0
3405
3406 stq r17, osfsf_a1(sp) // a1
3407 br r31, bpt_bchk_common // out of call_pal space
3408
3409
// Unprivileged CALL_PAL function code 0x82 is unimplemented: OPCDEC.
3410 CALL_PAL_UNPRIV(0x0082)
3411CallPal_OpcDec82:
3412 br r31, osfpal_calpal_opcdec
3413
3414//
3415// callsys - PALcode for callsys instruction
3416//
3417// Entry:
3418// Vectored into via hardware PALcode instruction dispatch.
3419//
3420// Function:
3421// Switch mode to kernel and build a callsys stack frame.
3422// sp = ksp
3423// gp = kgp
3424// t8 - t10 (r22-r24) trashed
3425//
3426//
3427//
// System-call entry. Rejects callsys issued from kernel mode
// (sys_from_kern); otherwise switches to kernel mode/stack, pushes
// gp, ps and pc onto a new kernel-stack frame, loads the kernel gp,
// and hw_rei's directly to the OS entSys handler (pt_entsys).
3428 CALL_PAL_UNPRIV(PAL_CALLSYS_ENTRY)
3429Call_Pal_Callsys:
3430
3431 and r11, osfps_m_mode, r24 // get mode
3432 mfpr r22, pt_ksp // get ksp
3433
3434 beq r24, sys_from_kern // sysCall from kern is not allowed
3435 mfpr r12, pt_entsys // get address of callSys routine
3436
3437//
3438// from here on we know we are in user going to Kern
3439//
3440 mtpr r31, ev5__dtb_cm // set Mbox current mode - no virt ref for 2 cycles
3441 mtpr r31, ev5__ps // set Ibox current mode - 2 bubble to hw_rei
3442
3443 bis r31, r31, r11 // PS=0 (mode=kern)
3444 mfpr r23, exc_addr // get pc
3445
3446 mtpr r30, pt_usp // save usp
3447 lda sp, 0-osfsf_c_size(r22)// set new sp
3448
3449 stq r29, osfsf_gp(sp) // save user gp/r29
3450 stq r24, osfsf_ps(sp) // save ps
3451
3452 stq r23, osfsf_pc(sp) // save pc
3453 mtpr r12, exc_addr // set address
3454 // 1 cycle to hw_rei
3455
3456 mfpr r29, pt_kgp // get the kern gp/r29
3457
3458 hw_rei_spe // and off we go!
3459
3460
// Unimplemented unprivileged CALL_PAL function codes 0x84-0x85: OPCDEC.
3461 CALL_PAL_UNPRIV(0x0084)
3462CallPal_OpcDec84:
3463 br r31, osfpal_calpal_opcdec
3464
3465 CALL_PAL_UNPRIV(0x0085)
3466CallPal_OpcDec85:
3467 br r31, osfpal_calpal_opcdec
3468
3469//
3470// imb - PALcode for imb instruction
3471//
3472// Entry:
3473// Vectored into via hardware PALcode instruction dispatch.
3474//
3475// Function:
3476// Flush the writebuffer and flush the Icache
3477//
3478//
3479//
// imb: instruction-memory barrier. Drains the write buffer (mb +
// mcsr read as a sync), then branches to the common Icache flush.
3480 CALL_PAL_UNPRIV(PAL_IMB_ENTRY)
3481Call_Pal_Imb:
3482 mb // Clear the writebuffer
3483 mfpr r31, ev5__mcsr // Sync with clear
3484 nop
3485 nop
3486 br r31, pal_ic_flush // Flush Icache
3487
3488
3489// CALL_PAL OPCDECs
3490
// Unimplemented unprivileged CALL_PAL function codes 0x87-0x9D:
// each dispatch slot simply branches to the common OPCDEC path.
3491 CALL_PAL_UNPRIV(0x0087)
3492CallPal_OpcDec87:
3493 br r31, osfpal_calpal_opcdec
3494
3495 CALL_PAL_UNPRIV(0x0088)
3496CallPal_OpcDec88:
3497 br r31, osfpal_calpal_opcdec
3498
3499 CALL_PAL_UNPRIV(0x0089)
3500CallPal_OpcDec89:
3501 br r31, osfpal_calpal_opcdec
3502
3503 CALL_PAL_UNPRIV(0x008A)
3504CallPal_OpcDec8A:
3505 br r31, osfpal_calpal_opcdec
3506
3507 CALL_PAL_UNPRIV(0x008B)
3508CallPal_OpcDec8B:
3509 br r31, osfpal_calpal_opcdec
3510
3511 CALL_PAL_UNPRIV(0x008C)
3512CallPal_OpcDec8C:
3513 br r31, osfpal_calpal_opcdec
3514
3515 CALL_PAL_UNPRIV(0x008D)
3516CallPal_OpcDec8D:
3517 br r31, osfpal_calpal_opcdec
3518
3519 CALL_PAL_UNPRIV(0x008E)
3520CallPal_OpcDec8E:
3521 br r31, osfpal_calpal_opcdec
3522
3523 CALL_PAL_UNPRIV(0x008F)
3524CallPal_OpcDec8F:
3525 br r31, osfpal_calpal_opcdec
3526
3527 CALL_PAL_UNPRIV(0x0090)
3528CallPal_OpcDec90:
3529 br r31, osfpal_calpal_opcdec
3530
3531 CALL_PAL_UNPRIV(0x0091)
3532CallPal_OpcDec91:
3533 br r31, osfpal_calpal_opcdec
3534
3535 CALL_PAL_UNPRIV(0x0092)
3536CallPal_OpcDec92:
3537 br r31, osfpal_calpal_opcdec
3538
3539 CALL_PAL_UNPRIV(0x0093)
3540CallPal_OpcDec93:
3541 br r31, osfpal_calpal_opcdec
3542
3543 CALL_PAL_UNPRIV(0x0094)
3544CallPal_OpcDec94:
3545 br r31, osfpal_calpal_opcdec
3546
3547 CALL_PAL_UNPRIV(0x0095)
3548CallPal_OpcDec95:
3549 br r31, osfpal_calpal_opcdec
3550
3551 CALL_PAL_UNPRIV(0x0096)
3552CallPal_OpcDec96:
3553 br r31, osfpal_calpal_opcdec
3554
3555 CALL_PAL_UNPRIV(0x0097)
3556CallPal_OpcDec97:
3557 br r31, osfpal_calpal_opcdec
3558
3559 CALL_PAL_UNPRIV(0x0098)
3560CallPal_OpcDec98:
3561 br r31, osfpal_calpal_opcdec
3562
3563 CALL_PAL_UNPRIV(0x0099)
3564CallPal_OpcDec99:
3565 br r31, osfpal_calpal_opcdec
3566
3567 CALL_PAL_UNPRIV(0x009A)
3568CallPal_OpcDec9A:
3569 br r31, osfpal_calpal_opcdec
3570
3571 CALL_PAL_UNPRIV(0x009B)
3572CallPal_OpcDec9B:
3573 br r31, osfpal_calpal_opcdec
3574
3575 CALL_PAL_UNPRIV(0x009C)
3576CallPal_OpcDec9C:
3577 br r31, osfpal_calpal_opcdec
3578
3579 CALL_PAL_UNPRIV(0x009D)
3580CallPal_OpcDec9D:
3581 br r31, osfpal_calpal_opcdec
3582
3583//
3584// rdunique - PALcode for rdunique instruction
3585//
3586// Entry:
3587// Vectored into via hardware PALcode instruction dispatch.
3588//
3589// Function:
3590// v0 (r0) <- unique
3591//
3592//
3593//
// rdunique: return the per-process unique value from the PCB in v0 (r0).
3594 CALL_PAL_UNPRIV(PAL_RDUNIQUE_ENTRY)
3595CALL_PALrdunique_:
3596 mfpr r0, pt_pcbb // get pcb pointer
3597 ldq_p r0, osfpcb_q_unique(r0) // get new value
3598
3599 hw_rei
3600
3601//
3602// wrunique - PALcode for wrunique instruction
3603//
3604// Entry:
3605// Vectored into via hardware PALcode instruction dispatch.
3606//
3607// Function:
3608// unique <- a0 (r16)
3609//
3610//
3611//
// wrunique: store a0 (r16) as the per-process unique value in the PCB.
3612CALL_PAL_UNPRIV(PAL_WRUNIQUE_ENTRY)
3613CALL_PAL_Wrunique:
3614 nop
3615 mfpr r12, pt_pcbb // get pcb pointer
3616 stq_p r16, osfpcb_q_unique(r12)// store new value
3617 nop // Pad palshadow write
3618 hw_rei // back
3619
3620// CALL_PAL OPCDECs
3621
// Unimplemented unprivileged CALL_PAL function codes 0xA0-0xA9: OPCDEC.
3622 CALL_PAL_UNPRIV(0x00A0)
3623CallPal_OpcDecA0:
3624 br r31, osfpal_calpal_opcdec
3625
3626 CALL_PAL_UNPRIV(0x00A1)
3627CallPal_OpcDecA1:
3628 br r31, osfpal_calpal_opcdec
3629
3630 CALL_PAL_UNPRIV(0x00A2)
3631CallPal_OpcDecA2:
3632 br r31, osfpal_calpal_opcdec
3633
3634 CALL_PAL_UNPRIV(0x00A3)
3635CallPal_OpcDecA3:
3636 br r31, osfpal_calpal_opcdec
3637
3638 CALL_PAL_UNPRIV(0x00A4)
3639CallPal_OpcDecA4:
3640 br r31, osfpal_calpal_opcdec
3641
3642 CALL_PAL_UNPRIV(0x00A5)
3643CallPal_OpcDecA5:
3644 br r31, osfpal_calpal_opcdec
3645
3646 CALL_PAL_UNPRIV(0x00A6)
3647CallPal_OpcDecA6:
3648 br r31, osfpal_calpal_opcdec
3649
3650 CALL_PAL_UNPRIV(0x00A7)
3651CallPal_OpcDecA7:
3652 br r31, osfpal_calpal_opcdec
3653
3654 CALL_PAL_UNPRIV(0x00A8)
3655CallPal_OpcDecA8:
3656 br r31, osfpal_calpal_opcdec
3657
3658 CALL_PAL_UNPRIV(0x00A9)
3659CallPal_OpcDecA9:
3660 br r31, osfpal_calpal_opcdec
3661
3662
3663//
3664// gentrap - PALcode for gentrap instruction
3665//
3666// CALL_PAL_gentrap:
3667// Entry:
3668// Vectored into via hardware PALcode instruction dispatch.
3669//
3670// Function:
3671// Build stack frame
3672// a0 <- code
3673// a1 <- unpred
3674// a2 <- unpred
3675// vector via entIF
3676//
3677//
3678
// gentrap handler. Same stack-swap/frame-build structure as
// Call_Pal_Bpt except a0 is loaded with osf_a0_gentrap; joins the
// common tail bpt_bchk_common.
3679 CALL_PAL_UNPRIV(0x00AA)
3680// unsupported in Hudson code .. pboyle Nov/95
3681CALL_PAL_gentrap:
3682 sll r11, 63-osfps_v_mode, r25 // Shift mode up to MS bit
3683 mtpr r31, ev5__ps // Set Ibox current mode to kernel
3684
3685 bis r11, r31, r12 // Save PS for stack write
3686 bge r25, CALL_PAL_gentrap_10_ // no stack swap needed if cm=kern
3687
3688 mtpr r31, ev5__dtb_cm // Set Mbox current mode to kernel -
3689 // no virt ref for next 2 cycles
3690 mtpr r30, pt_usp // save user stack
3691
3692 bis r31, r31, r11 // Set new PS
3693 mfpr r30, pt_ksp
3694
3695CALL_PAL_gentrap_10_:
3696 lda sp, 0-osfsf_c_size(sp)// allocate stack space
3697 mfpr r14, exc_addr // get pc
3698
3699 stq r16, osfsf_a0(sp) // save regs
3700 bis r31, osf_a0_gentrap, r16// set a0
3701
3702 stq r17, osfsf_a1(sp) // a1
3703 br r31, bpt_bchk_common // out of call_pal space
3704
3705
3706// CALL_PAL OPCDECs
3707
// Unimplemented unprivileged CALL_PAL function codes 0xAB-0xBE: OPCDEC.
// Function code 0xBF is repurposed (see final slot) as the entry for the
// M5 copy PALcode routine, copypal_impl.
3708 CALL_PAL_UNPRIV(0x00AB)
3709CallPal_OpcDecAB:
3710 br r31, osfpal_calpal_opcdec
3711
3712 CALL_PAL_UNPRIV(0x00AC)
3713CallPal_OpcDecAC:
3714 br r31, osfpal_calpal_opcdec
3715
3716 CALL_PAL_UNPRIV(0x00AD)
3717CallPal_OpcDecAD:
3718 br r31, osfpal_calpal_opcdec
3719
3720 CALL_PAL_UNPRIV(0x00AE)
3721CallPal_OpcDecAE:
3722 br r31, osfpal_calpal_opcdec
3723
3724 CALL_PAL_UNPRIV(0x00AF)
3725CallPal_OpcDecAF:
3726 br r31, osfpal_calpal_opcdec
3727
3728 CALL_PAL_UNPRIV(0x00B0)
3729CallPal_OpcDecB0:
3730 br r31, osfpal_calpal_opcdec
3731
3732 CALL_PAL_UNPRIV(0x00B1)
3733CallPal_OpcDecB1:
3734 br r31, osfpal_calpal_opcdec
3735
3736 CALL_PAL_UNPRIV(0x00B2)
3737CallPal_OpcDecB2:
3738 br r31, osfpal_calpal_opcdec
3739
3740 CALL_PAL_UNPRIV(0x00B3)
3741CallPal_OpcDecB3:
3742 br r31, osfpal_calpal_opcdec
3743
3744 CALL_PAL_UNPRIV(0x00B4)
3745CallPal_OpcDecB4:
3746 br r31, osfpal_calpal_opcdec
3747
3748 CALL_PAL_UNPRIV(0x00B5)
3749CallPal_OpcDecB5:
3750 br r31, osfpal_calpal_opcdec
3751
3752 CALL_PAL_UNPRIV(0x00B6)
3753CallPal_OpcDecB6:
3754 br r31, osfpal_calpal_opcdec
3755
3756 CALL_PAL_UNPRIV(0x00B7)
3757CallPal_OpcDecB7:
3758 br r31, osfpal_calpal_opcdec
3759
3760 CALL_PAL_UNPRIV(0x00B8)
3761CallPal_OpcDecB8:
3762 br r31, osfpal_calpal_opcdec
3763
3764 CALL_PAL_UNPRIV(0x00B9)
3765CallPal_OpcDecB9:
3766 br r31, osfpal_calpal_opcdec
3767
3768 CALL_PAL_UNPRIV(0x00BA)
3769CallPal_OpcDecBA:
3770 br r31, osfpal_calpal_opcdec
3771
3772 CALL_PAL_UNPRIV(0x00BB)
3773CallPal_OpcDecBB:
3774 br r31, osfpal_calpal_opcdec
3775
3776 CALL_PAL_UNPRIV(0x00BC)
3777CallPal_OpcDecBC:
3778 br r31, osfpal_calpal_opcdec
3779
3780 CALL_PAL_UNPRIV(0x00BD)
3781CallPal_OpcDecBD:
3782 br r31, osfpal_calpal_opcdec
3783
3784 CALL_PAL_UNPRIV(0x00BE)
3785CallPal_OpcDecBE:
3786 br r31, osfpal_calpal_opcdec
3787
3788 CALL_PAL_UNPRIV(0x00BF)
3789CallPal_OpcDecBF:
3790 // MODIFIED BY EGH 2/25/04
3791 br r31, copypal_impl
3792
3793
3794/*======================================================================*/
3795/* OSF/1 CALL_PAL CONTINUATION AREA */
3796/*======================================================================*/
3797
// MTPR_PERFMON continuation: program the performance-counter input mux
// selects. a1 (r17) supplies the new select bits; the Ibox PMCTR select
// fields are read-modify-written, then the Cbox BC_CTL pm_mux_sel field
// is updated via its shadow copy in the impure area (BC_CTL itself is
// write-only, hence the RESTORE/SAVE_SHADOW bookkeeping).
3798 .text 2
3799
3800 . = 0x4000
3801
3802
3803// Continuation of MTPR_PERFMON
3804 ALIGN_BLOCK
3805 // "real" performance monitoring code
3806// mux ctl
3807perfmon_muxctl:
3808 lda r8, 1(r31) // get a 1
3809 sll r8, pmctr_v_sel0, r8 // move to sel0 position
3810 or r8, ((0xf<<pmctr_v_sel1) | (0xf<<pmctr_v_sel2)), r8 // build mux select mask
3811 and r17, r8, r25 // isolate pmctr mux select bits
3812 mfpr r0, ev5__pmctr
3813 bic r0, r8, r0 // clear old mux select bits
3814 or r0,r25, r25 // or in new mux select bits
3815 mtpr r25, ev5__pmctr
3816
3817 // ok, now tackle cbox mux selects
3818 ldah r14, 0xfff0(r31)
3819 zap r14, 0xE0, r14 // Get Cbox IPR base
3820//orig get_bc_ctl_shadow r16 // bc_ctl returned in lower longword
3821// adapted from ev5_pal_macros.mar
3822 mfpr r16, pt_impure
3823 lda r16, CNS_Q_IPR(r16)
3824 RESTORE_SHADOW(r16,CNS_Q_BC_CTL,r16);
3825
3826 lda r8, 0x3F(r31) // build mux select mask
3827 sll r8, bc_ctl_v_pm_mux_sel, r8
3828
3829 and r17, r8, r25 // isolate bc_ctl mux select bits
3830 bic r16, r8, r16 // isolate old mux select bits
3831 or r16, r25, r25 // create new bc_ctl
3832 mb // clear out cbox for future ipr write
3833 stq_p r25, ev5__bc_ctl(r14) // store to cbox ipr
3834 mb // clear out cbox for future ipr write
3835
3836//orig update_bc_ctl_shadow r25, r16 // r25=value, r16-overwritten with adjusted impure ptr
3837// adapted from ev5_pal_macros.mar
3838 mfpr r16, pt_impure
3839 lda r16, CNS_Q_IPR(r16)
3840 SAVE_SHADOW(r25,CNS_Q_BC_CTL,r16);
3841
3842 br r31, perfmon_success
3843
3844
3845// requested to disable perf monitoring
// Disable performance counters. r17 holds a per-counter bitmask
// (bit 0 = ctr0, bit 1 = ctr1, bit 2 = ctr2), consumed one bit at a
// time by shifting right; each selected counter's 2-bit CTL field in
// PMCTR is cleared. The PM_CTL shadow in the impure area is then kept
// in sync (only needed on EV5 pass1, harmless later).
3846perfmon_dis:
3847 mfpr r14, ev5__pmctr // read ibox pmctr ipr
3848perfmon_dis_ctr0: // and begin with ctr0
3849 blbc r17, perfmon_dis_ctr1 // do not disable ctr0
3850 lda r8, 3(r31)
3851 sll r8, pmctr_v_ctl0, r8
3852 bic r14, r8, r14 // disable ctr0
3853perfmon_dis_ctr1:
3854 srl r17, 1, r17
3855 blbc r17, perfmon_dis_ctr2 // do not disable ctr1
3856 lda r8, 3(r31)
3857 sll r8, pmctr_v_ctl1, r8
3858 bic r14, r8, r14 // disable ctr1
3859perfmon_dis_ctr2:
3860 srl r17, 1, r17
3861 blbc r17, perfmon_dis_update // do not disable ctr2
3862 lda r8, 3(r31)
3863 sll r8, pmctr_v_ctl2, r8
3864 bic r14, r8, r14 // disable ctr2
3865perfmon_dis_update:
3866 mtpr r14, ev5__pmctr // update pmctr ipr
3867//;the following code is not needed for ev5 pass2 and later, but doesn't hurt anything to leave in
3868// adapted from ev5_pal_macros.mar
3869//orig get_pmctr_ctl r8, r25 // pmctr_ctl bit in r8. adjusted impure pointer in r25
3870 mfpr r25, pt_impure
3871 lda r25, CNS_Q_IPR(r25)
3872 RESTORE_SHADOW(r8,CNS_Q_PM_CTL,r25);
3873
3874 lda r17, 0x3F(r31) // build mask
3875 sll r17, pmctr_v_ctl2, r17 // shift mask to correct position
3876 and r14, r17, r14 // isolate ctl bits
3877 bic r8, r17, r8 // clear out old ctl bits
3878 or r14, r8, r14 // create shadow ctl bits
3879//orig store_reg1 pmctr_ctl, r14, r25, ipr=1 // update pmctr_ctl register
3880//adjusted impure pointer still in r25
3881 SAVE_SHADOW(r14,CNS_Q_PM_CTL,r25);
3882
3883 br r31, perfmon_success
3884
3885
3886// requested to enable perf monitoring
3887//;the following code can be greatly simplified for pass2, but should work fine as is.
3888
3889
// Enable (perfmon_en) or enable-and-clear (perfmon_enclr) the selected
// performance counters. r9 is the enclr flag; r17 is the per-counter
// select mask (shifted right as each counter is processed). The PME bit
// from the PCB is propagated into ICSR<PMP>, frequency bits come from the
// PM_CTL shadow, and the final PMCTR value is built in r13/r14 before a
// single mtpr. The PM_CTL shadow update at the end is pass1-only legacy.
3890perfmon_enclr:
3891 lda r9, 1(r31) // set enclr flag
3892 br perfmon_en_cont
3893
3894perfmon_en:
3895 bis r31, r31, r9 // clear enclr flag
3896
3897perfmon_en_cont:
3898 mfpr r8, pt_pcbb // get PCB base
3899//orig get_pmctr_ctl r25, r25
3900 mfpr r25, pt_impure
3901 lda r25, CNS_Q_IPR(r25)
3902 RESTORE_SHADOW(r25,CNS_Q_PM_CTL,r25);
3903
3904 ldq_p r16, osfpcb_q_fen(r8) // read DAT/PME/FEN quadword
3905 mfpr r14, ev5__pmctr // read ibox pmctr ipr
3906 srl r16, osfpcb_v_pme, r16 // get pme bit
3907 mfpr r13, icsr
3908 and r16, 1, r16 // isolate pme bit
3909
3910 // this code only needed in pass2 and later
3911 lda r12, 1<<icsr_v_pmp(r31) // pb
3912 bic r13, r12, r13 // clear pmp bit
3913 sll r16, icsr_v_pmp, r12 // move pme bit to icsr<pmp> position
3914 or r12, r13, r13 // new icsr with icsr<pmp> bit set/clear
3915 mtpr r13, icsr // update icsr
3916
3917 bis r31, 1, r16 // set r16<0> on pass2 to update pmctr always (icsr provides real enable)
3918
3919 sll r25, 6, r25 // shift frequency bits into pmctr_v_ctl positions
3920 bis r14, r31, r13 // copy pmctr
3921
3922perfmon_en_ctr0: // and begin with ctr0
3923 blbc r17, perfmon_en_ctr1 // do not enable ctr0
3924
3925 blbc r9, perfmon_en_noclr0 // if enclr flag set, clear ctr0 field
3926 lda r8, 0xffff(r31)
3927 zapnot r8, 3, r8 // ctr0<15:0> mask
3928 sll r8, pmctr_v_ctr0, r8
3929 bic r14, r8, r14 // clear ctr bits
3930 bic r13, r8, r13 // clear ctr bits
3931
3932perfmon_en_noclr0:
3933//orig get_addr r8, 3<<pmctr_v_ctl0, r31
3934 LDLI(r8, (3<<pmctr_v_ctl0))
3935 and r25, r8, r12 //isolate frequency select bits for ctr0
3936 bic r14, r8, r14 // clear ctl0 bits in preparation for enabling
3937 or r14,r12,r14 // or in new ctl0 bits
3938
3939perfmon_en_ctr1: // enable ctr1
3940 srl r17, 1, r17 // get ctr1 enable
3941 blbc r17, perfmon_en_ctr2 // do not enable ctr1
3942
3943 blbc r9, perfmon_en_noclr1 // if enclr flag set, clear ctr1 field
3944 lda r8, 0xffff(r31)
3945 zapnot r8, 3, r8 // ctr1<15:0> mask
3946 sll r8, pmctr_v_ctr1, r8
3947 bic r14, r8, r14 // clear ctr bits
3948 bic r13, r8, r13 // clear ctr bits
3949
3950perfmon_en_noclr1:
3951//orig get_addr r8, 3<<pmctr_v_ctl1, r31
3952 LDLI(r8, (3<<pmctr_v_ctl1))
3953 and r25, r8, r12 //isolate frequency select bits for ctr1
3954 bic r14, r8, r14 // clear ctl1 bits in preparation for enabling
3955 or r14,r12,r14 // or in new ctl1 bits
3956
3957perfmon_en_ctr2: // enable ctr2
3958 srl r17, 1, r17 // get ctr2 enable
3959 blbc r17, perfmon_en_return // do not enable ctr2 - return
3960
3961 blbc r9, perfmon_en_noclr2 // if enclr flag set, clear ctr2 field
3962 lda r8, 0x3FFF(r31) // ctr2<13:0> mask
3963 sll r8, pmctr_v_ctr2, r8
3964 bic r14, r8, r14 // clear ctr bits
3965 bic r13, r8, r13 // clear ctr bits
3966
3967perfmon_en_noclr2:
3968//orig get_addr r8, 3<<pmctr_v_ctl2, r31
3969 LDLI(r8, (3<<pmctr_v_ctl2))
3970 and r25, r8, r12 //isolate frequency select bits for ctr2
3971 bic r14, r8, r14 // clear ctl2 bits in preparation for enabling
3972 or r14,r12,r14 // or in new ctl2 bits
3973
3974perfmon_en_return:
3975 cmovlbs r16, r14, r13 // if pme enabled, move enables into pmctr
3976 // else only do the counter clears
3977 mtpr r13, ev5__pmctr // update pmctr ipr
3978
3979//;this code not needed for pass2 and later, but does not hurt to leave it in
3980 lda r8, 0x3F(r31)
3981//orig get_pmctr_ctl r25, r12 // read pmctr ctl; r12=adjusted impure pointer
3982 mfpr r12, pt_impure
3983 lda r12, CNS_Q_IPR(r12)
3984 RESTORE_SHADOW(r25,CNS_Q_PM_CTL,r12);
3985
3986 sll r8, pmctr_v_ctl2, r8 // build ctl mask
3987 and r8, r14, r14 // isolate new ctl bits
3988 bic r25, r8, r25 // clear out old ctl value
3989 or r25, r14, r14 // create new pmctr_ctl
3990//orig store_reg1 pmctr_ctl, r14, r12, ipr=1
3991 SAVE_SHADOW(r14,CNS_Q_PM_CTL,r12); // r12 still has the adjusted impure ptr
3992
3993 br r31, perfmon_success
3994
3995
3996// options...
// Set performance-monitoring mode. a1 (r17) carries the kill-mode bits
// (killu/killp/killk), which are read-modify-written into PMCTR; r17<0>
// also selects "select processes" mode, mirrored into ICSR<PMA> (pass2)
// and into bit 0 of the PM_CTL shadow (pass1 legacy).
3997perfmon_ctl:
3998
3999// set mode
4000//orig get_pmctr_ctl r14, r12 // read shadow pmctr ctl; r12=adjusted impure pointer
4001 mfpr r12, pt_impure
4002 lda r12, CNS_Q_IPR(r12)
4003 RESTORE_SHADOW(r14,CNS_Q_PM_CTL,r12);
4004
4005 // build mode mask for pmctr register
4006 LDLI(r8, ((1<<pmctr_v_killu) | (1<<pmctr_v_killp) | (1<<pmctr_v_killk)))
4007 mfpr r0, ev5__pmctr
4008 and r17, r8, r25 // isolate pmctr mode bits
4009 bic r0, r8, r0 // clear old mode bits
4010 or r0, r25, r25 // or in new mode bits
4011 mtpr r25, ev5__pmctr
4012
4013 // the following code will only be used in pass2, but should
4014 // not hurt anything if run in pass1.
4015 mfpr r8, icsr
4016 lda r25, 1<<icsr_v_pma(r31) // set icsr<pma> if r17<0>=0
4017 bic r8, r25, r8 // clear old pma bit
4018 cmovlbs r17, r31, r25 // and clear icsr<pma> if r17<0>=1
4019 or r8, r25, r8
4020 mtpr r8, icsr // 4 bubbles to hw_rei
4021 mfpr r31, pt0 // pad icsr write
4022 mfpr r31, pt0 // pad icsr write
4023
4024 // the following code not needed for pass2 and later, but
4025 // should work anyway.
4026 bis r14, 1, r14 // set for select processes
4027 blbs r17, perfmon_sp // branch if select processes
4028 bic r14, 1, r14 // all processes
4029perfmon_sp:
4030//orig store_reg1 pmctr_ctl, r14, r12, ipr=1 // update pmctr_ctl register
4031 SAVE_SHADOW(r14,CNS_Q_PM_CTL,r12); // r12 still has the adjusted impure ptr
4032 br r31, perfmon_success
4033
4034// counter frequency select
// Set the counter frequency-select field in the PM_CTL shadow.
// a1 (r17) supplies the new 6-bit frequency value, masked into the
// shadow's frequency field (shift amount pmctr_ctl_v_frq2_SHIFT).
4035perfmon_freq:
4036//orig get_pmctr_ctl r14, r12 // read shadow pmctr ctl; r12=adjusted impure pointer
4037 mfpr r12, pt_impure
4038 lda r12, CNS_Q_IPR(r12)
4039 RESTORE_SHADOW(r14,CNS_Q_PM_CTL,r12);
4040
4041 lda r8, 0x3F(r31)
4042//orig sll r8, pmctr_ctl_v_frq2, r8 // build mask for frequency select field
4043// I guess this should be a shift of 4 bits from the above control register structure
4044#define pmctr_ctl_v_frq2_SHIFT 4
4045 sll r8, pmctr_ctl_v_frq2_SHIFT, r8 // build mask for frequency select field
4046
4047 and r8, r17, r17
4048 bic r14, r8, r14 // clear out old frequency select bits
4049
4050 or r17, r14, r14 // or in new frequency select info
4051//orig store_reg1 pmctr_ctl, r14, r12, ipr=1 // update pmctr_ctl register
4052 SAVE_SHADOW(r14,CNS_Q_PM_CTL,r12); // r12 still has the adjusted impure ptr
4053
4054 br r31, perfmon_success
4055
4056// read counters
// Read counters: return the raw PMCTR value in v0 (r0) with bit 0
// forced to 1 as the success status.
4057perfmon_rd:
4058 mfpr r0, ev5__pmctr
4059 or r0, 1, r0 // or in return status
4060 hw_rei // back to user
4061
4062// write counters
// Write counters: replace only the ctr2/ctr1/ctr0 count fields of PMCTR
// with the corresponding fields of a1 (r17), leaving control bits intact.
4063perfmon_wr:
4064 mfpr r14, ev5__pmctr
4065 lda r8, 0x3FFF(r31) // ctr2<13:0> mask
4066 sll r8, pmctr_v_ctr2, r8
4067
4068 LDLI(r9, (0xFFFFFFFF)) // ctr1<15:0>,ctr0<15:0> mask
4069 sll r9, pmctr_v_ctr1, r9
4070 or r8, r9, r8 // or ctr2, ctr1, ctr0 mask
4071 bic r14, r8, r14 // clear ctr fields
4072 and r17, r8, r25 // clear all but ctr fields
4073 or r25, r14, r14 // write ctr fields
4074 mtpr r14, ev5__pmctr // update pmctr ipr
4075
4076 mfpr r31, pt0 // pad pmctr write (needed only to keep PVC happy)
4077
// Common perfmon exits: return 1 (success) or 0 (unknown subfunction)
// in v0 (r0).
4078perfmon_success:
4079 or r31, 1, r0 // set success
4080 hw_rei // back to user
4081
4082perfmon_unknown:
4083 or r31, r31, r0 // set fail
4084 hw_rei // back to user
4085
4086
4087//////////////////////////////////////////////////////////
4088// Copy code
4089//////////////////////////////////////////////////////////
4090
// M5 copy PALcode (reached via CALL_PAL 0xBF).
// Copies r18 bytes from source a1 (r17) to destination a0 (r16);
// the original destination pointer is returned in v0 (r0).
// Uses the Alpha byte-manipulation instructions (ldq_u/stq_u,
// extql/extqh, insql/insqh, mskql/mskqh) to handle all source/destination
// alignment combinations without unaligned faults.
4091copypal_impl:
4092 mov r16, r0
4093#ifdef CACHE_COPY
4094#ifndef CACHE_COPY_UNALIGNED
4095 and r16, 63, r8
4096 and r17, 63, r9
4097 bis r8, r9, r8
4098 bne r8, cache_copy_done
4099#endif
4100 bic r18, 63, r8
4101 and r18, 63, r18
4102 beq r8, cache_copy_done
4103cache_loop:
// NOTE(review): this loop loads from and stores to 0(r16) (the
// destination) while advancing both r16 and r17 by 64 — it touches the
// destination lines rather than copying from 0(r17), and only the
// remainder (r18 mod 64) reaches the copy code below. Confirm intent
// before ever enabling CACHE_COPY.
4104 ldf f17, 0(r16)
4105 stf f17, 0(r16)
4106 addq r17, 64, r17
4107 addq r16, 64, r16
4108 subq r8, 64, r8
4109 bne r8, cache_loop
4110cache_copy_done:
4111#endif
4112 ble r18, finished // if len <=0 we are finished
4113 ldq_u r8, 0(r17)
4114 xor r17, r16, r9
4115 and r9, 7, r9
4116 and r16, 7, r10
4117 bne r9, unaligned
// src and dst are mutually aligned; align to a quadword boundary first.
4118 beq r10, aligned
4119 ldq_u r9, 0(r16)
4120 addq r18, r10, r18
4121 mskqh r8, r17, r8
4122 mskql r9, r17, r9
4123 bis r8, r9, r8
4124aligned:
4125 subq r18, 1, r10
4126 bic r10, 7, r10 // r10 = whole quadwords to move
4127 and r18, 7, r18 // r18 = trailing byte count
4128 beq r10, aligned_done
4129loop:
4130 stq_u r8, 0(r16)
4131 ldq_u r8, 8(r17)
4132 subq r10, 8, r10
4133 lda r16,8(r16)
4134 lda r17,8(r17)
4135 bne r10, loop
4136aligned_done:
4137 bne r18, few_left
4138 stq_u r8, 0(r16)
4139 br r31, finished
// Merge the final partial quadword with the existing destination bytes.
4140 few_left:
4141 mskql r8, r18, r10
4142 ldq_u r9, 0(r16)
4143 mskqh r9, r18, r9
4144 bis r10, r9, r10
4145 stq_u r10, 0(r16)
4146 br r31, finished
// src/dst have different alignments: r25 = one past the source end.
4147unaligned:
4148 addq r17, r18, r25
4149 cmpule r18, 8, r9
4150 bne r9, unaligned_few_left
4151 beq r10, unaligned_dest_aligned
// First, produce one merged quadword to bring the destination up to
// a quadword boundary (r10 = bytes needed to reach alignment).
4152 and r16, 7, r10
4153 subq r31, r10, r10
4154 addq r10, 8, r10
4155 ldq_u r9, 7(r17)
4156 extql r8, r17, r8
4157 extqh r9, r17, r9
4158 bis r8, r9, r12
4159 insql r12, r16, r12
4160 ldq_u r13, 0(r16)
4161 mskql r13, r16, r13
4162 bis r12, r13, r12
4163 stq_u r12, 0(r16)
4164 addq r16, r10, r16
4165 addq r17, r10, r17
4166 subq r18, r10, r18
4167 ldq_u r8, 0(r17)
4168unaligned_dest_aligned:
4169 subq r18, 1, r10
4170 bic r10, 7, r10
4171 and r18, 7, r18
4172 beq r10, unaligned_partial_left
// Software-pipelined: alternate r8/r9 as the carried source quadword.
4173unaligned_loop:
4174 ldq_u r9, 7(r17)
4175 lda r17, 8(r17)
4176 extql r8, r17, r12
4177 extqh r9, r17, r13
4178 subq r10, 8, r10
4179 bis r12, r13, r13
4180 stq r13, 0(r16)
4181 lda r16, 8(r16)
4182 beq r10, unaligned_second_partial_left
4183 ldq_u r8, 7(r17)
4184 lda r17, 8(r17)
4185 extql r9, r17, r12
4186 extqh r8, r17, r13
4187 bis r12, r13, r13
4188 subq r10, 8, r10
4189 stq r13, 0(r16)
4190 lda r16, 8(r16)
4191 bne r10, unaligned_loop
4192unaligned_partial_left:
4193 mov r8, r9
4194unaligned_second_partial_left:
4195 ldq_u r8, -1(r25)
4196 extql r9, r17, r9
4197 extqh r8, r17, r8
4198 bis r8, r9, r8
4199 bne r18, few_left
4200 stq_u r8, 0(r16)
4201 br r31, finished
// Short (<= 8 byte) unaligned copy: build masks for the (possibly two)
// destination quadwords and merge the new bytes in.
4202unaligned_few_left:
4203 ldq_u r9, -1(r25)
4204 extql r8, r17, r8
4205 extqh r9, r17, r9
4206 bis r8, r9, r8
4207 insqh r8, r16, r9
4208 insql r8, r16, r8
4209 lda r12, -1(r31)
4210 mskql r12, r18, r13
4211 cmovne r13, r13, r12
4212 insqh r12, r16, r13
4213 insql r12, r16, r12
4214 addq r16, r18, r10
4215 ldq_u r14, 0(r16)
4216 ldq_u r25, -1(r10)
4217 bic r14, r12, r14
4218 bic r25, r13, r25
4219 and r8, r12, r8
4220 and r9, r13, r9
4221 bis r8, r14, r8
4222 bis r9, r25, r9
4223 stq_u r9, -1(r10)
4224 stq_u r8, 0(r16)
4225finished:
4226 hw_rei
30 */
31
32/*
33 * Copyright 1992, 1993, 1994, 1995 Hewlett-Packard Development
34 * Company, L.P.
35 *
36 * Permission is hereby granted, free of charge, to any person
37 * obtaining a copy of this software and associated documentation
38 * files (the "Software"), to deal in the Software without
39 * restriction, including without limitation the rights to use, copy,
40 * modify, merge, publish, distribute, sublicense, and/or sell copies
41 * of the Software, and to permit persons to whom the Software is
42 * furnished to do so, subject to the following conditions:
43 *
44 * The above copyright notice and this permission notice shall be
45 * included in all copies or substantial portions of the Software.
46 *
47 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
48 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
49 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
50 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
51 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
52 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
53 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
54 * SOFTWARE.
55 */
56
57// modified to use the Hudson style "impure.h" instead of ev5_impure.sdl
58// since we don't have a mechanism to expand the data structures.... pb Nov/95
59#include "ev5_defs.h"
60#include "ev5_impure.h"
61#include "ev5_alpha_defs.h"
62#include "ev5_paldef.h"
63#include "ev5_osfalpha_defs.h"
64#include "fromHudsonMacros.h"
65#include "fromHudsonOsf.h"
66#include "dc21164FromGasSources.h"
67
68#define DEBUGSTORE(c) nop
69
70#define DEBUG_EXC_ADDR()\
71 bsr r25, put_exc_addr; \
72 DEBUGSTORE(13) ; \
73 DEBUGSTORE(10)
74
75// This is the fix for the user-mode super page references causing the
76// machine to crash.
77#define hw_rei_spe hw_rei
78
79#define vmaj 1
80#define vmin 18
81#define vms_pal 1
82#define osf_pal 2
83#define pal_type osf_pal
84#define osfpal_version_l ((pal_type<<16) | (vmaj<<8) | (vmin<<0))
85
86
87///////////////////////////
88// PALtemp register usage
89///////////////////////////
90
91// The EV5 Ibox holds 24 PALtemp registers. This maps the OSF PAL usage
92// for these PALtemps:
93//
94// pt0 local scratch
95// pt1 local scratch
96// pt2 entUna pt_entUna
97// pt3 CPU specific impure area pointer pt_impure
98// pt4 memory management temp
99// pt5 memory management temp
100// pt6 memory management temp
101// pt7 entIF pt_entIF
102// pt8 intmask pt_intmask
103// pt9 entSys pt_entSys
104// pt10
105// pt11 entInt pt_entInt
106// pt12 entArith pt_entArith
107// pt13 reserved for system specific PAL
108// pt14 reserved for system specific PAL
109// pt15 reserved for system specific PAL
110// pt16 MISC: scratch ! WHAMI<7:0> ! 0 0 0 MCES<4:0> pt_misc, pt_whami,
111// pt_mces
112// pt17 sysval pt_sysval
113// pt18 usp pt_usp
114// pt19 ksp pt_ksp
115// pt20 PTBR pt_ptbr
116// pt21 entMM pt_entMM
117// pt22 kgp pt_kgp
118// pt23 PCBB pt_pcbb
119//
120//
121
122
123/////////////////////////////
124// PALshadow register usage
125/////////////////////////////
126
127//
128// EV5 shadows R8-R14 and R25 when in PALmode and ICSR<shadow_enable> = 1.
129// This maps the OSF PAL usage of R8 - R14 and R25:
130//
131// r8 ITBmiss/DTBmiss scratch
132// r9 ITBmiss/DTBmiss scratch
133// r10 ITBmiss/DTBmiss scratch
134// r11 PS
135// r12 local scratch
136// r13 local scratch
137// r14 local scratch
138// r25 local scratch
139//
140
141
142
143// .sbttl "PALcode configuration options"
144
145// There are a number of options that may be assembled into this version of
146// PALcode. They should be adjusted in a prefix assembly file (i.e. do not edit
147// the following). The options that can be adjusted cause the resultant PALcode
148// to reflect the desired target system.
149
150// multiprocessor support can be enabled for a max of n processors by
151// setting the following to the number of processors on the system.
152// Note that this is really the max cpuid.
153
154#define max_cpuid 1
155#ifndef max_cpuid
156#define max_cpuid 8
157#endif
158
159#define osf_svmin 1
160#define osfpal_version_h ((max_cpuid<<16) | (osf_svmin<<0))
161
162//
163// RESET - Reset Trap Entry Point
164//
165// RESET - offset 0000
166// Entry:
167// Vectored into via hardware trap on reset, or branched to
168// on swppal.
169//
170// r0 = whami
171// r1 = pal_base
172// r2 = base of scratch area
173// r3 = halt code
174//
175//
176// Function:
177//
178//
179
// Reset vector at PAL offset 0x0000. Branches to the platform reset code
// (sys_reset), capturing the vector PC in r1. The two version longwords
// at reset+8 are an architectural constant read by software; the two
// quadwords after them hold the impure-area and debug pointers.
180 .text 0
181 . = 0x0000
182 .globl _start
183 .globl Pal_Base
184_start:
185Pal_Base:
186 HDW_VECTOR(PAL_RESET_ENTRY)
187Trap_Reset:
188 nop
189 /*
190 * store into r1
191 */
192 br r1,sys_reset
193
194 // Specify PAL version info as a constant
195 // at a known location (reset + 8).
196
197 .long osfpal_version_l // <pal_type@16> ! <vmaj@8> ! <vmin@0>
198 .long osfpal_version_h // <max_cpuid@16> ! <osf_svmin@0>
199 .long 0
200 .long 0
201pal_impure_start:
202 .quad 0
203pal_debug_ptr:
204 .quad 0 // reserved for debug pointer ; 20
205
206
207//
208// IACCVIO - Istream Access Violation Trap Entry Point
209//
210// IACCVIO - offset 0080
211// Entry:
212// Vectored into via hardware trap on Istream access violation or sign check error on PC.
213//
214// Function:
215// Build stack frame
216// a0 <- Faulting VA
217// a1 <- MMCSR (1 for ACV)
218// a2 <- -1 (for ifetch fault)
219// vector via entMM
220//
221
// Istream access violation (offset 0x0080). Swaps to the kernel stack if
// taken from user mode, builds the OSF frame with a0 = faulting PC,
// a1 = mmcsr ACV code, a2 = -1 (ifetch fault), and hw_rei's to entMM.
// Note: r12 carries the NEW ps throughout (old ps is stored from r11
// before r11 is updated).
222 HDW_VECTOR(PAL_IACCVIO_ENTRY)
223Trap_Iaccvio:
224 DEBUGSTORE(0x42)
225 sll r11, 63-osfps_v_mode, r25 // Shift mode up to MS bit
226 mtpr r31, ev5__ps // Set Ibox current mode to kernel
227
228 bis r11, r31, r12 // Save PS
229 bge r25, TRAP_IACCVIO_10_ // no stack swap needed if cm=kern
230
231
232 mtpr r31, ev5__dtb_cm // Set Mbox current mode to kernel -
233 // no virt ref for next 2 cycles
234 mtpr r30, pt_usp // save user stack
235
236 bis r31, r31, r12 // Set new PS
237 mfpr r30, pt_ksp
238
239TRAP_IACCVIO_10_:
240 lda sp, 0-osfsf_c_size(sp)// allocate stack space
241 mfpr r14, exc_addr // get pc
242
243 stq r16, osfsf_a0(sp) // save regs
244 bic r14, 3, r16 // pass pc/va as a0
245
246 stq r17, osfsf_a1(sp) // a1
247 or r31, mmcsr_c_acv, r17 // pass mm_csr as a1
248
249 stq r18, osfsf_a2(sp) // a2
250 mfpr r13, pt_entmm // get entry point
251
252 stq r11, osfsf_ps(sp) // save old ps
253 bis r12, r31, r11 // update ps
254
255 stq r16, osfsf_pc(sp) // save pc
256 stq r29, osfsf_gp(sp) // save gp
257
258 mtpr r13, exc_addr // load exc_addr with entMM
259 // 1 cycle to hw_rei
260 mfpr r29, pt_kgp // get the kgp
261
262 subq r31, 1, r18 // pass flag of istream, as a2
263 hw_rei_spe
264
265
266//
267// INTERRUPT - Interrupt Trap Entry Point
268//
269// INTERRUPT - offset 0100
270// Entry:
271// Vectored into via trap on hardware interrupt
272//
273// Function:
274// check for halt interrupt
275// check for passive release (current ipl geq requestor)
276// if necessary, switch to kernel mode push stack frame,
277// update ps (including current mode and ipl copies), sp, and gp
278// pass the interrupt info to the system module
279//
280//
// Hardware interrupt entry (offset 0x0100). Checks for halt and passive
// release, swaps stacks on user-mode entry, pushes the frame, translates
// EV5 IPL (0x11..0x1f) into the OSF IPL kept in PS (with 0x1d-0x1f all
// mapping via the shifted value), reloads the Ibox IPL through the
// pt_intmask translation table, and branches to sys_interrupt.
281 HDW_VECTOR(PAL_INTERRUPT_ENTRY)
282Trap_Interrupt:
283 mfpr r13, ev5__intid // Fetch level of interruptor
284 mfpr r25, ev5__isr // Fetch interrupt summary register
285
286 srl r25, isr_v_hlt, r9 // Get HLT bit
287 mfpr r14, ev5__ipl
288
289 mtpr r31, ev5__dtb_cm // Set Mbox current mode to kern
290 blbs r9, sys_halt_interrupt // halt_interrupt if HLT bit set
291
292 cmple r13, r14, r8 // R8 = 1 if intid .less than or eql. ipl
293 bne r8, sys_passive_release // Passive release is current rupt is lt or eq ipl
294
295 and r11, osfps_m_mode, r10 // get mode bit
296 beq r10, TRAP_INTERRUPT_10_ // Skip stack swap in kernel
297
298 mtpr r30, pt_usp // save user stack
299 mfpr r30, pt_ksp // get kern stack
300
301TRAP_INTERRUPT_10_:
302 lda sp, (0-osfsf_c_size)(sp)// allocate stack space
303 mfpr r14, exc_addr // get pc
304
305 stq r11, osfsf_ps(sp) // save ps
306 stq r14, osfsf_pc(sp) // save pc
307
308 stq r29, osfsf_gp(sp) // push gp
309 stq r16, osfsf_a0(sp) // a0
310
311// pvc_violate 354 // ps is cleared anyway, if store to stack faults.
312 mtpr r31, ev5__ps // Set Ibox current mode to kernel
313 stq r17, osfsf_a1(sp) // a1
314
315 stq r18, osfsf_a2(sp) // a2
316 subq r13, 0x11, r12 // Start to translate from EV5IPL->OSFIPL
317
318 srl r12, 1, r8 // 1d, 1e: ipl 6. 1f: ipl 7.
319 subq r13, 0x1d, r9 // Check for 1d, 1e, 1f
320
321 cmovge r9, r8, r12 // if .ge. 1d, then take shifted value
322 bis r12, r31, r11 // set new ps
323
324 mfpr r12, pt_intmask
325 and r11, osfps_m_ipl, r14 // Isolate just new ipl (not really needed, since all non-ipl bits zeroed already)
326
327 /*
328 * Lance had space problems. We don't.
329 */
330 extbl r12, r14, r14 // Translate new OSFIPL->EV5IPL
331 mfpr r29, pt_kgp // update gp
332 mtpr r14, ev5__ipl // load the new IPL into Ibox
333 br r31, sys_interrupt // Go handle interrupt
334
335
336
337//
338// ITBMISS - Istream TBmiss Trap Entry Point
339//
340// ITBMISS - offset 0180
341// Entry:
342// Vectored into via hardware trap on Istream translation buffer miss.
343//
344// Function:
345// Do a virtual fetch of the PTE, and fill the ITB if the PTE is valid.
346// Can trap into DTBMISS_DOUBLE.
347// This routine can use the PALshadow registers r8, r9, and r10
348//
349//
350
351 HDW_VECTOR(PAL_ITB_MISS_ENTRY)
352Trap_Itbmiss:
353 // Real MM mapping
354 nop
355 mfpr r8, ev5__ifault_va_form // Get virtual address of PTE.
356
357 nop
358 mfpr r10, exc_addr // Get PC of faulting instruction in case of DTBmiss.
359
360pal_itb_ldq:
361 ld_vpte r8, 0(r8) // Get PTE, traps to DTBMISS_DOUBLE in case of TBmiss
362 mtpr r10, exc_addr // Restore exc_address if there was a trap.
363
364 mfpr r31, ev5__va // Unlock VA in case there was a double miss
365 nop
366
367 and r8, osfpte_m_foe, r25 // Look for FOE set.
368 blbc r8, invalid_ipte_handler // PTE not valid.
369
370 nop
371 bne r25, foe_ipte_handler // FOE is set
372
373 nop
374 mtpr r8, ev5__itb_pte // Ibox remembers the VA, load the PTE into the ITB.
375
376 hw_rei_stall //
377
378
379//
380// DTBMISS_SINGLE - Dstream Single TBmiss Trap Entry Point
381//
382// DTBMISS_SINGLE - offset 0200
383// Entry:
384// Vectored into via hardware trap on Dstream single translation
385// buffer miss.
386//
387// Function:
388// Do a virtual fetch of the PTE, and fill the DTB if the PTE is valid.
389// Can trap into DTBMISS_DOUBLE.
390// This routine can use the PALshadow registers r8, r9, and r10
391//
392
393 HDW_VECTOR(PAL_DTB_MISS_ENTRY)
394Trap_Dtbmiss_Single:
395 mfpr r8, ev5__va_form // Get virtual address of PTE - 1 cycle delay. E0.
396 mfpr r10, exc_addr // Get PC of faulting instruction in case of error. E1.
397
398// DEBUGSTORE(0x45)
399// DEBUG_EXC_ADDR()
400 // Real MM mapping
401 mfpr r9, ev5__mm_stat // Get read/write bit. E0.
402 mtpr r10, pt6 // Stash exc_addr away
403
404pal_dtb_ldq:
405 ld_vpte r8, 0(r8) // Get PTE, traps to DTBMISS_DOUBLE in case of TBmiss
406 nop // Pad MF VA
407
408 mfpr r10, ev5__va // Get original faulting VA for TB load. E0.
409 nop
410
411 mtpr r8, ev5__dtb_pte // Write DTB PTE part. E0.
412 blbc r8, invalid_dpte_handler // Handle invalid PTE
413
414 mtpr r10, ev5__dtb_tag // Write DTB TAG part, completes DTB load. No virt ref for 3 cycles.
415 mfpr r10, pt6
416
417 // Following 2 instructions take 2 cycles
418 mtpr r10, exc_addr // Return linkage in case we trapped. E1.
419 mfpr r31, pt0 // Pad the write to dtb_tag
420
421 hw_rei // Done, return
422
423
424//
425// DTBMISS_DOUBLE - Dstream Double TBmiss Trap Entry Point
426//
427//
428// DTBMISS_DOUBLE - offset 0280
429// Entry:
430// Vectored into via hardware trap on Double TBmiss from single
431// miss flows.
432//
433// r8 - faulting VA
434// r9 - original MMstat
435// r10 - original exc_addr (both itb,dtb miss)
436// pt6 - original exc_addr (dtb miss flow only)
437// VA IPR - locked with original faulting VA
438//
439// Function:
440// Get PTE, if valid load TB and return.
441// If not valid then take TNV/ACV exception.
442//
443// pt4 and pt5 are reserved for this flow.
444//
445//
446//
447
448 HDW_VECTOR(PAL_DOUBLE_MISS_ENTRY)
449Trap_Dtbmiss_double:
450 mtpr r8, pt4 // save r8 to do exc_addr check
451 mfpr r8, exc_addr
452 blbc r8, Trap_Dtbmiss_Single //if not in palmode, should be in the single routine, dummy!
453 mfpr r8, pt4 // restore r8
454 nop
455 mtpr r22, pt5 // Get some scratch space. E1.
456 // Due to virtual scheme, we can skip the first lookup and go
457 // right to fetch of level 2 PTE
458 sll r8, (64-((2*page_seg_size_bits)+page_offset_size_bits)), r22 // Clean off upper bits of VA
459 mtpr r21, pt4 // Get some scratch space. E1.
460
461 srl r22, 61-page_seg_size_bits, r22 // Get Va<seg1>*8
462 mfpr r21, pt_ptbr // Get physical address of the page table.
463
464 nop
465 addq r21, r22, r21 // Index into page table for level 2 PTE.
466
467 sll r8, (64-((1*page_seg_size_bits)+page_offset_size_bits)), r22 // Clean off upper bits of VA
468 ldq_p r21, 0(r21) // Get level 2 PTE (addr<2:0> ignored)
469
470 srl r22, 61-page_seg_size_bits, r22 // Get Va<seg1>*8
471 blbc r21, double_pte_inv // Check for Invalid PTE.
472
473 srl r21, 32, r21 // extract PFN from PTE
474 sll r21, page_offset_size_bits, r21 // get PFN * 2^13 for add to <seg3>*8
475
476 addq r21, r22, r21 // Index into page table for level 3 PTE.
477 nop
478
479 ldq_p r21, 0(r21) // Get level 3 PTE (addr<2:0> ignored)
480 blbc r21, double_pte_inv // Check for invalid PTE.
481
482 mtpr r21, ev5__dtb_pte // Write the PTE. E0.
483 mfpr r22, pt5 // Restore scratch register
484
485 mtpr r8, ev5__dtb_tag // Write the TAG. E0. No virtual references in subsequent 3 cycles.
486 mfpr r21, pt4 // Restore scratch register
487
488 nop // Pad write to tag.
489 nop
490
491 nop // Pad write to tag.
492 nop
493
494 hw_rei
495
496
497
498//
499// UNALIGN -- Dstream unalign trap
500//
501// UNALIGN - offset 0300
502// Entry:
503// Vectored into via hardware trap on unaligned Dstream reference.
504//
505// Function:
506// Build stack frame
507// a0 <- Faulting VA
508// a1 <- Opcode
509// a2 <- src/dst register number
510// vector via entUna
511//
512
513 HDW_VECTOR(PAL_UNALIGN_ENTRY)
514Trap_Unalign:
515/* DEBUGSTORE(0x47)*/
516 sll r11, 63-osfps_v_mode, r25 // Shift mode up to MS bit
517 mtpr r31, ev5__ps // Set Ibox current mode to kernel
518
519 mfpr r8, ev5__mm_stat // Get mmstat --ok to use r8, no tbmiss
520 mfpr r14, exc_addr // get pc
521
522 srl r8, mm_stat_v_ra, r13 // Shift Ra field to ls bits
523 blbs r14, pal_pal_bug_check // Bugcheck if unaligned in PAL
524
525 blbs r8, UNALIGN_NO_DISMISS // lsb only set on store or fetch_m
526 // not set, must be a load
527 and r13, 0x1F, r8 // isolate ra
528
529 cmpeq r8, 0x1F, r8 // check for r31/F31
530 bne r8, dfault_fetch_ldr31_err // if its a load to r31 or f31 -- dismiss the fault
531
532UNALIGN_NO_DISMISS:
533 bis r11, r31, r12 // Save PS
534 bge r25, UNALIGN_NO_DISMISS_10_ // no stack swap needed if cm=kern
535
536
537 mtpr r31, ev5__dtb_cm // Set Mbox current mode to kernel -
538 // no virt ref for next 2 cycles
539 mtpr r30, pt_usp // save user stack
540
541 bis r31, r31, r12 // Set new PS
542 mfpr r30, pt_ksp
543
544UNALIGN_NO_DISMISS_10_:
545 mfpr r25, ev5__va // Unlock VA
546 lda sp, 0-osfsf_c_size(sp)// allocate stack space
547
548 mtpr r25, pt0 // Stash VA
549 stq r18, osfsf_a2(sp) // a2
550
551 stq r11, osfsf_ps(sp) // save old ps
552 srl r13, mm_stat_v_opcode-mm_stat_v_ra, r25// Isolate opcode
553
554 stq r29, osfsf_gp(sp) // save gp
555 addq r14, 4, r14 // inc PC past the ld/st
556
557 stq r17, osfsf_a1(sp) // a1
558 and r25, mm_stat_m_opcode, r17// Clean opocde for a1
559
560 stq r16, osfsf_a0(sp) // save regs
561 mfpr r16, pt0 // a0 <- va/unlock
562
563 stq r14, osfsf_pc(sp) // save pc
564 mfpr r25, pt_entuna // get entry point
565
566
567 bis r12, r31, r11 // update ps
568 br r31, unalign_trap_cont
569
570
571//
572// DFAULT - Dstream Fault Trap Entry Point
573//
574// DFAULT - offset 0380
575// Entry:
576// Vectored into via hardware trap on dstream fault or sign check
577// error on DVA.
578//
579// Function:
580// Ignore faults on FETCH/FETCH_M
581// Check for DFAULT in PAL
582// Build stack frame
583// a0 <- Faulting VA
584// a1 <- MMCSR (1 for ACV, 2 for FOR, 4 for FOW)
585// a2 <- R/W
586// vector via entMM
587//
588//
589 HDW_VECTOR(PAL_D_FAULT_ENTRY)
590Trap_Dfault:
591// DEBUGSTORE(0x48)
592 sll r11, 63-osfps_v_mode, r25 // Shift mode up to MS bit
593 mtpr r31, ev5__ps // Set Ibox current mode to kernel
594
595 mfpr r13, ev5__mm_stat // Get mmstat
596 mfpr r8, exc_addr // get pc, preserve r14
597
598 srl r13, mm_stat_v_opcode, r9 // Shift opcode field to ls bits
599 blbs r8, dfault_in_pal
600
601 bis r8, r31, r14 // move exc_addr to correct place
602 bis r11, r31, r12 // Save PS
603
604 mtpr r31, ev5__dtb_cm // Set Mbox current mode to kernel -
605 // no virt ref for next 2 cycles
606 and r9, mm_stat_m_opcode, r9 // Clean all but opcode
607
608 cmpeq r9, evx_opc_sync, r9 // Is the opcode fetch/fetchm?
609 bne r9, dfault_fetch_ldr31_err // Yes, dismiss the fault
610
611 //dismiss exception if load to r31/f31
612 blbs r13, dfault_no_dismiss // mm_stat<0> set on store or fetchm
613
614 // not a store or fetch, must be a load
615 srl r13, mm_stat_v_ra, r9 // Shift rnum to low bits
616
617 and r9, 0x1F, r9 // isolate rnum
618 nop
619
620 cmpeq r9, 0x1F, r9 // Is the rnum r31 or f31?
621 bne r9, dfault_fetch_ldr31_err // Yes, dismiss the fault
622
623dfault_no_dismiss:
624 and r13, 0xf, r13 // Clean extra bits in mm_stat
625 bge r25, dfault_trap_cont // no stack swap needed if cm=kern
626
627
628 mtpr r30, pt_usp // save user stack
629 bis r31, r31, r12 // Set new PS
630
631 mfpr r30, pt_ksp
632 br r31, dfault_trap_cont
633
634
635//
636// MCHK - Machine Check Trap Entry Point
637//
638// MCHK - offset 0400
639// Entry:
640// Vectored into via hardware trap on machine check.
641//
642// Function:
643//
644//
645
646 HDW_VECTOR(PAL_MCHK_ENTRY)
647Trap_Mchk:
648 DEBUGSTORE(0x49)
649 mtpr r31, ic_flush_ctl // Flush the Icache
650 br r31, sys_machine_check
651
652
653//
654// OPCDEC - Illegal Opcode Trap Entry Point
655//
656// OPCDEC - offset 0480
657// Entry:
658// Vectored into via hardware trap on illegal opcode.
659//
660// Build stack frame
661// a0 <- code
662// a1 <- unpred
663// a2 <- unpred
664// vector via entIF
665//
666//
667
668 HDW_VECTOR(PAL_OPCDEC_ENTRY)
669Trap_Opcdec:
670 DEBUGSTORE(0x4a)
671//simos DEBUG_EXC_ADDR()
672 sll r11, 63-osfps_v_mode, r25 // Shift mode up to MS bit
673 mtpr r31, ev5__ps // Set Ibox current mode to kernel
674
675 mfpr r14, exc_addr // get pc
676 blbs r14, pal_pal_bug_check // check opcdec in palmode
677
678 bis r11, r31, r12 // Save PS
679 bge r25, TRAP_OPCDEC_10_ // no stack swap needed if cm=kern
680
681
682 mtpr r31, ev5__dtb_cm // Set Mbox current mode to kernel -
683 // no virt ref for next 2 cycles
684 mtpr r30, pt_usp // save user stack
685
686 bis r31, r31, r12 // Set new PS
687 mfpr r30, pt_ksp
688
689TRAP_OPCDEC_10_:
690 lda sp, 0-osfsf_c_size(sp)// allocate stack space
691 addq r14, 4, r14 // inc pc
692
693 stq r16, osfsf_a0(sp) // save regs
694 bis r31, osf_a0_opdec, r16 // set a0
695
696 stq r11, osfsf_ps(sp) // save old ps
697 mfpr r13, pt_entif // get entry point
698
699 stq r18, osfsf_a2(sp) // a2
700 stq r17, osfsf_a1(sp) // a1
701
702 stq r29, osfsf_gp(sp) // save gp
703 stq r14, osfsf_pc(sp) // save pc
704
705 bis r12, r31, r11 // update ps
706 mtpr r13, exc_addr // load exc_addr with entIF
707 // 1 cycle to hw_rei, E1
708
709 mfpr r29, pt_kgp // get the kgp, E1
710
711 hw_rei_spe // done, E1
712
713
714//
715// ARITH - Arithmetic Exception Trap Entry Point
716//
717// ARITH - offset 0500
718// Entry:
719// Vectored into via hardware trap on arithmetic excpetion.
720//
721// Function:
722// Build stack frame
723// a0 <- exc_sum
724// a1 <- exc_mask
725// a2 <- unpred
726// vector via entArith
727//
728//
729 HDW_VECTOR(PAL_ARITH_ENTRY)
730Trap_Arith:
731 DEBUGSTORE(0x4b)
732 and r11, osfps_m_mode, r12 // get mode bit
733 mfpr r31, ev5__va // unlock mbox
734
735 bis r11, r31, r25 // save ps
736 mfpr r14, exc_addr // get pc
737
738 nop
739 blbs r14, pal_pal_bug_check // arith trap from PAL
740
741 mtpr r31, ev5__dtb_cm // Set Mbox current mode to kernel -
742 // no virt ref for next 2 cycles
743 beq r12, TRAP_ARITH_10_ // if zero we are in kern now
744
745 bis r31, r31, r25 // set the new ps
746 mtpr r30, pt_usp // save user stack
747
748 nop
749 mfpr r30, pt_ksp // get kern stack
750
751TRAP_ARITH_10_: lda sp, 0-osfsf_c_size(sp) // allocate stack space
752 mtpr r31, ev5__ps // Set Ibox current mode to kernel
753
754 nop // Pad current mode write and stq
755 mfpr r13, ev5__exc_sum // get the exc_sum
756
757 mfpr r12, pt_entarith
758 stq r14, osfsf_pc(sp) // save pc
759
760 stq r17, osfsf_a1(sp)
761 mfpr r17, ev5__exc_mask // Get exception register mask IPR - no mtpr exc_sum in next cycle
762
763 stq r11, osfsf_ps(sp) // save ps
764 bis r25, r31, r11 // set new ps
765
766 stq r16, osfsf_a0(sp) // save regs
767 srl r13, exc_sum_v_swc, r16 // shift data to correct position
768
769 stq r18, osfsf_a2(sp)
770// pvc_violate 354 // ok, but make sure reads of exc_mask/sum are not in same trap shadow
771 mtpr r31, ev5__exc_sum // Unlock exc_sum and exc_mask
772
773 stq r29, osfsf_gp(sp)
774 mtpr r12, exc_addr // Set new PC - 1 bubble to hw_rei - E1
775
776 mfpr r29, pt_kgp // get the kern gp - E1
777 hw_rei_spe // done - E1
778
779
780//
781// FEN - Illegal Floating Point Operation Trap Entry Point
782//
783// FEN - offset 0580
784// Entry:
785// Vectored into via hardware trap on illegal FP op.
786//
787// Function:
788// Build stack frame
789// a0 <- code
790// a1 <- unpred
791// a2 <- unpred
792// vector via entIF
793//
794//
795
796 HDW_VECTOR(PAL_FEN_ENTRY)
797Trap_Fen:
798 sll r11, 63-osfps_v_mode, r25 // Shift mode up to MS bit
799 mtpr r31, ev5__ps // Set Ibox current mode to kernel
800
801 mfpr r14, exc_addr // get pc
802 blbs r14, pal_pal_bug_check // check opcdec in palmode
803
804 mfpr r13, ev5__icsr
805 nop
806
807 bis r11, r31, r12 // Save PS
808 bge r25, TRAP_FEN_10_ // no stack swap needed if cm=kern
809
810 mtpr r31, ev5__dtb_cm // Set Mbox current mode to kernel -
811 // no virt ref for next 2 cycles
812 mtpr r30, pt_usp // save user stack
813
814 bis r31, r31, r12 // Set new PS
815 mfpr r30, pt_ksp
816
817TRAP_FEN_10_:
818 lda sp, 0-osfsf_c_size(sp)// allocate stack space
819 srl r13, icsr_v_fpe, r25 // Shift FP enable to bit 0
820
821
822 stq r16, osfsf_a0(sp) // save regs
823 mfpr r13, pt_entif // get entry point
824
825 stq r18, osfsf_a2(sp) // a2
826 stq r11, osfsf_ps(sp) // save old ps
827
828 stq r29, osfsf_gp(sp) // save gp
829 bis r12, r31, r11 // set new ps
830
831 stq r17, osfsf_a1(sp) // a1
832 blbs r25,fen_to_opcdec // If FP is enabled, this is really OPCDEC.
833
834 bis r31, osf_a0_fen, r16 // set a0
835 stq r14, osfsf_pc(sp) // save pc
836
837 mtpr r13, exc_addr // load exc_addr with entIF
838 // 1 cycle to hw_rei -E1
839
840 mfpr r29, pt_kgp // get the kgp -E1
841
842 hw_rei_spe // done -E1
843
844// FEN trap was taken, but the fault is really opcdec.
845 ALIGN_BRANCH
846fen_to_opcdec:
847 addq r14, 4, r14 // save PC+4
848 bis r31, osf_a0_opdec, r16 // set a0
849
850 stq r14, osfsf_pc(sp) // save pc
851 mtpr r13, exc_addr // load exc_addr with entIF
852 // 1 cycle to hw_rei
853
854 mfpr r29, pt_kgp // get the kgp
855 hw_rei_spe // done
856
857
858
859//////////////////////////////////////////////////////////////////////////////
860// Misc handlers - Start area for misc code.
861//////////////////////////////////////////////////////////////////////////////
862
863//
864// dfault_trap_cont
865// A dfault trap has been taken. The sp has been updated if necessary.
866// Push a stack frame a vector via entMM.
867//
868// Current state:
869// r12 - new PS
870// r13 - MMstat
871// VA - locked
872//
873//
874 ALIGN_BLOCK
875dfault_trap_cont:
876 lda sp, 0-osfsf_c_size(sp)// allocate stack space
877 mfpr r25, ev5__va // Fetch VA/unlock
878
879 stq r18, osfsf_a2(sp) // a2
880 and r13, 1, r18 // Clean r/w bit for a2
881
882 stq r16, osfsf_a0(sp) // save regs
883 bis r25, r31, r16 // a0 <- va
884
885 stq r17, osfsf_a1(sp) // a1
886 srl r13, 1, r17 // shift fault bits to right position
887
888 stq r11, osfsf_ps(sp) // save old ps
889 bis r12, r31, r11 // update ps
890
891 stq r14, osfsf_pc(sp) // save pc
892 mfpr r25, pt_entmm // get entry point
893
894 stq r29, osfsf_gp(sp) // save gp
895 cmovlbs r17, 1, r17 // a2. acv overrides fox.
896
897 mtpr r25, exc_addr // load exc_addr with entMM
898 // 1 cycle to hw_rei
899 mfpr r29, pt_kgp // get the kgp
900
901 hw_rei_spe // done
902
903//
904//unalign_trap_cont
905// An unalign trap has been taken. Just need to finish up a few things.
906//
907// Current state:
908// r25 - entUna
909// r13 - shifted MMstat
910//
911//
912 ALIGN_BLOCK
913unalign_trap_cont:
914 mtpr r25, exc_addr // load exc_addr with entUna
915 // 1 cycle to hw_rei
916
917
918 mfpr r29, pt_kgp // get the kgp
919 and r13, mm_stat_m_ra, r18 // Clean Ra for a2
920
921 hw_rei_spe // done
922
923
924
925//
926// dfault_in_pal
927// Dfault trap was taken, exc_addr points to a PAL PC.
928// r9 - mmstat<opcode> right justified
929// r8 - exception address
930//
931// These are the cases:
932// opcode was STQ -- from a stack builder, KSP not valid halt
933// r14 - original exc_addr
934// r11 - original PS
935// opcode was STL_C -- rti or retsys clear lock_flag by stack write,
936// KSP not valid halt
937// r11 - original PS
938// r14 - original exc_addr
939// opcode was LDQ -- retsys or rti stack read, KSP not valid halt
940// r11 - original PS
941// r14 - original exc_addr
942// opcode was HW_LD -- itbmiss or dtbmiss, bugcheck due to fault on page tables
943// r10 - original exc_addr
944// r11 - original PS
945//
946//
947//
948 ALIGN_BLOCK
949dfault_in_pal:
950 DEBUGSTORE(0x50)
951 bic r8, 3, r8 // Clean PC
952 mfpr r9, pal_base
953
954 mfpr r31, va // unlock VA
955
956 // if not real_mm, should never get here from miss flows
957
958 subq r9, r8, r8 // pal_base - offset
959
960 lda r9, pal_itb_ldq-pal_base(r8)
961 nop
962
963 beq r9, dfault_do_bugcheck
964 lda r9, pal_dtb_ldq-pal_base(r8)
965
966 beq r9, dfault_do_bugcheck
967
968//
969// KSP invalid halt case --
970ksp_inval_halt:
971 DEBUGSTORE(76)
972 bic r11, osfps_m_mode, r11 // set ps to kernel mode
973 mtpr r0, pt0
974
975 mtpr r31, dtb_cm // Make sure that the CM IPRs are all kernel mode
976 mtpr r31, ips
977
978 mtpr r14, exc_addr // Set PC to instruction that caused trouble
979 bsr r0, pal_update_pcb // update the pcb
980
981 lda r0, hlt_c_ksp_inval(r31) // set halt code to hw halt
982 br r31, sys_enter_console // enter the console
983
984 ALIGN_BRANCH
985dfault_do_bugcheck:
986 bis r10, r31, r14 // bugcheck expects exc_addr in r14
987 br r31, pal_pal_bug_check
988
989
990//
991// dfault_fetch_ldr31_err - ignore faults on fetch(m) and loads to r31/f31
992// On entry -
993// r14 - exc_addr
994// VA is locked
995//
996//
997 ALIGN_BLOCK
998dfault_fetch_ldr31_err:
999 mtpr r11, ev5__dtb_cm
1000 mtpr r11, ev5__ps // Make sure ps hasn't changed
1001
1002 mfpr r31, va // unlock the mbox
1003 addq r14, 4, r14 // inc the pc to skip the fetch
1004
1005 mtpr r14, exc_addr // give ibox new PC
1006 mfpr r31, pt0 // pad exc_addr write
1007
1008 hw_rei
1009
1010
1011
1012 ALIGN_BLOCK
1013//
1014// sys_from_kern
1015// callsys from kernel mode - OS bugcheck machine check
1016//
1017//
1018sys_from_kern:
1019 mfpr r14, exc_addr // PC points to call_pal
1020 subq r14, 4, r14
1021
1022 lda r25, mchk_c_os_bugcheck(r31) // fetch mchk code
1023 br r31, pal_pal_mchk
1024
1025
1026// Continuation of long call_pal flows
1027//
1028// wrent_tbl
1029// Table to write *int in paltemps.
1030// 4 instructions/entry
1031// r16 has new value
1032//
1033//
1034 ALIGN_BLOCK
1035wrent_tbl:
1036//orig pvc_jsr wrent, dest=1
1037 nop
1038 mtpr r16, pt_entint
1039
1040 mfpr r31, pt0 // Pad for mt->mf paltemp rule
1041 hw_rei
1042
1043
1044//orig pvc_jsr wrent, dest=1
1045 nop
1046 mtpr r16, pt_entarith
1047
1048 mfpr r31, pt0 // Pad for mt->mf paltemp rule
1049 hw_rei
1050
1051
1052//orig pvc_jsr wrent, dest=1
1053 nop
1054 mtpr r16, pt_entmm
1055
1056 mfpr r31, pt0 // Pad for mt->mf paltemp rule
1057 hw_rei
1058
1059
1060//orig pvc_jsr wrent, dest=1
1061 nop
1062 mtpr r16, pt_entif
1063
1064 mfpr r31, pt0 // Pad for mt->mf paltemp rule
1065 hw_rei
1066
1067
1068//orig pvc_jsr wrent, dest=1
1069 nop
1070 mtpr r16, pt_entuna
1071
1072 mfpr r31, pt0 // Pad for mt->mf paltemp rule
1073 hw_rei
1074
1075
1076//orig pvc_jsr wrent, dest=1
1077 nop
1078 mtpr r16, pt_entsys
1079
1080 mfpr r31, pt0 // Pad for mt->mf paltemp rule
1081 hw_rei
1082
1083 ALIGN_BLOCK
1084//
1085// tbi_tbl
1086// Table to do tbi instructions
1087// 4 instructions per entry
1088//
1089tbi_tbl:
1090 // -2 tbia
1091//orig pvc_jsr tbi, dest=1
1092 mtpr r31, ev5__dtb_ia // Flush DTB
1093 mtpr r31, ev5__itb_ia // Flush ITB
1094
1095 hw_rei_stall
1096
1097 nop // Pad table
1098
1099 // -1 tbiap
1100//orig pvc_jsr tbi, dest=1
1101 mtpr r31, ev5__dtb_iap // Flush DTB
1102 mtpr r31, ev5__itb_iap // Flush ITB
1103
1104 hw_rei_stall
1105
1106 nop // Pad table
1107
1108
1109 // 0 unused
1110//orig pvc_jsr tbi, dest=1
1111 hw_rei // Pad table
1112 nop
1113 nop
1114 nop
1115
1116
1117 // 1 tbisi
1118//orig pvc_jsr tbi, dest=1
1119
1120 nop
1121 nop
1122 mtpr r17, ev5__itb_is // Flush ITB
1123 hw_rei_stall
1124
1125 // 2 tbisd
1126//orig pvc_jsr tbi, dest=1
1127 mtpr r17, ev5__dtb_is // Flush DTB.
1128 nop
1129
1130 nop
1131 hw_rei_stall
1132
1133
1134 // 3 tbis
1135//orig pvc_jsr tbi, dest=1
1136 mtpr r17, ev5__dtb_is // Flush DTB
1137 br r31, tbi_finish
1138 ALIGN_BRANCH
1139tbi_finish:
1140 mtpr r17, ev5__itb_is // Flush ITB
1141 hw_rei_stall
1142
1143
1144
1145 ALIGN_BLOCK
1146//
1147// bpt_bchk_common:
1148// Finish up the bpt/bchk instructions
1149//
1150bpt_bchk_common:
1151 stq r18, osfsf_a2(sp) // a2
1152 mfpr r13, pt_entif // get entry point
1153
1154 stq r12, osfsf_ps(sp) // save old ps
1155 stq r14, osfsf_pc(sp) // save pc
1156
1157 stq r29, osfsf_gp(sp) // save gp
1158 mtpr r13, exc_addr // load exc_addr with entIF
1159 // 1 cycle to hw_rei
1160
1161 mfpr r29, pt_kgp // get the kgp
1162
1163
1164 hw_rei_spe // done
1165
1166
1167 ALIGN_BLOCK
1168//
1169// rti_to_user
1170// Finish up the rti instruction
1171//
1172rti_to_user:
1173 mtpr r11, ev5__dtb_cm // set Mbox current mode - no virt ref for 2 cycles
1174 mtpr r11, ev5__ps // set Ibox current mode - 2 bubble to hw_rei
1175
1176 mtpr r31, ev5__ipl // set the ipl. No hw_rei for 2 cycles
1177 mtpr r25, pt_ksp // save off incase RTI to user
1178
1179 mfpr r30, pt_usp
1180 hw_rei_spe // and back
1181
1182
1183 ALIGN_BLOCK
1184//
1185// rti_to_kern
1186// Finish up the rti instruction
1187//
1188rti_to_kern:
1189 and r12, osfps_m_ipl, r11 // clean ps
1190 mfpr r12, pt_intmask // get int mask
1191
1192 extbl r12, r11, r12 // get mask for this ipl
1193 mtpr r25, pt_ksp // save off incase RTI to user
1194
1195 mtpr r12, ev5__ipl // set the new ipl.
1196 or r25, r31, sp // sp
1197
1198// pvc_violate 217 // possible hidden mt->mf ipl not a problem in callpals
1199 hw_rei
1200
1201 ALIGN_BLOCK
1202//
1203// swpctx_cont
1204// Finish up the swpctx instruction
1205//
1206
1207swpctx_cont:
1208
1209 bic r25, r24, r25 // clean icsr<FPE,PMP>
1210 sll r12, icsr_v_fpe, r12 // shift new fen to pos
1211
1212 ldq_p r14, osfpcb_q_mmptr(r16)// get new mmptr
1213 srl r22, osfpcb_v_pme, r22 // get pme down to bit 0
1214
1215 or r25, r12, r25 // icsr with new fen
1216 srl r23, 32, r24 // move asn to low asn pos
1217
1218 and r22, 1, r22
1219 sll r24, itb_asn_v_asn, r12
1220
1221 sll r22, icsr_v_pmp, r22
1222 nop
1223
1224 or r25, r22, r25 // icsr with new pme
1225
1226 sll r24, dtb_asn_v_asn, r24
1227
1228 subl r23, r13, r13 // gen new cc offset
1229 mtpr r12, itb_asn // no hw_rei_stall in 0,1,2,3,4
1230
1231 mtpr r24, dtb_asn // Load up new ASN
1232 mtpr r25, icsr // write the icsr
1233
1234 sll r14, page_offset_size_bits, r14 // Move PTBR into internal position.
1235 ldq_p r25, osfpcb_q_usp(r16) // get new usp
1236
1237 insll r13, 4, r13 // >> 32
1238// pvc_violate 379 // ldq_p can't trap except replay. only problem if mf same ipr in same shadow
1239 mtpr r14, pt_ptbr // load the new ptbr
1240
1241 mtpr r13, cc // set new offset
1242 ldq_p r30, osfpcb_q_ksp(r16) // get new ksp
1243
1244// pvc_violate 379 // ldq_p can't trap except replay. only problem if mf same ipr in same shadow
1245 mtpr r25, pt_usp // save usp
1246
1247no_pm_change_10_: hw_rei_stall // back we go
1248
1249 ALIGN_BLOCK
1250//
1251// swppal_cont - finish up the swppal call_pal
1252//
1253
1254swppal_cont:
1255 mfpr r2, pt_misc // get misc bits
1256 sll r0, pt_misc_v_switch, r0 // get the "I've switched" bit
1257 or r2, r0, r2 // set the bit
1258 mtpr r31, ev5__alt_mode // ensure alt_mode set to 0 (kernel)
1259 mtpr r2, pt_misc // update the chip
1260
1261 or r3, r31, r4
1262 mfpr r3, pt_impure // pass pointer to the impure area in r3
1263//orig fix_impure_ipr r3 // adjust impure pointer for ipr read
1264//orig restore_reg1 bc_ctl, r1, r3, ipr=1 // pass cns_bc_ctl in r1
1265//orig restore_reg1 bc_config, r2, r3, ipr=1 // pass cns_bc_config in r2
1266//orig unfix_impure_ipr r3 // restore impure pointer
1267 lda r3, CNS_Q_IPR(r3)
1268 RESTORE_SHADOW(r1,CNS_Q_BC_CTL,r3);
1269 RESTORE_SHADOW(r1,CNS_Q_BC_CFG,r3);
1270 lda r3, -CNS_Q_IPR(r3)
1271
1272 or r31, r31, r0 // set status to success
1273// pvc_violate 1007
1274 jmp r31, (r4) // and call our friend, it's her problem now
1275
1276
1277swppal_fail:
1278 addq r0, 1, r0 // set unknown pal or not loaded
1279 hw_rei // and return
1280
1281
1282// .sbttl "Memory management"
1283
1284 ALIGN_BLOCK
1285//
1286//foe_ipte_handler
1287// IFOE detected on level 3 pte, sort out FOE vs ACV
1288//
1289// on entry:
1290// with
1291// R8 = pte
1292// R10 = pc
1293//
1294// Function
1295// Determine TNV vs ACV vs FOE. Build stack and dispatch
1296// Will not be here if TNV.
1297//
1298
1299foe_ipte_handler:
1300 sll r11, 63-osfps_v_mode, r25 // Shift mode up to MS bit
1301 mtpr r31, ev5__ps // Set Ibox current mode to kernel
1302
1303 bis r11, r31, r12 // Save PS for stack write
1304 bge r25, foe_ipte_handler_10_ // no stack swap needed if cm=kern
1305
1306
1307 mtpr r31, ev5__dtb_cm // Set Mbox current mode to kernel -
1308 // no virt ref for next 2 cycles
1309 mtpr r30, pt_usp // save user stack
1310
1311 bis r31, r31, r11 // Set new PS
1312 mfpr r30, pt_ksp
1313
1314 srl r8, osfpte_v_ure-osfpte_v_kre, r8 // move pte user bits to kern
1315 nop
1316
1317foe_ipte_handler_10_: srl r8, osfpte_v_kre, r25 // get kre to <0>
1318 lda sp, 0-osfsf_c_size(sp)// allocate stack space
1319
1320 or r10, r31, r14 // Save pc/va in case TBmiss or fault on stack
1321 mfpr r13, pt_entmm // get entry point
1322
1323 stq r16, osfsf_a0(sp) // a0
1324 or r14, r31, r16 // pass pc/va as a0
1325
1326 stq r17, osfsf_a1(sp) // a1
1327 nop
1328
1329 stq r18, osfsf_a2(sp) // a2
1330 lda r17, mmcsr_c_acv(r31) // assume ACV
1331
1332 stq r16, osfsf_pc(sp) // save pc
1333 cmovlbs r25, mmcsr_c_foe, r17 // otherwise FOE
1334
1335 stq r12, osfsf_ps(sp) // save ps
1336 subq r31, 1, r18 // pass flag of istream as a2
1337
1338 stq r29, osfsf_gp(sp)
1339 mtpr r13, exc_addr // set vector address
1340
1341 mfpr r29, pt_kgp // load kgp
1342 hw_rei_spe // out to exec
1343
1344 ALIGN_BLOCK
1345//
1346//invalid_ipte_handler
1347// TNV detected on level 3 pte, sort out TNV vs ACV
1348//
1349// on entry:
1350// with
1351// R8 = pte
1352// R10 = pc
1353//
1354// Function
1355// Determine TNV vs ACV. Build stack and dispatch.
1356//
1357
1358invalid_ipte_handler:
1359 sll r11, 63-osfps_v_mode, r25 // Shift mode up to MS bit
1360 mtpr r31, ev5__ps // Set Ibox current mode to kernel
1361
1362 bis r11, r31, r12 // Save PS for stack write
1363 bge r25, invalid_ipte_handler_10_ // no stack swap needed if cm=kern
1364
1365
1366 mtpr r31, ev5__dtb_cm // Set Mbox current mode to kernel -
1367 // no virt ref for next 2 cycles
1368 mtpr r30, pt_usp // save user stack
1369
1370 bis r31, r31, r11 // Set new PS
1371 mfpr r30, pt_ksp
1372
1373 srl r8, osfpte_v_ure-osfpte_v_kre, r8 // move pte user bits to kern
1374 nop
1375
1376invalid_ipte_handler_10_: srl r8, osfpte_v_kre, r25 // get kre to <0>
1377 lda sp, 0-osfsf_c_size(sp)// allocate stack space
1378
1379 or r10, r31, r14 // Save pc/va in case TBmiss on stack
1380 mfpr r13, pt_entmm // get entry point
1381
1382 stq r16, osfsf_a0(sp) // a0
1383 or r14, r31, r16 // pass pc/va as a0
1384
1385 stq r17, osfsf_a1(sp) // a1
1386 nop
1387
1388 stq r18, osfsf_a2(sp) // a2
1389 and r25, 1, r17 // Isolate kre
1390
1391 stq r16, osfsf_pc(sp) // save pc
1392 xor r17, 1, r17 // map to acv/tnv as a1
1393
1394 stq r12, osfsf_ps(sp) // save ps
1395 subq r31, 1, r18 // pass flag of istream as a2
1396
1397 stq r29, osfsf_gp(sp)
1398 mtpr r13, exc_addr // set vector address
1399
1400 mfpr r29, pt_kgp // load kgp
1401 hw_rei_spe // out to exec
1402
1403
1404
1405
1406 ALIGN_BLOCK
1407//
1408//invalid_dpte_handler
1409// INVALID detected on level 3 pte, sort out TNV vs ACV
1410//
1411// on entry:
1412// with
1413// R10 = va
1414// R8 = pte
1415// R9 = mm_stat
1416// PT6 = pc
1417//
1418// Function
1419// Determine TNV vs ACV. Build stack and dispatch
1420//
1421
1422
// Classify an invalid Dstream PTE as TNV vs ACV, build the OSF stack
// frame, and vector to the OS memory-management handler (entMM).
// FETCH/FETCH_M and loads targeting r31/f31 are dismissed instead of
// reported (architecturally they may not fault).
invalid_dpte_handler:
	mfpr	r12, pt6
	blbs	r12, tnv_in_pal		// Special handler if original faulting reference was in PALmode

	bis	r12, r31, r14		// save PC in case of tbmiss or fault
	srl	r9, mm_stat_v_opcode, r25	// shift opc to <0>

	mtpr	r11, pt0		// Save PS for stack write
	and	r25, mm_stat_m_opcode, r25	// isolate opcode

	cmpeq	r25, evx_opc_sync, r25	// is it FETCH/FETCH_M?
	blbs	r25, nmiss_fetch_ldr31_err	// yes

	//dismiss exception if load to r31/f31
	blbs	r9, invalid_dpte_no_dismiss	// mm_stat<0> set on store or fetchm

	// not a store or fetch, must be a load
	srl	r9, mm_stat_v_ra, r25	// Shift rnum to low bits

	and	r25, 0x1F, r25		// isolate rnum
	nop

	cmpeq	r25, 0x1F, r25		// Is the rnum r31 or f31?
	bne	r25, nmiss_fetch_ldr31_err	// Yes, dismiss the fault

invalid_dpte_no_dismiss:
	sll	r11, 63-osfps_v_mode, r25	// Shift mode up to MS bit (sign selects kern vs user below)
	mtpr	r31, ev5__ps		// Set Ibox current mode to kernel

	mtpr	r31, ev5__dtb_cm	// Set Mbox current mode to kernel -
					// no virt ref for next 2 cycles
	bge	r25, invalid_dpte_no_dismiss_10_	// no stack swap needed if cm=kern

	srl	r8, osfpte_v_ure-osfpte_v_kre, r8	// move pte user bits to kern
	mtpr	r30, pt_usp		// save user stack

	bis	r31, r31, r11		// Set new PS (kernel mode, IPL 0)
	mfpr	r30, pt_ksp

invalid_dpte_no_dismiss_10_:	srl	r8, osfpte_v_kre, r12	// get kre to <0>
	lda	sp, 0-osfsf_c_size(sp)// allocate stack space

	or	r10, r31, r25		// Save va in case TBmiss on stack
	and	r9, 1, r13		// save r/w flag

	stq	r16, osfsf_a0(sp)	// a0
	or	r25, r31, r16		// pass va as a0

	stq	r17, osfsf_a1(sp)	// a1
	or	r31, mmcsr_c_acv, r17	// assume acv

	srl	r12, osfpte_v_kwe-osfpte_v_kre, r25	// get write enable to <0>
	stq	r29, osfsf_gp(sp)

	stq	r18, osfsf_a2(sp)	// a2
	cmovlbs	r13, r25, r12		// if write access move acv based on write enable

	or	r13, r31, r18		// pass flag of dstream access and read vs write
	mfpr	r25, pt0		// get ps (saved at entry above)

	stq	r14, osfsf_pc(sp)	// save pc
	mfpr	r13, pt_entmm		// get entry point

	stq	r25, osfsf_ps(sp)	// save ps
	mtpr	r13, exc_addr		// set vector address

	mfpr	r29, pt_kgp		// load kgp
	cmovlbs	r12, mmcsr_c_tnv, r17	// make p2 be tnv if access ok else acv

	hw_rei_spe			// out to exec
1493
1494//
1495//
1496// We come here if we are erring on a dtb_miss, and the instr is a
1497// fetch, fetch_m, of load to r31/f31.
1498// The PC is incremented, and we return to the program.
1499// essentially ignoring the instruction and error.
1500//
1501//
1502 ALIGN_BLOCK
nmiss_fetch_ldr31_err:
	mfpr	r12, pt6		// pt6 holds the original faulting PC
	addq	r12, 4, r12		// bump pc to pc+4 (skip the dismissed instruction)

	mtpr	r12, exc_addr		// and set entry point
	mfpr	r31, pt0		// pad exc_addr write

	hw_rei				//
1511
1512 ALIGN_BLOCK
1513//
1514// double_pte_inv
1515// We had a single tbmiss which turned into a double tbmiss which found
1516// an invalid PTE. Return to single miss with a fake pte, and the invalid
1517// single miss flow will report the error.
1518//
1519// on entry:
1520// r21 PTE
1521// r22 available
1522// VA IPR locked with original fault VA
1523// pt4 saved r21
1524// pt5 saved r22
1525// pt6 original exc_addr
1526//
1527// on return to tbmiss flow:
1528// r8 fake PTE
1529//
1530//
1531//
double_pte_inv:
	srl	r21, osfpte_v_kre, r21	// get the kre bit to <0>
	mfpr	r22, exc_addr		// get the pc

	lda	r22, 4(r22)		// inc the pc
	lda	r8, osfpte_m_prot(r31)	// make a fake pte with xre and xwe set

	cmovlbc	r21, r31, r8		// set to all 0 for acv if pte<kre> is 0
	mtpr	r22, exc_addr		// set for rei

	mfpr	r21, pt4		// restore regs
	mfpr	r22, pt5		// restore regs

	hw_rei				// back to tb miss, which will re-fault
					// on the fake PTE and report the error
1546
1547 ALIGN_BLOCK
1548//
1549//tnv_in_pal
1550// The only places in pal that ld or store are the
1551// stack builders, rti or retsys. Any of these mean we
1552// need to take a ksp not valid halt.
1553//
1554//
// A PALmode ld/st faulted: only the stack builders, rti, and retsys
// touch memory from PAL, so the kernel stack pointer must be invalid.
tnv_in_pal:


	br	r31, ksp_inval_halt
1559
1560
1561// .sbttl "Icache flush routines"
1562
1563 ALIGN_BLOCK
1564//
1565// Common Icache flush routine.
1566//
1567//
1568//
// Flush the Icache, then execute enough NOPs that no stale instruction
// can still be in flight before the hw_rei_stall. The NOP count below
// is timing-critical -- do not add or remove instructions here.
pal_ic_flush:
	nop
	mtpr	r31, ev5__ic_flush_ctl		// Icache flush - E1
	nop
	nop

// Now, do 44 NOPs.  3RFB prefetches (24) + IC buffer,IB,slot,issue (20)
	nop
	nop
	nop
	nop

	nop
	nop
	nop
	nop

	nop
	nop			// 10

	nop
	nop
	nop
	nop

	nop
	nop
	nop
	nop

	nop
	nop			// 20

	nop
	nop
	nop
	nop

	nop
	nop
	nop
	nop

	nop
	nop			// 30
	nop
	nop
	nop
	nop

	nop
	nop
	nop
	nop

	nop
	nop			// 40

	nop
	nop

// Fall-through/branch target: burn one more cycle, then rei with stall.
one_cycle_and_hw_rei:
	nop
	nop

	hw_rei_stall
1635
1636 ALIGN_BLOCK
1637//
1638//osfpal_calpal_opcdec
1639// Here for all opcdec CALL_PALs
1640//
1641// Build stack frame
1642// a0 <- code
1643// a1 <- unpred
1644// a2 <- unpred
1645// vector via entIF
1646//
1647//
1648
osfpal_calpal_opcdec:
	sll	r11, 63-osfps_v_mode, r25	// Shift mode up to MS bit (sign selects kern vs user below)
	mtpr	r31, ev5__ps		// Set Ibox current mode to kernel

	mfpr	r14, exc_addr		// get pc
	nop

	bis	r11, r31, r12		// Save PS for stack write
	bge	r25, osfpal_calpal_opcdec_10_	// no stack swap needed if cm=kern


	mtpr	r31, ev5__dtb_cm	// Set Mbox current mode to kernel -
					// no virt ref for next 2 cycles
	mtpr	r30, pt_usp		// save user stack

	bis	r31, r31, r11		// Set new PS (kernel mode, IPL 0)
	mfpr	r30, pt_ksp

osfpal_calpal_opcdec_10_:
	lda	sp, 0-osfsf_c_size(sp)// allocate stack space
	nop

	stq	r16, osfsf_a0(sp)	// save regs
	bis	r31, osf_a0_opdec, r16	// set a0 = opcdec code

	stq	r18, osfsf_a2(sp)	// a2
	mfpr	r13, pt_entif		// get entry point

	stq	r12, osfsf_ps(sp)	// save old ps
	stq	r17, osfsf_a1(sp)	// a1

	stq	r14, osfsf_pc(sp)	// save pc
	nop

	stq	r29, osfsf_gp(sp)	// save gp
	mtpr	r13, exc_addr		// load exc_addr with entIF
					// 1 cycle to hw_rei

	mfpr	r29, pt_kgp		// get the kgp


	hw_rei_spe			// done
1691
1692
1693
1694
1695
1696//
1697//pal_update_pcb
1698// Update the PCB with the current SP, AST, and CC info
1699//
1700// r0 - return linkage
1701//
1702 ALIGN_BLOCK
1703
pal_update_pcb:
	mfpr	r12, pt_pcbb		// get pcbb
	and	r11, osfps_m_mode, r25	// get mode
	beq	r25, pal_update_pcb_10_	// in kern? no need to update user sp
	mtpr	r30, pt_usp		// save user stack
	stq_p	r30, osfpcb_q_usp(r12)	// store usp
	br	r31, pal_update_pcb_20_	// join common
pal_update_pcb_10_:	stq_p	r30, osfpcb_q_ksp(r12)	// store ksp
pal_update_pcb_20_:	rpcc	r13		// get cyccounter
	srl	r13, 32, r14		// move offset (rpcc<63:32>)
	addl	r13, r14, r14		// merge count + offset for new time
	stl_p	r14, osfpcb_l_cc(r12)	// save time

//orig	pvc_jsr	updpcb, bsr=1, dest=1
	ret	r31, (r0)		// return via linkage in r0
1719
1720
1721//
1722// pal_save_state
1723//
1724// Function
1725// All chip state saved, all PT's, SR's FR's, IPR's
1726//
1727//
1728// Regs' on entry...
1729//
1730// R0 = halt code
1731// pt0 = r0
1732// R1 = pointer to impure
1733// pt4 = r1
1734// R3 = return addr
1735// pt5 = r3
1736//
1737// register usage:
1738// r0 = halt_code
1739// r1 = addr of impure area
1740// r3 = return_address
1741// r4 = scratch
1742//
1743//
1744
1745 ALIGN_BLOCK
	.globl	pal_save_state
// Save all chip state (GPRs, FPRs, shadow regs, PALtemps, IPRs, Cbox
// IPRs) into the impure area pointed to by r1. See the register-usage
// header above. NOTE: the CNS_Q_* offsets and SAVE_* macros are
// defined elsewhere in this PAL source tree.
pal_save_state:
//
//
// start of implementation independent save routine
//
// the impure area is larger than the addressibility of hw_ld and hw_st
// therefore, we need to play some games:  The impure area
// is informally divided into the "machine independent" part and the
// "machine dependent" part.  The state that will be saved in the
// "machine independent" part are gpr's, fpr's, hlt, flag, mchkflag (use (un)fix_impure_gpr macros).
// All others will be in the "machine dependent" part (use (un)fix_impure_ipr macros).
// The impure pointer will need to be adjusted by a different offset for each.  The store/restore_reg
// macros will automagically adjust the offset correctly.
//

// The distributed code is commented out and followed by corresponding SRC code.
// Beware: SAVE_IPR and RESTORE_IPR blow away r0(v0)

//orig	fix_impure_gpr	r1		// adjust impure area pointer for stores to "gpr" part of impure area
	lda	r1, 0x200(r1)		// Point to center of CPU segment
//orig	store_reg1 flag, r31, r1, ipr=1	// clear dump area flag
	SAVE_GPR(r31,CNS_Q_FLAG,r1)	// Clear the valid flag
//orig	store_reg1 hlt, r0, r1, ipr=1
	SAVE_GPR(r0,CNS_Q_HALT,r1)	// Save the halt code

	mfpr	r0, pt0			// get r0 back		//orig
//orig	store_reg1 0, r0, r1		// save r0
	SAVE_GPR(r0,CNS_Q_GPR+0x00,r1)	// Save r0

	mfpr	r0, pt4			// get r1 back		//orig
//orig	store_reg1 1, r0, r1		// save r1
	SAVE_GPR(r0,CNS_Q_GPR+0x08,r1)	// Save r1

//orig	store_reg 2			// save r2
	SAVE_GPR(r2,CNS_Q_GPR+0x10,r1)	// Save r2

	mfpr	r0, pt5			// get r3 back		//orig
//orig	store_reg1 3, r0, r1		// save r3
	SAVE_GPR(r0,CNS_Q_GPR+0x18,r1)	// Save r3

	// reason code has been saved
	// r0 has been saved
	// r1 has been saved
	// r2 has been saved
	// r3 has been saved
	// pt0, pt4, pt5 have been lost

	//
	// Get out of shadow mode
	//

	mfpr	r2, icsr		// Get icsr
	ldah	r0, (1<<(icsr_v_sde-16))(r31)
	bic	r2, r0, r0		// ICSR with SDE clear
	mtpr	r0, icsr		// Turn off SDE

	mfpr	r31, pt0		// SDE bubble cycle 1
	mfpr	r31, pt0		// SDE bubble cycle 2
	mfpr	r31, pt0		// SDE bubble cycle 3
	nop


	// save integer regs R4-r31
	SAVE_GPR(r4,CNS_Q_GPR+0x20,r1)
	SAVE_GPR(r5,CNS_Q_GPR+0x28,r1)
	SAVE_GPR(r6,CNS_Q_GPR+0x30,r1)
	SAVE_GPR(r7,CNS_Q_GPR+0x38,r1)
	SAVE_GPR(r8,CNS_Q_GPR+0x40,r1)
	SAVE_GPR(r9,CNS_Q_GPR+0x48,r1)
	SAVE_GPR(r10,CNS_Q_GPR+0x50,r1)
	SAVE_GPR(r11,CNS_Q_GPR+0x58,r1)
	SAVE_GPR(r12,CNS_Q_GPR+0x60,r1)
	SAVE_GPR(r13,CNS_Q_GPR+0x68,r1)
	SAVE_GPR(r14,CNS_Q_GPR+0x70,r1)
	SAVE_GPR(r15,CNS_Q_GPR+0x78,r1)
	SAVE_GPR(r16,CNS_Q_GPR+0x80,r1)
	SAVE_GPR(r17,CNS_Q_GPR+0x88,r1)
	SAVE_GPR(r18,CNS_Q_GPR+0x90,r1)
	SAVE_GPR(r19,CNS_Q_GPR+0x98,r1)
	SAVE_GPR(r20,CNS_Q_GPR+0xA0,r1)
	SAVE_GPR(r21,CNS_Q_GPR+0xA8,r1)
	SAVE_GPR(r22,CNS_Q_GPR+0xB0,r1)
	SAVE_GPR(r23,CNS_Q_GPR+0xB8,r1)
	SAVE_GPR(r24,CNS_Q_GPR+0xC0,r1)
	SAVE_GPR(r25,CNS_Q_GPR+0xC8,r1)
	SAVE_GPR(r26,CNS_Q_GPR+0xD0,r1)
	SAVE_GPR(r27,CNS_Q_GPR+0xD8,r1)
	SAVE_GPR(r28,CNS_Q_GPR+0xE0,r1)
	SAVE_GPR(r29,CNS_Q_GPR+0xE8,r1)
	SAVE_GPR(r30,CNS_Q_GPR+0xF0,r1)
	SAVE_GPR(r31,CNS_Q_GPR+0xF8,r1)

	// save all paltemp regs except pt0

//orig	unfix_impure_gpr	r1	// adjust impure area pointer for gpr stores
//orig	fix_impure_ipr	r1		// adjust impure area pointer for pt stores

	lda	r1, -0x200(r1)		// Restore the impure base address.
	lda	r1, CNS_Q_IPR(r1)	// Point to the base of IPR area.
	SAVE_IPR(pt0,CNS_Q_PT+0x00,r1)		// the osf code didn't save/restore palTemp 0 ?? pboyle
	SAVE_IPR(pt1,CNS_Q_PT+0x08,r1)
	SAVE_IPR(pt2,CNS_Q_PT+0x10,r1)
	SAVE_IPR(pt3,CNS_Q_PT+0x18,r1)
	SAVE_IPR(pt4,CNS_Q_PT+0x20,r1)
	SAVE_IPR(pt5,CNS_Q_PT+0x28,r1)
	SAVE_IPR(pt6,CNS_Q_PT+0x30,r1)
	SAVE_IPR(pt7,CNS_Q_PT+0x38,r1)
	SAVE_IPR(pt8,CNS_Q_PT+0x40,r1)
	SAVE_IPR(pt9,CNS_Q_PT+0x48,r1)
	SAVE_IPR(pt10,CNS_Q_PT+0x50,r1)
	SAVE_IPR(pt11,CNS_Q_PT+0x58,r1)
	SAVE_IPR(pt12,CNS_Q_PT+0x60,r1)
	SAVE_IPR(pt13,CNS_Q_PT+0x68,r1)
	SAVE_IPR(pt14,CNS_Q_PT+0x70,r1)
	SAVE_IPR(pt15,CNS_Q_PT+0x78,r1)
	SAVE_IPR(pt16,CNS_Q_PT+0x80,r1)
	SAVE_IPR(pt17,CNS_Q_PT+0x88,r1)
	SAVE_IPR(pt18,CNS_Q_PT+0x90,r1)
	SAVE_IPR(pt19,CNS_Q_PT+0x98,r1)
	SAVE_IPR(pt20,CNS_Q_PT+0xA0,r1)
	SAVE_IPR(pt21,CNS_Q_PT+0xA8,r1)
	SAVE_IPR(pt22,CNS_Q_PT+0xB0,r1)
	SAVE_IPR(pt23,CNS_Q_PT+0xB8,r1)

	// Restore shadow mode
	mfpr	r31, pt0		// pad write to icsr out of shadow of store (trap does not abort write)
	mfpr	r31, pt0
	mtpr	r2, icsr		// Restore original ICSR

	mfpr	r31, pt0		// SDE bubble cycle 1
	mfpr	r31, pt0		// SDE bubble cycle 2
	mfpr	r31, pt0		// SDE bubble cycle 3
	nop

	// save all integer shadow regs
	SAVE_SHADOW( r8,CNS_Q_SHADOW+0x00,r1)	// also called p0...p7 in the Hudson code
	SAVE_SHADOW( r9,CNS_Q_SHADOW+0x08,r1)
	SAVE_SHADOW(r10,CNS_Q_SHADOW+0x10,r1)
	SAVE_SHADOW(r11,CNS_Q_SHADOW+0x18,r1)
	SAVE_SHADOW(r12,CNS_Q_SHADOW+0x20,r1)
	SAVE_SHADOW(r13,CNS_Q_SHADOW+0x28,r1)
	SAVE_SHADOW(r14,CNS_Q_SHADOW+0x30,r1)
	SAVE_SHADOW(r25,CNS_Q_SHADOW+0x38,r1)

	SAVE_IPR(excAddr,CNS_Q_EXC_ADDR,r1)
	SAVE_IPR(palBase,CNS_Q_PAL_BASE,r1)
	SAVE_IPR(mmStat,CNS_Q_MM_STAT,r1)
	SAVE_IPR(va,CNS_Q_VA,r1)
	SAVE_IPR(icsr,CNS_Q_ICSR,r1)
	SAVE_IPR(ipl,CNS_Q_IPL,r1)
	SAVE_IPR(ips,CNS_Q_IPS,r1)
	SAVE_IPR(itbAsn,CNS_Q_ITB_ASN,r1)
	SAVE_IPR(aster,CNS_Q_ASTER,r1)
	SAVE_IPR(astrr,CNS_Q_ASTRR,r1)
	SAVE_IPR(sirr,CNS_Q_SIRR,r1)
	SAVE_IPR(isr,CNS_Q_ISR,r1)
	SAVE_IPR(iVptBr,CNS_Q_IVPTBR,r1)
	SAVE_IPR(mcsr,CNS_Q_MCSR,r1)
	SAVE_IPR(dcMode,CNS_Q_DC_MODE,r1)

//orig	pvc_violate 379		// mf maf_mode after a store ok (pvc doesn't distinguish ld from st)
//orig	store_reg maf_mode, ipr=1	// save ipr -- no mbox instructions for
//orig					// PVC violation applies only to
pvc$osf35$379:				// loads. HW_ST ok here, so ignore
	SAVE_IPR(mafMode,CNS_Q_MAF_MODE,r1)	// MBOX INST->MF MAF_MODE IN 0,1,2


	//the following iprs are informational only -- will not be restored

	SAVE_IPR(icPerr,CNS_Q_ICPERR_STAT,r1)
	SAVE_IPR(PmCtr,CNS_Q_PM_CTR,r1)
	SAVE_IPR(intId,CNS_Q_INT_ID,r1)
	SAVE_IPR(excSum,CNS_Q_EXC_SUM,r1)
	SAVE_IPR(excMask,CNS_Q_EXC_MASK,r1)
	ldah	r14, 0xFFF0(zero)
	zap	r14, 0xE0, r14		// Get base address of CBOX IPRs
	NOP				// Pad mfpr dcPerr out of shadow of
	NOP				// last store
	NOP
	SAVE_IPR(dcPerr,CNS_Q_DCPERR_STAT,r1)

	// read cbox ipr state

	mb
	ldq_p	r2, scCtl(r14)
	ldq_p	r13, ldLock(r14)
	ldq_p	r4, scAddr(r14)
	ldq_p	r5, eiAddr(r14)
	ldq_p	r6, bcTagAddr(r14)
	ldq_p	r7, fillSyn(r14)
	bis	r5, r4, zero		// Make sure all loads complete before
	bis	r7, r6, zero		// reading registers that unlock them.
	ldq_p	r8, scStat(r14)		// Unlocks scAddr.
	ldq_p	r9, eiStat(r14)		// Unlocks eiAddr, bcTagAddr, fillSyn.
	ldq_p	zero, eiStat(r14)	// Make sure it is really unlocked.
	mb

	// save cbox ipr state
	SAVE_SHADOW(r2,CNS_Q_SC_CTL,r1);
	SAVE_SHADOW(r13,CNS_Q_LD_LOCK,r1);
	SAVE_SHADOW(r4,CNS_Q_SC_ADDR,r1);
	SAVE_SHADOW(r5,CNS_Q_EI_ADDR,r1);
	SAVE_SHADOW(r6,CNS_Q_BC_TAG_ADDR,r1);
	SAVE_SHADOW(r7,CNS_Q_FILL_SYN,r1);
	SAVE_SHADOW(r8,CNS_Q_SC_STAT,r1);
	SAVE_SHADOW(r9,CNS_Q_EI_STAT,r1);
	//bc_config? sl_rcv?

// restore impure base
//orig	unfix_impure_ipr r1
	lda	r1, -CNS_Q_IPR(r1)

// save all floating regs
	mfpr	r0, icsr		// get icsr
	or	r31, 1, r2		// get a one
	sll	r2, icsr_v_fpe, r2	// Shift it into ICSR<FPE> position
	or	r2, r0, r0		// set FEN on
	mtpr	r0, icsr		// write to icsr, enabling FEN

// map the save area virtually
	mtpr	r31, dtbIa		// Clear all DTB entries
	srl	r1, va_s_off, r0	// Clean off byte-within-page offset
	sll	r0, pte_v_pfn, r0	// Shift to form PFN
	lda	r0, pte_m_prot(r0)	// Set all read/write enable bits
	mtpr	r0, dtbPte		// Load the PTE and set valid
	mtpr	r1, dtbTag		// Write the PTE and tag into the DTB


// map the next page too - in case the impure area crosses a page boundary
	lda	r4, (1<<va_s_off)(r1)	// Generate address for next page
	srl	r4, va_s_off, r0	// Clean off byte-within-page offset
	sll	r0, pte_v_pfn, r0	// Shift to form PFN
	lda	r0, pte_m_prot(r0)	// Set all read/write enable bits
	mtpr	r0, dtbPte		// Load the PTE and set valid
	mtpr	r4, dtbTag		// Write the PTE and tag into the DTB

	sll	r31, 0, r31		// stall cycle 1
	sll	r31, 0, r31		// stall cycle 2
	sll	r31, 0, r31		// stall cycle 3
	nop

// add offset for saving fpr regs
//orig	fix_impure_gpr r1
	lda	r1, 0x200(r1)		// Point to center of CPU segment

// now save the regs - F0-F31
	mf_fpcr  f0			// original

	SAVE_FPR(f0,CNS_Q_FPR+0x00,r1)
	SAVE_FPR(f1,CNS_Q_FPR+0x08,r1)
	SAVE_FPR(f2,CNS_Q_FPR+0x10,r1)
	SAVE_FPR(f3,CNS_Q_FPR+0x18,r1)
	SAVE_FPR(f4,CNS_Q_FPR+0x20,r1)
	SAVE_FPR(f5,CNS_Q_FPR+0x28,r1)
	SAVE_FPR(f6,CNS_Q_FPR+0x30,r1)
	SAVE_FPR(f7,CNS_Q_FPR+0x38,r1)
	SAVE_FPR(f8,CNS_Q_FPR+0x40,r1)
	SAVE_FPR(f9,CNS_Q_FPR+0x48,r1)
	SAVE_FPR(f10,CNS_Q_FPR+0x50,r1)
	SAVE_FPR(f11,CNS_Q_FPR+0x58,r1)
	SAVE_FPR(f12,CNS_Q_FPR+0x60,r1)
	SAVE_FPR(f13,CNS_Q_FPR+0x68,r1)
	SAVE_FPR(f14,CNS_Q_FPR+0x70,r1)
	SAVE_FPR(f15,CNS_Q_FPR+0x78,r1)
	SAVE_FPR(f16,CNS_Q_FPR+0x80,r1)
	SAVE_FPR(f17,CNS_Q_FPR+0x88,r1)
	SAVE_FPR(f18,CNS_Q_FPR+0x90,r1)
	SAVE_FPR(f19,CNS_Q_FPR+0x98,r1)
	SAVE_FPR(f20,CNS_Q_FPR+0xA0,r1)
	SAVE_FPR(f21,CNS_Q_FPR+0xA8,r1)
	SAVE_FPR(f22,CNS_Q_FPR+0xB0,r1)
	SAVE_FPR(f23,CNS_Q_FPR+0xB8,r1)
	SAVE_FPR(f24,CNS_Q_FPR+0xC0,r1)
	SAVE_FPR(f25,CNS_Q_FPR+0xC8,r1)
	SAVE_FPR(f26,CNS_Q_FPR+0xD0,r1)
	SAVE_FPR(f27,CNS_Q_FPR+0xD8,r1)
	SAVE_FPR(f28,CNS_Q_FPR+0xE0,r1)
	SAVE_FPR(f29,CNS_Q_FPR+0xE8,r1)
	SAVE_FPR(f30,CNS_Q_FPR+0xF0,r1)
	SAVE_FPR(f31,CNS_Q_FPR+0xF8,r1)

//switch impure offset from gpr to ipr---
//orig	unfix_impure_gpr	r1
//orig	fix_impure_ipr	r1
//orig	store_reg1 fpcsr, f0, r1, fpcsr=1

	// NOTE(review): FPCSR is stored relative to the GPR-adjusted base
	// (base+0x200), not the IPR base -- the restore path must match.
	SAVE_FPR(f0,CNS_Q_FPCSR,r1)	// fpcsr loaded above into f0 -- can it reach
	lda	r1, -0x200(r1)		// Restore the impure base address

// and back to gpr ---
//orig	unfix_impure_ipr	r1
//orig	fix_impure_gpr	r1

//orig	lda	r0, cns_mchksize(r31)	// get size of mchk area
//orig	store_reg1 mchkflag, r0, r1, ipr=1
//orig	mb

	lda	r1, CNS_Q_IPR(r1)	// Point to base of IPR area again
	// save this using the IPR base (it is closer) not the GRP base as they used...pb
	lda	r0, MACHINE_CHECK_SIZE(r31)	// get size of mchk area
	SAVE_SHADOW(r0,CNS_Q_MCHK,r1);
	mb

//orig	or	r31, 1, r0		// get a one
//orig	store_reg1 flag, r0, r1, ipr=1	// set dump area flag
//orig	mb

	lda	r1, -CNS_Q_IPR(r1)	// back to the base
	lda	r1, 0x200(r1)		// Point to center of CPU segment
	or	r31, 1, r0		// get a one
	SAVE_GPR(r0,CNS_Q_FLAG,r1)	// set dump area valid flag
	mb

	// restore impure area base
//orig	unfix_impure_gpr r1
	lda	r1, -0x200(r1)		// Point to center of CPU segment

	mtpr	r31, dtb_ia		// clear the dtb
	mtpr	r31, itb_ia		// clear the itb

//orig	pvc_jsr	savsta, bsr=1, dest=1
	ret	r31, (r3)		// and back we go
2069
2070
2071
2072// .sbttl "PAL_RESTORE_STATE"
2073//
2074//
2075// Pal_restore_state
2076//
2077//
2078// register usage:
2079// r1 = addr of impure area
2080// r3 = return_address
2081// all other regs are scratchable, as they are about to
2082// be reloaded from ram.
2083//
2084// Function:
2085// All chip state restored, all SRs, FRs, PTs, IPRs
2086// *** except R1, R3, PT0, PT4, PT5 ***
2087//
2088//
2089 ALIGN_BLOCK
// Restore all chip state saved by pal_save_state from the impure area
// pointed to by r1. Everything except r1, r3, pt0, pt4, pt5 is reloaded.
//
// Fixes relative to the distributed code:
//  1. FPCSR was restored from base+200 (decimal); pal_save_state stores
//     it at base+0x200+CNS_Q_FPCSR (GPR-adjusted base), so the first lda
//     is now 0x200 and the redundant second 0x200 add is dropped.
//  2. "bis r2, r2, r0" discarded the ICSR value just read into r0;
//     the intent (per the comment and the symmetric enable code in
//     pal_save_state) is to OR SDE|FPE into the live ICSR value.
//  3. t0/r1 aliasing unified to r1 for readability (t0 == r1).
pal_restore_state:

//need to restore sc_ctl,bc_ctl,bc_config??? if so, need to figure out a safe way to do so.

// map the console io area virtually
	mtpr	r31, dtbIa		// Clear all DTB entries
	srl	r1, va_s_off, r0	// Clean off byte-within-page offset
	sll	r0, pte_v_pfn, r0	// Shift to form PFN
	lda	r0, pte_m_prot(r0)	// Set all read/write enable bits
	mtpr	r0, dtbPte		// Load the PTE and set valid
	mtpr	r1, dtbTag		// Write the PTE and tag into the DTB


// map the next page too, in case impure area crosses page boundary
	lda	r4, (1<<VA_S_OFF)(r1)	// Generate address for next page
	srl	r4, va_s_off, r0	// Clean off byte-within-page offset
	sll	r0, pte_v_pfn, r0	// Shift to form PFN
	lda	r0, pte_m_prot(r0)	// Set all read/write enable bits
	mtpr	r0, dtbPte		// Load the PTE and set valid
	mtpr	r4, dtbTag		// Write the PTE and tag into the DTB

// enable FP and shadow mode so FP/shadow state can be restored
	mfpr	r0, icsr		// Get current ICSR
	bis	zero, 1, r2		// Get a '1'
	or	r2, (1<<(icsr_v_sde-icsr_v_fpe)), r2
	sll	r2, icsr_v_fpe, r2	// Shift bits into position
	bis	r2, r0, r0		// Set ICSR<SDE> and ICSR<FPE>, keep other bits
	mtpr	r0, icsr		// Update the chip

	mfpr	r31, pt0		// FPE bubble cycle 1		//orig
	mfpr	r31, pt0		// FPE bubble cycle 2		//orig
	mfpr	r31, pt0		// FPE bubble cycle 3		//orig

//orig	fix_impure_ipr	r1
//orig	restore_reg1 fpcsr, f0, r1, fpcsr=1
//orig	mt_fpcr	f0
//orig
//orig	unfix_impure_ipr	r1
//orig	fix_impure_gpr	r1		// adjust impure pointer offset for gpr access
	lda	r1, 0x200(r1)		// Point to center of CPU segment -- FPCSR
					// was saved relative to this base in pal_save_state
	RESTORE_FPR(f0,CNS_Q_FPCSR,r1)	// reload fpcsr image into f0
	mt_fpcr	f0			// original

	// r1 already points to the center of the CPU segment

// restore all floating regs
	RESTORE_FPR(f0,CNS_Q_FPR+0x00,r1)
	RESTORE_FPR(f1,CNS_Q_FPR+0x08,r1)
	RESTORE_FPR(f2,CNS_Q_FPR+0x10,r1)
	RESTORE_FPR(f3,CNS_Q_FPR+0x18,r1)
	RESTORE_FPR(f4,CNS_Q_FPR+0x20,r1)
	RESTORE_FPR(f5,CNS_Q_FPR+0x28,r1)
	RESTORE_FPR(f6,CNS_Q_FPR+0x30,r1)
	RESTORE_FPR(f7,CNS_Q_FPR+0x38,r1)
	RESTORE_FPR(f8,CNS_Q_FPR+0x40,r1)
	RESTORE_FPR(f9,CNS_Q_FPR+0x48,r1)
	RESTORE_FPR(f10,CNS_Q_FPR+0x50,r1)
	RESTORE_FPR(f11,CNS_Q_FPR+0x58,r1)
	RESTORE_FPR(f12,CNS_Q_FPR+0x60,r1)
	RESTORE_FPR(f13,CNS_Q_FPR+0x68,r1)
	RESTORE_FPR(f14,CNS_Q_FPR+0x70,r1)
	RESTORE_FPR(f15,CNS_Q_FPR+0x78,r1)
	RESTORE_FPR(f16,CNS_Q_FPR+0x80,r1)
	RESTORE_FPR(f17,CNS_Q_FPR+0x88,r1)
	RESTORE_FPR(f18,CNS_Q_FPR+0x90,r1)
	RESTORE_FPR(f19,CNS_Q_FPR+0x98,r1)
	RESTORE_FPR(f20,CNS_Q_FPR+0xA0,r1)
	RESTORE_FPR(f21,CNS_Q_FPR+0xA8,r1)
	RESTORE_FPR(f22,CNS_Q_FPR+0xB0,r1)
	RESTORE_FPR(f23,CNS_Q_FPR+0xB8,r1)
	RESTORE_FPR(f24,CNS_Q_FPR+0xC0,r1)
	RESTORE_FPR(f25,CNS_Q_FPR+0xC8,r1)
	RESTORE_FPR(f26,CNS_Q_FPR+0xD0,r1)
	RESTORE_FPR(f27,CNS_Q_FPR+0xD8,r1)
	RESTORE_FPR(f28,CNS_Q_FPR+0xE0,r1)
	RESTORE_FPR(f29,CNS_Q_FPR+0xE8,r1)
	RESTORE_FPR(f30,CNS_Q_FPR+0xF0,r1)
	RESTORE_FPR(f31,CNS_Q_FPR+0xF8,r1)

// switch impure pointer from gpr to ipr area --
//orig	unfix_impure_gpr	r1
//orig	fix_impure_ipr	r1
	lda	r1, -0x200(r1)		// Restore base address of impure area.
	lda	r1, CNS_Q_IPR(r1)	// Point to base of IPR area.

// restore all pal regs
	RESTORE_IPR(pt0,CNS_Q_PT+0x00,r1)		// the osf code didn't save/restore palTemp 0 ?? pboyle
	RESTORE_IPR(pt1,CNS_Q_PT+0x08,r1)
	RESTORE_IPR(pt2,CNS_Q_PT+0x10,r1)
	RESTORE_IPR(pt3,CNS_Q_PT+0x18,r1)
	RESTORE_IPR(pt4,CNS_Q_PT+0x20,r1)
	RESTORE_IPR(pt5,CNS_Q_PT+0x28,r1)
	RESTORE_IPR(pt6,CNS_Q_PT+0x30,r1)
	RESTORE_IPR(pt7,CNS_Q_PT+0x38,r1)
	RESTORE_IPR(pt8,CNS_Q_PT+0x40,r1)
	RESTORE_IPR(pt9,CNS_Q_PT+0x48,r1)
	RESTORE_IPR(pt10,CNS_Q_PT+0x50,r1)
	RESTORE_IPR(pt11,CNS_Q_PT+0x58,r1)
	RESTORE_IPR(pt12,CNS_Q_PT+0x60,r1)
	RESTORE_IPR(pt13,CNS_Q_PT+0x68,r1)
	RESTORE_IPR(pt14,CNS_Q_PT+0x70,r1)
	RESTORE_IPR(pt15,CNS_Q_PT+0x78,r1)
	RESTORE_IPR(pt16,CNS_Q_PT+0x80,r1)
	RESTORE_IPR(pt17,CNS_Q_PT+0x88,r1)
	RESTORE_IPR(pt18,CNS_Q_PT+0x90,r1)
	RESTORE_IPR(pt19,CNS_Q_PT+0x98,r1)
	RESTORE_IPR(pt20,CNS_Q_PT+0xA0,r1)
	RESTORE_IPR(pt21,CNS_Q_PT+0xA8,r1)
	RESTORE_IPR(pt22,CNS_Q_PT+0xB0,r1)
	RESTORE_IPR(pt23,CNS_Q_PT+0xB8,r1)


//orig	restore_reg exc_addr, ipr=1	// restore ipr
//orig	restore_reg pal_base, ipr=1	// restore ipr
//orig	restore_reg ipl, ipr=1		// restore ipr
//orig	restore_reg ps, ipr=1		// restore ipr
//orig	mtpr	r0, dtb_cm		// set current mode in mbox too
//orig	restore_reg itb_asn, ipr=1
//orig	srl	r0, itb_asn_v_asn, r0
//orig	sll	r0, dtb_asn_v_asn, r0
//orig	mtpr	r0, dtb_asn		// set ASN in Mbox too
//orig	restore_reg ivptbr, ipr=1
//orig	mtpr	r0, mvptbr		// use ivptbr value to restore mvptbr
//orig	restore_reg mcsr, ipr=1
//orig	restore_reg aster, ipr=1
//orig	restore_reg astrr, ipr=1
//orig	restore_reg sirr, ipr=1
//orig	restore_reg maf_mode, ipr=1	// no mbox instruction for 3 cycles
//orig	mfpr	r31, pt0		// (may issue with mt maf_mode)
//orig	mfpr	r31, pt0		// bubble cycle 1
//orig	mfpr	r31, pt0		// bubble cycle 2
//orig	mfpr	r31, pt0		// bubble cycle 3
//orig	mfpr	r31, pt0		// (may issue with following ld)

	// r0 gets the value of RESTORE_IPR in the macro and this code uses this side effect (gag)
	RESTORE_IPR(excAddr,CNS_Q_EXC_ADDR,r1)
	RESTORE_IPR(palBase,CNS_Q_PAL_BASE,r1)
	RESTORE_IPR(ipl,CNS_Q_IPL,r1)
	RESTORE_IPR(ips,CNS_Q_IPS,r1)
	mtpr	r0, dtbCm		// Set Mbox current mode too.
	RESTORE_IPR(itbAsn,CNS_Q_ITB_ASN,r1)
	srl	r0, 4, r0		// itb_asn<asn> down to bit 0
	sll	r0, 57, r0		// up to dtb_asn<asn> position
	mtpr	r0, dtbAsn		// Set Mbox ASN too
	RESTORE_IPR(iVptBr,CNS_Q_IVPTBR,r1)
	mtpr	r0, mVptBr		// Set Mbox VptBr too
	RESTORE_IPR(mcsr,CNS_Q_MCSR,r1)
	RESTORE_IPR(aster,CNS_Q_ASTER,r1)
	RESTORE_IPR(astrr,CNS_Q_ASTRR,r1)
	RESTORE_IPR(sirr,CNS_Q_SIRR,r1)
	RESTORE_IPR(mafMode,CNS_Q_MAF_MODE,r1)
	STALL
	STALL
	STALL
	STALL
	STALL


	// restore all integer shadow regs
	RESTORE_SHADOW( r8,CNS_Q_SHADOW+0x00,r1)	// also called p0...p7 in the Hudson code
	RESTORE_SHADOW( r9,CNS_Q_SHADOW+0x08,r1)
	RESTORE_SHADOW(r10,CNS_Q_SHADOW+0x10,r1)
	RESTORE_SHADOW(r11,CNS_Q_SHADOW+0x18,r1)
	RESTORE_SHADOW(r12,CNS_Q_SHADOW+0x20,r1)
	RESTORE_SHADOW(r13,CNS_Q_SHADOW+0x28,r1)
	RESTORE_SHADOW(r14,CNS_Q_SHADOW+0x30,r1)
	RESTORE_SHADOW(r25,CNS_Q_SHADOW+0x38,r1)
	RESTORE_IPR(dcMode,CNS_Q_DC_MODE,r1)

	//
	// Get out of shadow mode
	//

	mfpr	r31, pt0		// pad last load to icsr write (in case of replay, icsr will be written anyway)
	mfpr	r31, pt0		// ""
	mfpr	r0, icsr		// Get icsr
	ldah	r2, (1<<(ICSR_V_SDE-16))(r31)	// Get a one in SHADOW_ENABLE bit location
	bic	r0, r2, r2		// ICSR with SDE clear
	mtpr	r2, icsr		// Turn off SDE - no palshadow rd/wr for 3 bubble cycles

	mfpr	r31, pt0		// SDE bubble cycle 1
	mfpr	r31, pt0		// SDE bubble cycle 2
	mfpr	r31, pt0		// SDE bubble cycle 3
	nop

// switch impure pointer from ipr to gpr area --
//orig	unfix_impure_ipr	r1
//orig	fix_impure_gpr	r1

// Restore GPRs (r0, r2 are restored later, r1 and r3 are trashed) ...

	lda	r1, -CNS_Q_IPR(r1)	// Restore base address of impure area
	lda	r1, 0x200(r1)		// Point to center of CPU segment

	// restore all integer regs
	RESTORE_GPR(r4,CNS_Q_GPR+0x20,r1)
	RESTORE_GPR(r5,CNS_Q_GPR+0x28,r1)
	RESTORE_GPR(r6,CNS_Q_GPR+0x30,r1)
	RESTORE_GPR(r7,CNS_Q_GPR+0x38,r1)
	RESTORE_GPR(r8,CNS_Q_GPR+0x40,r1)
	RESTORE_GPR(r9,CNS_Q_GPR+0x48,r1)
	RESTORE_GPR(r10,CNS_Q_GPR+0x50,r1)
	RESTORE_GPR(r11,CNS_Q_GPR+0x58,r1)
	RESTORE_GPR(r12,CNS_Q_GPR+0x60,r1)
	RESTORE_GPR(r13,CNS_Q_GPR+0x68,r1)
	RESTORE_GPR(r14,CNS_Q_GPR+0x70,r1)
	RESTORE_GPR(r15,CNS_Q_GPR+0x78,r1)
	RESTORE_GPR(r16,CNS_Q_GPR+0x80,r1)
	RESTORE_GPR(r17,CNS_Q_GPR+0x88,r1)
	RESTORE_GPR(r18,CNS_Q_GPR+0x90,r1)
	RESTORE_GPR(r19,CNS_Q_GPR+0x98,r1)
	RESTORE_GPR(r20,CNS_Q_GPR+0xA0,r1)
	RESTORE_GPR(r21,CNS_Q_GPR+0xA8,r1)
	RESTORE_GPR(r22,CNS_Q_GPR+0xB0,r1)
	RESTORE_GPR(r23,CNS_Q_GPR+0xB8,r1)
	RESTORE_GPR(r24,CNS_Q_GPR+0xC0,r1)
	RESTORE_GPR(r25,CNS_Q_GPR+0xC8,r1)
	RESTORE_GPR(r26,CNS_Q_GPR+0xD0,r1)
	RESTORE_GPR(r27,CNS_Q_GPR+0xD8,r1)
	RESTORE_GPR(r28,CNS_Q_GPR+0xE0,r1)
	RESTORE_GPR(r29,CNS_Q_GPR+0xE8,r1)
	RESTORE_GPR(r30,CNS_Q_GPR+0xF0,r1)
	RESTORE_GPR(r31,CNS_Q_GPR+0xF8,r1)

//orig	// switch impure pointer from gpr to ipr area --
//orig	unfix_impure_gpr	r1
//orig	fix_impure_ipr	r1
//orig	restore_reg icsr, ipr=1		// restore original icsr- 4 bubbles to hw_rei

	lda	r1, -0x200(r1)		// Restore base address of impure area.
	lda	r1, CNS_Q_IPR(r1)	// Point to base of IPR area again.
	RESTORE_IPR(icsr,CNS_Q_ICSR,r1)

//orig	// and back again --
//orig	unfix_impure_ipr	r1
//orig	fix_impure_gpr	r1
//orig	store_reg1 flag, r31, r1, ipr=1	// clear dump area valid flag
//orig	mb

	lda	r1, -CNS_Q_IPR(r1)	// Back to base of impure area again,
	lda	r1, 0x200(r1)		// and back to center of CPU segment
	SAVE_GPR(r31,CNS_Q_FLAG,r1)	// Clear the dump area valid flag
	mb

//orig	// and back we go
//orig//	restore_reg 3
//orig	restore_reg 2
//orig//	restore_reg 1
//orig	restore_reg 0
//orig	// restore impure area base
//orig	unfix_impure_gpr r1

	RESTORE_GPR(r2,CNS_Q_GPR+0x10,r1)
	RESTORE_GPR(r0,CNS_Q_GPR+0x00,r1)
	lda	r1, -0x200(r1)		// Restore impure base address

	mfpr	r31, pt0		// stall for ldq_p above	//orig

	mtpr	r31, dtb_ia		// clear the tb			//orig
	mtpr	r31, itb_ia		// clear the itb		//orig

//orig	pvc_jsr	rststa, bsr=1, dest=1
	ret	r31, (r3)		// back we go			//orig
2353
2354
2355//
2356// pal_pal_bug_check -- code has found a bugcheck situation.
2357// Set things up and join common machine check flow.
2358//
2359// Input:
2360// r14 - exc_addr
2361//
2362// On exit:
2363// pt0 - saved r0
2364// pt1 - saved r1
2365// pt4 - saved r4
2366// pt5 - saved r5
2367// pt6 - saved r6
2368// pt10 - saved exc_addr
2369// pt_misc<47:32> - mchk code
2370// pt_misc<31:16> - scb vector
2371// r14 - base of Cbox IPRs in IO space
2372// MCES<mchk> is set
2373//
2374
2375 ALIGN_BLOCK
	.globl pal_pal_bug_check_from_int
// Bugcheck entry from interrupt context: the stack frame is already
// pushed, signalled by the low bit set in the mchk code.
pal_pal_bug_check_from_int:
	DEBUGSTORE(0x79)
//simos	DEBUG_EXC_ADDR()
	DEBUGSTORE(0x20)
//simos	bsr	r25, put_hex
	lda	r25, mchk_c_bugcheck(r31)
	addq	r25, 1, r25		// set flag indicating we came from interrupt and stack is already pushed
	br	r31, pal_pal_mchk
	nop

// Bugcheck entry from normal (non-interrupt) context.
pal_pal_bug_check:
	lda	r25, mchk_c_bugcheck(r31)

// Common flow: pack mchk code and SCB vector into pt_misc, stash
// scratch registers in PALtemps, then join the machine-check path.
pal_pal_mchk:
	sll	r25, 32, r25		// Move mchk code to position

	mtpr	r14, pt10		// Stash exc_addr
	mtpr	r14, exc_addr

	mfpr	r12, pt_misc		// Get MCES and scratch
	zap	r12, 0x3c, r12

	or	r12, r25, r12		// Combine mchk code
	lda	r25, scb_v_procmchk(r31)	// Get SCB vector

	sll	r25, 16, r25		// Move SCBv to position
	or	r12, r25, r25		// Combine SCBv

	mtpr	r0, pt0			// Stash for scratch
	bis	r25, mces_m_mchk, r25	// Set MCES<MCHK> bit

	mtpr	r25, pt_misc		// Save mchk code!scbv!whami!mces
	ldah	r14, 0xfff0(r31)

	mtpr	r1, pt1			// Stash for scratch
	zap	r14, 0xE0, r14		// Get Cbox IPR base

	mtpr	r4, pt4
	mtpr	r5, pt5

	mtpr	r6, pt6
	blbs	r12, sys_double_machine_check	// MCHK halt if double machine check

	br	r31, sys_mchk_collect_iprs	// Join common machine check flow
2421
2422
2423
2424// align_to_call_pal_section
2425// Align to address of first call_pal entry point - 2000
2426
2427//
2428// HALT - PALcode for HALT instruction
2429//
2430// Entry:
2431// Vectored into via hardware PALcode instruction dispatch.
2432//
2433// Function:
2434// GO to console code
2435//
2436//
2437
2438 .text 1
2439// . = 0x2000
	CALL_PAL_PRIV(PAL_HALT_ENTRY)
call_pal_halt:
	mfpr	r31, pt0		// Pad exc_addr read
	mfpr	r31, pt0

	mfpr	r12, exc_addr		// get PC
	subq	r12, 4, r12		// Point to the HALT

	mtpr	r12, exc_addr		// resuming re-executes the HALT
	mtpr	r0, pt0			// pal_update_pcb clobbers r0

//orig	pvc_jsr updpcb, bsr=1
	bsr	r0, pal_update_pcb	// update the pcb
	lda	r0, hlt_c_sw_halt(r31)	// set halt code to sw halt
	br	r31, sys_enter_console	// enter the console
2455
2456//
2457// CFLUSH - PALcode for CFLUSH instruction
2458//
2459// Entry:
2460// Vectored into via hardware PALcode instruction dispatch.
2461//
2462// R16 - contains the PFN of the page to be flushed
2463//
2464// Function:
2465// Flush all Dstream caches of 1 entire page
2466// The CFLUSH routine is in the system specific module.
2467//
2468//
2469
	CALL_PAL_PRIV(PAL_CFLUSH_ENTRY)
Call_Pal_Cflush:
	br	r31, sys_cflush		// flush handled in the system-specific module
2473
2474//
2475// DRAINA - PALcode for DRAINA instruction
2476//
2477// Entry:
2478// Vectored into via hardware PALcode instruction dispatch.
2479// Implicit TRAPB performed by hardware.
2480//
2481// Function:
2482// Stall instruction issue until all prior instructions are guaranteed to
2483// complete without incurring aborts. For the EV5 implementation, this
2484// means waiting until all pending DREADS are returned.
2485//
2486//
2487
	CALL_PAL_PRIV(PAL_DRAINA_ENTRY)
Call_Pal_Draina:
	ldah	r14, 0x100(r31)		// Init counter.  Value?
	nop

// Poll MAF_MODE<dread_pending> until no DREADs remain, bounded by the
// counter so a stuck read cannot hang the machine in PALmode.
DRAINA_LOOP:
	subq	r14, 1, r14		// Decrement counter
	mfpr	r13, ev5__maf_mode	// Fetch status bit

	srl	r13, maf_mode_v_dread_pending, r13
	ble	r14, DRAINA_LOOP_TOO_LONG	// counter expired -> halt

	nop
	blbs	r13, DRAINA_LOOP	// Wait until all DREADS clear

	hw_rei

DRAINA_LOOP_TOO_LONG:
	br	r31, call_pal_halt
2507
2508// CALL_PAL OPCDECs
2509
// Reserved privileged CALL_PAL function codes 0x03-0x08: each vectors
// to the common OPCDEC (illegal operand) dispatcher.
	CALL_PAL_PRIV(0x0003)
CallPal_OpcDec03:
	br	r31, osfpal_calpal_opcdec

	CALL_PAL_PRIV(0x0004)
CallPal_OpcDec04:
	br	r31, osfpal_calpal_opcdec

	CALL_PAL_PRIV(0x0005)
CallPal_OpcDec05:
	br	r31, osfpal_calpal_opcdec

	CALL_PAL_PRIV(0x0006)
CallPal_OpcDec06:
	br	r31, osfpal_calpal_opcdec

	CALL_PAL_PRIV(0x0007)
CallPal_OpcDec07:
	br	r31, osfpal_calpal_opcdec

	CALL_PAL_PRIV(0x0008)
CallPal_OpcDec08:
	br	r31, osfpal_calpal_opcdec
2533
2534//
2535// CSERVE - PALcode for CSERVE instruction
2536//
2537// Entry:
2538// Vectored into via hardware PALcode instruction dispatch.
2539//
2540// Function:
2541// Various functions for private use of console software
2542//
2543// option selector in r0
2544// arguments in r16....
2545// The CSERVE routine is in the system specific module.
2546//
2547//
2548
2549 CALL_PAL_PRIV(PAL_CSERVE_ENTRY)
2550Call_Pal_Cserve:
2551 br r31, sys_cserve
2552
2553//
2554// swppal - PALcode for swppal instruction
2555//
2556// Entry:
2557// Vectored into via hardware PALcode instruction dispatch.
2559// R16 contains the new PAL identifier
2560// R17:R21 contain implementation-specific entry parameters
2561//
2562// R0 receives status:
2563// 0 success (PAL was switched)
2564// 1 unknown PAL variant
2565// 2 known PAL variant, but PAL not loaded
2566//
2567//
2568// Function:
2569// Swap control to another PAL.
2570//
2571
2572 CALL_PAL_PRIV(PAL_SWPPAL_ENTRY)
2573Call_Pal_Swppal:
// r16 <= 255 is treated as a PAL variant id; larger values are treated
// as the physical base address of the new PAL image (ECO 59).
2574 cmpule r16, 255, r0 // see if a kibble was passed
2575 cmoveq r16, r16, r0 // if r16=0 then a valid address (ECO 59)
2576
2577 or r16, r31, r3 // set r3 incase this is a address
2578 blbc r0, swppal_cont // nope, try it as an address
2579
2580 cmpeq r16, 2, r0 // is it our friend OSF?
2581 blbc r0, swppal_fail // nope, don't know this fellow
2582
2583 br r2, CALL_PAL_SWPPAL_10_ // tis our buddy OSF
2584
2585// .global osfpal_hw_entry_reset
2586// .weak osfpal_hw_entry_reset
2587// .long <osfpal_hw_entry_reset-pal_start>
2588//orig halt // don't know how to get the address here - kludge ok, load pal at 0
2589 .long 0 // ?? hack upon hack...pb
2590
2591CALL_PAL_SWPPAL_10_: ldl_p r3, 0(r2) // fetch target addr
2592// ble r3, swppal_fail ; if OSF not linked in say not loaded.
2593 mfpr r2, pal_base // fetch pal base
2594
2595 addq r2, r3, r3 // add pal base
2596 lda r2, 0x3FFF(r31) // get pal base checker mask
2597
// New PAL base must be 16KB-aligned; any low bit set means a bad address.
2598 and r3, r2, r2 // any funky bits set?
2599 cmpeq r2, 0, r0 //
2600
2601 blbc r0, swppal_fail // return unknown if bad bit set.
2602 br r31, swppal_cont
2603
2603
2604// .sbttl "CALL_PAL OPCDECs"
2605
2606 CALL_PAL_PRIV(0x000B)
2607CallPal_OpcDec0B:
2608 br r31, osfpal_calpal_opcdec
2609
2610 CALL_PAL_PRIV(0x000C)
2611CallPal_OpcDec0C:
2612 br r31, osfpal_calpal_opcdec
2613
2614//
2615// wripir - PALcode for wripir instruction
2616//
2617// Entry:
2618// Vectored into via hardware PALcode instruction dispatch.
2619// r16 = processor number to interrupt
2620//
2621// Function:
2622// IPIR <- R16
2623// Handled in system-specific code
2624//
2625// Exit:
2626// interprocessor interrupt is recorded on the target processor
2627// and is initiated when the proper enabling conditions are present.
2628//
2629
2630 CALL_PAL_PRIV(PAL_WRIPIR_ENTRY)
2631Call_Pal_Wrpir:
2632 br r31, sys_wripir
2633
2634// .sbttl "CALL_PAL OPCDECs"
2635
2636 CALL_PAL_PRIV(0x000E)
2637CallPal_OpcDec0E:
2638 br r31, osfpal_calpal_opcdec
2639
2640 CALL_PAL_PRIV(0x000F)
2641CallPal_OpcDec0F:
2642 br r31, osfpal_calpal_opcdec
2643
2643
2644//
2645// rdmces - PALcode for rdmces instruction
2646//
2647// Entry:
2648// Vectored into via hardware PALcode instruction dispatch.
2649//
2650// Function:
2651// R0 <- ZEXT(MCES)
2652//
2653
2654 CALL_PAL_PRIV(PAL_RDMCES_ENTRY)
2655Call_Pal_Rdmces:
2656 mfpr r0, pt_mces // Read from PALtemp
2657 and r0, mces_m_all, r0 // Clear other bits
2658
2659 hw_rei
2660
2661//
2662// wrmces - PALcode for wrmces instruction
2663//
2664// Entry:
2665// Vectored into via hardware PALcode instruction dispatch.
2666//
2667// Function:
2668// If {R16<0> EQ 1} then MCES<0> <- 0 (MCHK)
2669// If {R16<1> EQ 1} then MCES<1> <- 0 (SCE)
2670// If {R16<2> EQ 1} then MCES<2> <- 0 (PCE)
2671// MCES<3> <- R16<3> (DPC)
2672// MCES<4> <- R16<4> (DSC)
2673//
2674// Note the W1C semantics: a 1 in R16<2:0> CLEARS the corresponding
2675// error-summary bit, while R16<4:3> are copied through directly.
2675.1//
2676 CALL_PAL_PRIV(PAL_WRMCES_ENTRY)
2677Call_Pal_Wrmces:
2678 and r16, ((1<<mces_v_mchk) | (1<<mces_v_sce) | (1<<mces_v_pce)), r13 // Isolate MCHK, SCE, PCE
2679 mfpr r14, pt_mces // Get current value
2680
2681 ornot r31, r13, r13 // Flip all the bits
2682 and r16, ((1<<mces_v_dpc) | (1<<mces_v_dsc)), r17
2683
2684 and r14, r13, r1 // Update MCHK, SCE, PCE
2685 bic r1, ((1<<mces_v_dpc) | (1<<mces_v_dsc)), r1 // Clear old DPC, DSC
2686
2687 or r1, r17, r1 // Update DPC and DSC
2688 mtpr r1, pt_mces // Write MCES back
2689
2690 nop // Pad to fix PT write->read restriction
2691
2692 nop
2693 hw_rei
2694
2695
2696
2697// CALL_PAL OPCDECs: unimplemented privileged function codes 0x12..0x2A
2697.1// all vector to the common illegal-opcode handler.
2698
2699 CALL_PAL_PRIV(0x0012)
2700CallPal_OpcDec12:
2701 br r31, osfpal_calpal_opcdec
2702
2703 CALL_PAL_PRIV(0x0013)
2704CallPal_OpcDec13:
2705 br r31, osfpal_calpal_opcdec
2706
2707 CALL_PAL_PRIV(0x0014)
2708CallPal_OpcDec14:
2709 br r31, osfpal_calpal_opcdec
2710
2711 CALL_PAL_PRIV(0x0015)
2712CallPal_OpcDec15:
2713 br r31, osfpal_calpal_opcdec
2714
2715 CALL_PAL_PRIV(0x0016)
2716CallPal_OpcDec16:
2717 br r31, osfpal_calpal_opcdec
2718
2719 CALL_PAL_PRIV(0x0017)
2720CallPal_OpcDec17:
2721 br r31, osfpal_calpal_opcdec
2722
2723 CALL_PAL_PRIV(0x0018)
2724CallPal_OpcDec18:
2725 br r31, osfpal_calpal_opcdec
2726
2727 CALL_PAL_PRIV(0x0019)
2728CallPal_OpcDec19:
2729 br r31, osfpal_calpal_opcdec
2730
2731 CALL_PAL_PRIV(0x001A)
2732CallPal_OpcDec1A:
2733 br r31, osfpal_calpal_opcdec
2734
2735 CALL_PAL_PRIV(0x001B)
2736CallPal_OpcDec1B:
2737 br r31, osfpal_calpal_opcdec
2738
2739 CALL_PAL_PRIV(0x001C)
2740CallPal_OpcDec1C:
2741 br r31, osfpal_calpal_opcdec
2742
2743 CALL_PAL_PRIV(0x001D)
2744CallPal_OpcDec1D:
2745 br r31, osfpal_calpal_opcdec
2746
2747 CALL_PAL_PRIV(0x001E)
2748CallPal_OpcDec1E:
2749 br r31, osfpal_calpal_opcdec
2750
2751 CALL_PAL_PRIV(0x001F)
2752CallPal_OpcDec1F:
2753 br r31, osfpal_calpal_opcdec
2754
2755 CALL_PAL_PRIV(0x0020)
2756CallPal_OpcDec20:
2757 br r31, osfpal_calpal_opcdec
2758
2759 CALL_PAL_PRIV(0x0021)
2760CallPal_OpcDec21:
2761 br r31, osfpal_calpal_opcdec
2762
2763 CALL_PAL_PRIV(0x0022)
2764CallPal_OpcDec22:
2765 br r31, osfpal_calpal_opcdec
2766
2767 CALL_PAL_PRIV(0x0023)
2768CallPal_OpcDec23:
2769 br r31, osfpal_calpal_opcdec
2770
2771 CALL_PAL_PRIV(0x0024)
2772CallPal_OpcDec24:
2773 br r31, osfpal_calpal_opcdec
2774
2775 CALL_PAL_PRIV(0x0025)
2776CallPal_OpcDec25:
2777 br r31, osfpal_calpal_opcdec
2778
2779 CALL_PAL_PRIV(0x0026)
2780CallPal_OpcDec26:
2781 br r31, osfpal_calpal_opcdec
2782
2783 CALL_PAL_PRIV(0x0027)
2784CallPal_OpcDec27:
2785 br r31, osfpal_calpal_opcdec
2786
2787 CALL_PAL_PRIV(0x0028)
2788CallPal_OpcDec28:
2789 br r31, osfpal_calpal_opcdec
2790
2791 CALL_PAL_PRIV(0x0029)
2792CallPal_OpcDec29:
2793 br r31, osfpal_calpal_opcdec
2794
2795 CALL_PAL_PRIV(0x002A)
2796CallPal_OpcDec2A:
2797 br r31, osfpal_calpal_opcdec
2798
2798
2799//
2800// wrfen - PALcode for wrfen instruction
2801//
2802// Entry:
2803// Vectored into via hardware PALcode instruction dispatch.
2804//
2805// Function:
2806// a0<0> -> ICSR<FPE>
2807// Store new FEN in PCB
2808// Final value of t0 (r1), t8..t10 (r22..r24) and a0 (r16)
2809// are UNPREDICTABLE
2810//
2811// Issue: What about pending FP loads when FEN goes from on->off????
2812//
2813
2814 CALL_PAL_PRIV(PAL_WRFEN_ENTRY)
2815Call_Pal_Wrfen:
2816 or r31, 1, r13 // Get a one
2817 mfpr r1, ev5__icsr // Get current FPE
2818
2819 sll r13, icsr_v_fpe, r13 // shift 1 to icsr<fpe> spot, e0
2820 and r16, 1, r16 // clean new fen
2821
2822 sll r16, icsr_v_fpe, r12 // shift new fen to correct bit position
2823 bic r1, r13, r1 // zero icsr<fpe>
2824
2825 or r1, r12, r1 // Or new FEN into ICSR
2826 mfpr r12, pt_pcbb // Get PCBB - E1
2827
2828 mtpr r1, ev5__icsr // write new ICSR. 3 Bubble cycles to HW_REI
2829 stl_p r16, osfpcb_q_fen(r12) // Store FEN in PCB.
2830
2831 mfpr r31, pt0 // Pad ICSR<FPE> write.
2832 mfpr r31, pt0
2833
2834 mfpr r31, pt0
2835// pvc_violate 225 // cuz PVC can't distinguish which bits changed
2836 hw_rei
2837
2838
2839 CALL_PAL_PRIV(0x002C)
2840CallPal_OpcDec2C:
2841 br r31, osfpal_calpal_opcdec
2842
2843//
2844// wrvptpr - PALcode for wrvptpr instruction
2845//
2846// Entry:
2847// Vectored into via hardware PALcode instruction dispatch.
2848//
2849// Function:
2850// vptptr <- a0 (r16)
2851// The virtual page table pointer is mirrored in both the Mbox and
2851.1// Ibox copies so instruction and data TB fills agree.
2852
2853 CALL_PAL_PRIV(PAL_WRVPTPTR_ENTRY)
2854Call_Pal_Wrvptptr:
2855 mtpr r16, ev5__mvptbr // Load Mbox copy
2856 mtpr r16, ev5__ivptbr // Load Ibox copy
2857 nop // Pad IPR write
2858 nop
2859 hw_rei
2860
2861 CALL_PAL_PRIV(0x002E)
2862CallPal_OpcDec2E:
2863 br r31, osfpal_calpal_opcdec
2864
2865 CALL_PAL_PRIV(0x002F)
2866CallPal_OpcDec2F:
2867 br r31, osfpal_calpal_opcdec
2868
2868
2869
2870//
2871// swpctx - PALcode for swpctx instruction
2872//
2873// Entry:
2874// hardware dispatch via callPal instruction
2875// R16 -> new pcb
2876//
2877// Function:
2878// dynamic state moved to old pcb
2879// new state loaded from new pcb
2880// pcbb pointer set
2881// old pcbb returned in R0
2882//
2883// Note: need to add perf monitor stuff
2884//
2885
2886 CALL_PAL_PRIV(PAL_SWPCTX_ENTRY)
2887Call_Pal_Swpctx:
2888 rpcc r13 // get cyccounter
2889 mfpr r0, pt_pcbb // get pcbb
2890
2891 ldq_p r22, osfpcb_q_fen(r16) // get new fen/pme
2892 ldq_p r23, osfpcb_l_cc(r16) // get new cc; asn presumably in upper half -- confirm vs. swpctx_cont
2893
// Fold the 64-bit cycle counter into a 32-bit process time: high half
// holds the offset, which is merged with the current count below.
2894 srl r13, 32, r25 // move offset
2895 mfpr r24, pt_usp // get usp
2896
2897 stq_p r30, osfpcb_q_ksp(r0) // store old ksp
2898// pvc_violate 379 // stq_p can't trap except replay. only problem if mf same ipr in same shadow.
2899 mtpr r16, pt_pcbb // set new pcbb
2900
2901 stq_p r24, osfpcb_q_usp(r0) // store usp
2902 addl r13, r25, r25 // merge for new time
2903
2904 stl_p r25, osfpcb_l_cc(r0) // save time
2905 ldah r24, (1<<(icsr_v_fpe-16))(r31)
2906
2907 and r22, 1, r12 // isolate fen
2908 mfpr r25, icsr // get current icsr
2909
2910 lda r24, (1<<icsr_v_pmp)(r24)
2911 br r31, swpctx_cont
2912
2912
2913//
2914// wrval - PALcode for wrval instruction
2915//
2916// Entry:
2917// Vectored into via hardware PALcode instruction dispatch.
2918//
2919// Function:
2920// sysvalue <- a0 (r16)
2921//
2922
2923 CALL_PAL_PRIV(PAL_WRVAL_ENTRY)
2924Call_Pal_Wrval:
2925 nop
2926 mtpr r16, pt_sysval // Pad paltemp write
2927 nop
2928 nop
2929 hw_rei
2930
2931//
2932// rdval - PALcode for rdval instruction
2933//
2934// Entry:
2935// Vectored into via hardware PALcode instruction dispatch.
2936//
2937// Function:
2938// v0 (r0) <- sysvalue
2939//
2940
2941 CALL_PAL_PRIV(PAL_RDVAL_ENTRY)
2942Call_Pal_Rdval:
2943 nop
2944 mfpr r0, pt_sysval
2945 nop
2946 hw_rei
2947
2948//
2949// tbi - PALcode for tbi instruction
2950//
2951// Entry:
2952// Vectored into via hardware PALcode instruction dispatch.
2953//
2954// Function:
2955// TB invalidate
2956// r16/a0 = TBI type
2957// r17/a1 = Va for TBISx instructions
2958//
2959
2960 CALL_PAL_PRIV(PAL_TBI_ENTRY)
2961Call_Pal_Tbi:
2962 addq r16, 2, r16 // bias TBI type from -2..3 up to 0..5 for table index
2963 br r23, CALL_PAL_tbi_10_ // get our address
2964
2965CALL_PAL_tbi_10_: cmpult r16, 6, r22 // see if in range
2966 lda r23, tbi_tbl-CALL_PAL_tbi_10_(r23) // set base to start of table
2967 sll r16, 4, r16 // * 16 (each tbi_tbl entry is 16 bytes)
2968 blbc r22, CALL_PAL_tbi_30_ // go rei, if not
2969
2970 addq r23, r16, r23 // addr of our code
2971//orig pvc_jsr tbi
2972 jmp r31, (r23) // and go do it
2973
2974CALL_PAL_tbi_30_:
2975 hw_rei
2976 nop
2977
2977
2978//
2979// wrent - PALcode for wrent instruction
2980//
2981// Entry:
2982// Vectored into via hardware PALcode instruction dispatch.
2983//
2984// Function:
2985// Update ent* in paltemps
2986// r16/a0 = Address of entry routine
2987// r17/a1 = Entry Number 0..5
2988//
2989// r22, r23 trashed
2990//
2991
2992 CALL_PAL_PRIV(PAL_WRENT_ENTRY)
2993Call_Pal_Wrent:
2994 cmpult r17, 6, r22 // see if in range
2995 br r23, CALL_PAL_wrent_10_ // get our address
2996
2997CALL_PAL_wrent_10_: bic r16, 3, r16 // clean pc
2998 blbc r22, CALL_PAL_wrent_30_ // go rei, if not in range
2999
3000 lda r23, wrent_tbl-CALL_PAL_wrent_10_(r23) // set base to start of table
3001 sll r17, 4, r17 // *16 (each wrent_tbl entry is 16 bytes)
3002
3003 addq r17, r23, r23 // Get address in table
3004//orig pvc_jsr wrent
3005 jmp r31, (r23) // and go do it
3006
3007CALL_PAL_wrent_30_:
3008 hw_rei // out of range, just return
3009
3010//
3011// swpipl - PALcode for swpipl instruction
3012//
3013// Entry:
3014// Vectored into via hardware PALcode instruction dispatch.
3015//
3016// Function:
3017// v0 (r0) <- PS<IPL>
3018// PS<IPL> <- a0<2:0> (r16)
3019//
3020// t8 (r22) is scratch
3021//
3022
3023 CALL_PAL_PRIV(PAL_SWPIPL_ENTRY)
3024Call_Pal_Swpipl:
3025 and r16, osfps_m_ipl, r16 // clean New ipl
3026 mfpr r22, pt_intmask // get int mask
3027
// pt_intmask holds one hardware mask byte per IPL; index it by new IPL.
3028 extbl r22, r16, r22 // get mask for this ipl
3029 bis r11, r31, r0 // return old ipl
3030
3031 bis r16, r31, r11 // set new ps
3032 mtpr r22, ev5__ipl // set new mask
3033
3034 mfpr r31, pt0 // pad ipl write
3035 mfpr r31, pt0 // pad ipl write
3036
3037 hw_rei // back
3038
3038
3039//
3040// rdps - PALcode for rdps instruction
3041//
3042// Entry:
3043// Vectored into via hardware PALcode instruction dispatch.
3044//
3045// Function:
3046// v0 (r0) <- ps
3047// (r11 is the PALshadow copy of the OSF PS)
3048
3049 CALL_PAL_PRIV(PAL_RDPS_ENTRY)
3050Call_Pal_Rdps:
3051 bis r11, r31, r0 // Fetch PALshadow PS
3052 nop // Must be 2 cycles long
3053 hw_rei
3054
3055//
3056// wrkgp - PALcode for wrkgp instruction
3057//
3058// Entry:
3059// Vectored into via hardware PALcode instruction dispatch.
3060//
3061// Function:
3062// kgp <- a0 (r16)
3063//
3064
3065 CALL_PAL_PRIV(PAL_WRKGP_ENTRY)
3066Call_Pal_Wrkgp:
3067 nop
3068 mtpr r16, pt_kgp
3069 nop // Pad for pt write->read restriction
3070 nop
3071 hw_rei
3072
3073//
3074// wrusp - PALcode for wrusp instruction
3075//
3076// Entry:
3077// Vectored into via hardware PALcode instruction dispatch.
3078//
3079// Function:
3080// usp <- a0 (r16)
3081//
3082
3083 CALL_PAL_PRIV(PAL_WRUSP_ENTRY)
3084Call_Pal_Wrusp:
3085 nop
3086 mtpr r16, pt_usp
3087 nop // Pad possible pt write->read restriction
3088 nop
3089 hw_rei
3090
3090
3091//
3092// wrperfmon - PALcode for wrperfmon instruction
3093//
3094// Entry:
3095// Vectored into via hardware PALcode instruction dispatch.
3096//
3097//
3098// Function:
3099// Various control functions for the onchip performance counters
3100//
3101// option selector in r16
3102// option argument in r17
3103// returned status in r0
3104//
3105//
3106// r16 = 0 Disable performance monitoring for one or more cpu's
3107// r17 = 0 disable no counters
3108// r17 = bitmask disable counters specified in bit mask (1=disable)
3109//
3110// r16 = 1 Enable performance monitoring for one or more cpu's
3111// r17 = 0 enable no counters
3112// r17 = bitmask enable counters specified in bit mask (1=enable)
3113//
3114// r16 = 2 Mux select for one or more cpu's
3115// r17 = Mux selection (cpu specific)
3116// <24:19> bc_ctl<pm_mux_sel> field (see spec)
3117// <31>,<7:4>,<3:0> pmctr <sel0>,<sel1>,<sel2> fields (see spec)
3118//
3119// r16 = 3 Options
3120// r17 = (cpu specific)
3121// <0> = 0 log all processes
3122// <0> = 1 log only selected processes
3123// <30,9,8> mode select - ku,kp,kk
3124//
3125// r16 = 4 Interrupt frequency select
3126// r17 = (cpu specific) indicates interrupt frequencies desired for each
3127// counter, with "zero interrupts" being an option
3128// frequency info in r17 bits as defined by PMCTR_CTL<FRQx> below
3129//
3130// r16 = 5 Read Counters
3131// r17 = na
3132// r0 = value (same format as ev5 pmctr)
3133// <0> = 0 Read failed
3134// <0> = 1 Read succeeded
3135//
3136// r16 = 6 Write Counters
3137// r17 = value (same format as ev5 pmctr; all counters written simultaneously)
3138//
3139// r16 = 7 Enable performance monitoring for one or more cpu's and reset counter to 0
3140// r17 = 0 enable no counters
3141// r17 = bitmask enable & clear counters specified in bit mask (1=enable & clear)
3142//
3143//=============================================================================
3144//Assumptions:
3145//PMCTR_CTL:
3146//
3147// <15:14> CTL0 -- encoded frequency select and enable - CTR0
3148// <13:12> CTL1 -- " - CTR1
3149// <11:10> CTL2 -- " - CTR2
3150//
3151// <9:8> FRQ0 -- frequency select for CTR0 (no enable info)
3152// <7:6> FRQ1 -- frequency select for CTR1
3153// <5:4> FRQ2 -- frequency select for CTR2
3154//
3155// <0> all vs. select processes (0=all,1=select)
3156//
3157// where
3158// FRQx<1:0>
3159// 0 1 disable interrupt
3160// 1 0 frequency = 65536 (16384 for ctr2)
3161// 1 1 frequency = 256
3162// note: FRQx<1:0> = 00 will keep counters from ever being enabled.
3163//
3164//=============================================================================
3165//
3166 CALL_PAL_PRIV(0x0039)
3167// unsupported in Hudson code .. pboyle Nov/95
3168CALL_PAL_Wrperfmon:
3169 // "real" performance monitoring code
3169.1 // Dispatcher only: each perfmon_* target lives in the .text 2
3169.2 // continuation area (the CALL_PAL vector slot is too small).
3170 cmpeq r16, 1, r0 // check for enable
3171 bne r0, perfmon_en // br if requested to enable
3172
3173 cmpeq r16, 2, r0 // check for mux ctl
3174 bne r0, perfmon_muxctl // br if request to set mux controls
3175
3176 cmpeq r16, 3, r0 // check for options
3177 bne r0, perfmon_ctl // br if request to set options
3178
3179 cmpeq r16, 4, r0 // check for interrupt frequency select
3180 bne r0, perfmon_freq // br if request to change frequency select
3181
3182 cmpeq r16, 5, r0 // check for counter read request
3183 bne r0, perfmon_rd // br if request to read counters
3184
3185 cmpeq r16, 6, r0 // check for counter write request
3186 bne r0, perfmon_wr // br if request to write counters
3187
3188 cmpeq r16, 7, r0 // check for counter clear/enable request
3189 bne r0, perfmon_enclr // br if request to clear/enable counters
3190
3191 beq r16, perfmon_dis // br if requested to disable (r16=0)
3192 br r31, perfmon_unknown // br if unknown request
3193
3193
3194//
3195// rdusp - PALcode for rdusp instruction
3196//
3197// Entry:
3198// Vectored into via hardware PALcode instruction dispatch.
3199//
3200// Function:
3201// v0 (r0) <- usp
3202//
3203
3204 CALL_PAL_PRIV(PAL_RDUSP_ENTRY)
3205Call_Pal_Rdusp:
3206 nop
3207 mfpr r0, pt_usp
3208 hw_rei
3209
3210
3211 CALL_PAL_PRIV(0x003B)
3212CallPal_OpcDec3B:
3213 br r31, osfpal_calpal_opcdec
3214
3215//
3216// whami - PALcode for whami instruction
3217//
3218// Entry:
3219// Vectored into via hardware PALcode instruction dispatch.
3220//
3221// Function:
3222// v0 (r0) <- whami
3223//
3224 CALL_PAL_PRIV(PAL_WHAMI_ENTRY)
3225Call_Pal_Whami:
3226 nop
3227 mfpr r0, pt_whami // Get Whami
3228 extbl r0, 1, r0 // Isolate just whami bits (byte 1 of pt_whami)
3229 hw_rei
3230
3230
3231//
3232// retsys - PALcode for retsys instruction
3233//
3234// Entry:
3235// Vectored into via hardware PALcode instruction dispatch.
3236// 00(sp) contains return pc
3237// 08(sp) contains r29
3238//
3239// Function:
3240// Return from system call.
3241// mode switched from kern to user.
3242// stacks swapped, ugp, upc restored.
3243// r23, r25 junked
3244//
3245
3246 CALL_PAL_PRIV(PAL_RETSYS_ENTRY)
3247Call_Pal_Retsys:
3248 lda r25, osfsf_c_size(sp) // pop stack
3249 bis r25, r31, r14 // touch r25 & r14 to stall mf exc_addr
3250
3251 mfpr r14, exc_addr // save exc_addr in case of fault
3252 ldq r23, osfsf_pc(sp) // get pc
3253
3254 ldq r29, osfsf_gp(sp) // get gp
3255 stl_c r31, -4(sp) // clear lock_flag
3256
3257 lda r11, 1<<osfps_v_mode(r31)// new PS:mode=user
3258 mfpr r30, pt_usp // get users stack
3259
3260 bic r23, 3, r23 // clean return pc
3261 mtpr r31, ev5__ipl // zero ibox IPL - 2 bubbles to hw_rei
3262
3263 mtpr r11, ev5__dtb_cm // set Mbox current mode - no virt ref for 2 cycles
3264 mtpr r11, ev5__ps // set Ibox current mode - 2 bubble to hw_rei
3265
3266 mtpr r23, exc_addr // set return address - 1 bubble to hw_rei
3267 mtpr r25, pt_ksp // save kern stack
3268
3269 rc r31 // clear inter_flag
3270// pvc_violate 248 // possible hidden mt->mf pt violation ok in callpal
3271 hw_rei_spe // and back
3272
3273
3274 CALL_PAL_PRIV(0x003E)
3275CallPal_OpcDec3E:
3276 br r31, osfpal_calpal_opcdec
3277
3277
3278//
3279// rti - PALcode for rti instruction
3280//
3281// Entry:
3282// Vectored into via hardware PALcode instruction dispatch.
3283//
3284// Function: (stack frame popped on return)
3285// 00(sp) -> ps
3286// 08(sp) -> pc
3287// 16(sp) -> r29 (gp)
3288// 24(sp) -> r16 (a0)
3289// 32(sp) -> r17 (a1)
3290// 40(sp) -> r18 (a2)
3291//
3292

3293 CALL_PAL_PRIV(PAL_RTI_ENTRY)
3294 /* called once by platform_tlaser */
3295 .globl Call_Pal_Rti
3296Call_Pal_Rti:
3297 lda r25, osfsf_c_size(sp) // get updated sp
3298 bis r25, r31, r14 // touch r14,r25 to stall mf exc_addr
3299
3300 mfpr r14, exc_addr // save PC in case of fault
3301 rc r31 // clear intr_flag
3302
// Frame fields are read relative to the popped sp (r25), hence the
// negative offsets: -6*8 = ps ... -1*8 = a2.
3303 ldq r12, -6*8(r25) // get ps
3304 ldq r13, -5*8(r25) // pc
3305
3306 ldq r18, -1*8(r25) // a2
3307 ldq r17, -2*8(r25) // a1
3308
3309 ldq r16, -3*8(r25) // a0
3310 ldq r29, -4*8(r25) // gp
3311
3312 bic r13, 3, r13 // clean return pc
3313 stl_c r31, -4(r25) // clear lock_flag
3314
3315 and r12, osfps_m_mode, r11 // get mode
3316 mtpr r13, exc_addr // set return address
3317
3318 beq r11, rti_to_kern // br if rti to Kern
3319 br r31, rti_to_user // out of call_pal space
3320
3320
3321
3322///////////////////////////////////////////////////
3323// Start the Unprivileged CALL_PAL Entry Points
3324///////////////////////////////////////////////////
3325
3326//
3327// bpt - PALcode for bpt instruction
3328//
3329// Entry:
3330// Vectored into via hardware PALcode instruction dispatch.
3331//
3332// Function:
3333// Build stack frame
3334// a0 <- code
3335// a1 <- unpred
3336// a2 <- unpred
3337// vector via entIF
3338//
3339//
3340//
3341 .text 1
3342// . = 0x3000
3343 CALL_PAL_UNPRIV(PAL_BPT_ENTRY)
3344Call_Pal_Bpt:
3345 sll r11, 63-osfps_v_mode, r25 // Shift mode up to MS bit
3346 mtpr r31, ev5__ps // Set Ibox current mode to kernel
3347
3348 bis r11, r31, r12 // Save PS for stack write
3349 bge r25, CALL_PAL_bpt_10_ // no stack swap needed if cm=kern
3350
// Came from user mode: switch to the kernel stack before building frame.
3351 mtpr r31, ev5__dtb_cm // Set Mbox current mode to kernel -
3352 // no virt ref for next 2 cycles
3353 mtpr r30, pt_usp // save user stack
3354
3355 bis r31, r31, r11 // Set new PS
3356 mfpr r30, pt_ksp
3357
3358CALL_PAL_bpt_10_:
3359 lda sp, 0-osfsf_c_size(sp)// allocate stack space
3360 mfpr r14, exc_addr // get pc
3361
3362 stq r16, osfsf_a0(sp) // save regs
3363 bis r31, osf_a0_bpt, r16 // set a0
3364
3365 stq r17, osfsf_a1(sp) // a1
3366 br r31, bpt_bchk_common // out of call_pal space
3367
3367
3368
3369//
3370// bugchk - PALcode for bugchk instruction
3371//
3372// Entry:
3373// Vectored into via hardware PALcode instruction dispatch.
3374//
3375// Function:
3376// Build stack frame
3377// a0 <- code
3378// a1 <- unpred
3379// a2 <- unpred
3380// vector via entIF
3381// (same sequence as bpt above, but with a0 = osf_a0_bugchk)
3382//
3383//
3384 CALL_PAL_UNPRIV(PAL_BUGCHK_ENTRY)
3385Call_Pal_Bugchk:
3386 sll r11, 63-osfps_v_mode, r25 // Shift mode up to MS bit
3387 mtpr r31, ev5__ps // Set Ibox current mode to kernel
3388
3389 bis r11, r31, r12 // Save PS for stack write
3390 bge r25, CALL_PAL_bugchk_10_ // no stack swap needed if cm=kern
3391
3392 mtpr r31, ev5__dtb_cm // Set Mbox current mode to kernel -
3393 // no virt ref for next 2 cycles
3394 mtpr r30, pt_usp // save user stack
3395
3396 bis r31, r31, r11 // Set new PS
3397 mfpr r30, pt_ksp
3398
3399CALL_PAL_bugchk_10_:
3400 lda sp, 0-osfsf_c_size(sp)// allocate stack space
3401 mfpr r14, exc_addr // get pc
3402
3403 stq r16, osfsf_a0(sp) // save regs
3404 bis r31, osf_a0_bugchk, r16 // set a0
3405
3406 stq r17, osfsf_a1(sp) // a1
3407 br r31, bpt_bchk_common // out of call_pal space
3408
3409
3410 CALL_PAL_UNPRIV(0x0082)
3411CallPal_OpcDec82:
3412 br r31, osfpal_calpal_opcdec
3413
3413
3414//
3415// callsys - PALcode for callsys instruction
3416//
3417// Entry:
3418// Vectored into via hardware PALcode instruction dispatch.
3419//
3420// Function:
3421// Switch mode to kernel and build a callsys stack frame.
3422// sp = ksp
3423// gp = kgp
3424// t8 - t10 (r22-r24) trashed
3425//
3426//
3427//
3428 CALL_PAL_UNPRIV(PAL_CALLSYS_ENTRY)
3429Call_Pal_Callsys:
3430
3431 and r11, osfps_m_mode, r24 // get mode
3432 mfpr r22, pt_ksp // get ksp
3433
3434 beq r24, sys_from_kern // sysCall from kern is not allowed
3435 mfpr r12, pt_entsys // get address of callSys routine
3436
3437//
3438// from here on we know we are in user going to Kern
3439//
3440 mtpr r31, ev5__dtb_cm // set Mbox current mode - no virt ref for 2 cycles
3441 mtpr r31, ev5__ps // set Ibox current mode - 2 bubble to hw_rei
3442
3443 bis r31, r31, r11 // PS=0 (mode=kern)
3444 mfpr r23, exc_addr // get pc
3445
3446 mtpr r30, pt_usp // save usp
3447 lda sp, 0-osfsf_c_size(r22)// set new sp
3448
3449 stq r29, osfsf_gp(sp) // save user gp/r29
3450 stq r24, osfsf_ps(sp) // save ps (old user-mode PS from r24)
3451
3452 stq r23, osfsf_pc(sp) // save pc
3453 mtpr r12, exc_addr // set address
3454 // 1 cycle to hw_rei
3455
3456 mfpr r29, pt_kgp // get the kern gp/r29
3457
3458 hw_rei_spe // and off we go!
3459
3460
3461 CALL_PAL_UNPRIV(0x0084)
3462CallPal_OpcDec84:
3463 br r31, osfpal_calpal_opcdec
3464
3465 CALL_PAL_UNPRIV(0x0085)
3466CallPal_OpcDec85:
3467 br r31, osfpal_calpal_opcdec
3468
3468
3469//
3470// imb - PALcode for imb instruction
3471//
3472// Entry:
3473// Vectored into via hardware PALcode instruction dispatch.
3474//
3475// Function:
3476// Flush the writebuffer and flush the Icache
3477//
3478//
3479//
3480 CALL_PAL_UNPRIV(PAL_IMB_ENTRY)
3481Call_Pal_Imb:
3482 mb // Clear the writebuffer
3483 mfpr r31, ev5__mcsr // Sync with clear
3484 nop
3485 nop
3486 br r31, pal_ic_flush // Flush Icache
3487
3487
3488
3489// CALL_PAL OPCDECs: unimplemented unprivileged function codes 0x87..0x9D
3489.1// all vector to the common illegal-opcode handler.
3490
3491 CALL_PAL_UNPRIV(0x0087)
3492CallPal_OpcDec87:
3493 br r31, osfpal_calpal_opcdec
3494
3495 CALL_PAL_UNPRIV(0x0088)
3496CallPal_OpcDec88:
3497 br r31, osfpal_calpal_opcdec
3498
3499 CALL_PAL_UNPRIV(0x0089)
3500CallPal_OpcDec89:
3501 br r31, osfpal_calpal_opcdec
3502
3503 CALL_PAL_UNPRIV(0x008A)
3504CallPal_OpcDec8A:
3505 br r31, osfpal_calpal_opcdec
3506
3507 CALL_PAL_UNPRIV(0x008B)
3508CallPal_OpcDec8B:
3509 br r31, osfpal_calpal_opcdec
3510
3511 CALL_PAL_UNPRIV(0x008C)
3512CallPal_OpcDec8C:
3513 br r31, osfpal_calpal_opcdec
3514
3515 CALL_PAL_UNPRIV(0x008D)
3516CallPal_OpcDec8D:
3517 br r31, osfpal_calpal_opcdec
3518
3519 CALL_PAL_UNPRIV(0x008E)
3520CallPal_OpcDec8E:
3521 br r31, osfpal_calpal_opcdec
3522
3523 CALL_PAL_UNPRIV(0x008F)
3524CallPal_OpcDec8F:
3525 br r31, osfpal_calpal_opcdec
3526
3527 CALL_PAL_UNPRIV(0x0090)
3528CallPal_OpcDec90:
3529 br r31, osfpal_calpal_opcdec
3530
3531 CALL_PAL_UNPRIV(0x0091)
3532CallPal_OpcDec91:
3533 br r31, osfpal_calpal_opcdec
3534
3535 CALL_PAL_UNPRIV(0x0092)
3536CallPal_OpcDec92:
3537 br r31, osfpal_calpal_opcdec
3538
3539 CALL_PAL_UNPRIV(0x0093)
3540CallPal_OpcDec93:
3541 br r31, osfpal_calpal_opcdec
3542
3543 CALL_PAL_UNPRIV(0x0094)
3544CallPal_OpcDec94:
3545 br r31, osfpal_calpal_opcdec
3546
3547 CALL_PAL_UNPRIV(0x0095)
3548CallPal_OpcDec95:
3549 br r31, osfpal_calpal_opcdec
3550
3551 CALL_PAL_UNPRIV(0x0096)
3552CallPal_OpcDec96:
3553 br r31, osfpal_calpal_opcdec
3554
3555 CALL_PAL_UNPRIV(0x0097)
3556CallPal_OpcDec97:
3557 br r31, osfpal_calpal_opcdec
3558
3559 CALL_PAL_UNPRIV(0x0098)
3560CallPal_OpcDec98:
3561 br r31, osfpal_calpal_opcdec
3562
3563 CALL_PAL_UNPRIV(0x0099)
3564CallPal_OpcDec99:
3565 br r31, osfpal_calpal_opcdec
3566
3567 CALL_PAL_UNPRIV(0x009A)
3568CallPal_OpcDec9A:
3569 br r31, osfpal_calpal_opcdec
3570
3571 CALL_PAL_UNPRIV(0x009B)
3572CallPal_OpcDec9B:
3573 br r31, osfpal_calpal_opcdec
3574
3575 CALL_PAL_UNPRIV(0x009C)
3576CallPal_OpcDec9C:
3577 br r31, osfpal_calpal_opcdec
3578
3579 CALL_PAL_UNPRIV(0x009D)
3580CallPal_OpcDec9D:
3581 br r31, osfpal_calpal_opcdec
3582
3582
3583//
3584// rdunique - PALcode for rdunique instruction
3585//
3586// Entry:
3587// Vectored into via hardware PALcode instruction dispatch.
3588//
3589// Function:
3590// v0 (r0) <- unique (per-process unique value stored in the PCB)
3591//
3592//
3593//
3594 CALL_PAL_UNPRIV(PAL_RDUNIQUE_ENTRY)
3595CALL_PALrdunique_:
3596 mfpr r0, pt_pcbb // get pcb pointer
3597 ldq_p r0, osfpcb_q_unique(r0) // get current value
3598
3599 hw_rei
3600
3601//
3602// wrunique - PALcode for wrunique instruction
3603//
3604// Entry:
3605// Vectored into via hardware PALcode instruction dispatch.
3606//
3607// Function:
3608// unique <- a0 (r16)
3609//
3610//
3611//
3612CALL_PAL_UNPRIV(PAL_WRUNIQUE_ENTRY)
3613CALL_PAL_Wrunique:
3614 nop
3615 mfpr r12, pt_pcbb // get pcb pointer
3616 stq_p r16, osfpcb_q_unique(r12)// store new value
3617 nop // Pad palshadow write
3618 hw_rei // back
3619
3620// CALL_PAL OPCDECs: unimplemented unprivileged function codes 0xA0..0xA9
3620.1// all vector to the common illegal-opcode handler.
3621
3622 CALL_PAL_UNPRIV(0x00A0)
3623CallPal_OpcDecA0:
3624 br r31, osfpal_calpal_opcdec
3625
3626 CALL_PAL_UNPRIV(0x00A1)
3627CallPal_OpcDecA1:
3628 br r31, osfpal_calpal_opcdec
3629
3630 CALL_PAL_UNPRIV(0x00A2)
3631CallPal_OpcDecA2:
3632 br r31, osfpal_calpal_opcdec
3633
3634 CALL_PAL_UNPRIV(0x00A3)
3635CallPal_OpcDecA3:
3636 br r31, osfpal_calpal_opcdec
3637
3638 CALL_PAL_UNPRIV(0x00A4)
3639CallPal_OpcDecA4:
3640 br r31, osfpal_calpal_opcdec
3641
3642 CALL_PAL_UNPRIV(0x00A5)
3643CallPal_OpcDecA5:
3644 br r31, osfpal_calpal_opcdec
3645
3646 CALL_PAL_UNPRIV(0x00A6)
3647CallPal_OpcDecA6:
3648 br r31, osfpal_calpal_opcdec
3649
3650 CALL_PAL_UNPRIV(0x00A7)
3651CallPal_OpcDecA7:
3652 br r31, osfpal_calpal_opcdec
3653
3654 CALL_PAL_UNPRIV(0x00A8)
3655CallPal_OpcDecA8:
3656 br r31, osfpal_calpal_opcdec
3657
3658 CALL_PAL_UNPRIV(0x00A9)
3659CallPal_OpcDecA9:
3660 br r31, osfpal_calpal_opcdec
3661
3661
3662
3663//
3664// gentrap - PALcode for gentrap instruction
3665//
3666// CALL_PAL_gentrap:
3667// Entry:
3668// Vectored into via hardware PALcode instruction dispatch.
3669//
3670// Function:
3671// Build stack frame
3672// a0 <- code
3673// a1 <- unpred
3674// a2 <- unpred
3675// vector via entIF
3676// (same sequence as bpt/bugchk, but with a0 = osf_a0_gentrap)
3677//
3678
3679 CALL_PAL_UNPRIV(0x00AA)
3680// unsupported in Hudson code .. pboyle Nov/95
3681CALL_PAL_gentrap:
3682 sll r11, 63-osfps_v_mode, r25 // Shift mode up to MS bit
3683 mtpr r31, ev5__ps // Set Ibox current mode to kernel
3684
3685 bis r11, r31, r12 // Save PS for stack write
3686 bge r25, CALL_PAL_gentrap_10_ // no stack swap needed if cm=kern
3687
3688 mtpr r31, ev5__dtb_cm // Set Mbox current mode to kernel -
3689 // no virt ref for next 2 cycles
3690 mtpr r30, pt_usp // save user stack
3691
3692 bis r31, r31, r11 // Set new PS
3693 mfpr r30, pt_ksp
3694
3695CALL_PAL_gentrap_10_:
3696 lda sp, 0-osfsf_c_size(sp)// allocate stack space
3697 mfpr r14, exc_addr // get pc
3698
3699 stq r16, osfsf_a0(sp) // save regs
3700 bis r31, osf_a0_gentrap, r16// set a0
3701
3702 stq r17, osfsf_a1(sp) // a1
3703 br r31, bpt_bchk_common // out of call_pal space
3704
3704
3705
3706// CALL_PAL OPCDECs: unimplemented unprivileged function codes 0xAB..0xBE
3706.1// all vector to the common illegal-opcode handler; 0xBF is repurposed
3706.2// (see note at CallPal_OpcDecBF below).
3707
3708 CALL_PAL_UNPRIV(0x00AB)
3709CallPal_OpcDecAB:
3710 br r31, osfpal_calpal_opcdec
3711
3712 CALL_PAL_UNPRIV(0x00AC)
3713CallPal_OpcDecAC:
3714 br r31, osfpal_calpal_opcdec
3715
3716 CALL_PAL_UNPRIV(0x00AD)
3717CallPal_OpcDecAD:
3718 br r31, osfpal_calpal_opcdec
3719
3720 CALL_PAL_UNPRIV(0x00AE)
3721CallPal_OpcDecAE:
3722 br r31, osfpal_calpal_opcdec
3723
3724 CALL_PAL_UNPRIV(0x00AF)
3725CallPal_OpcDecAF:
3726 br r31, osfpal_calpal_opcdec
3727
3728 CALL_PAL_UNPRIV(0x00B0)
3729CallPal_OpcDecB0:
3730 br r31, osfpal_calpal_opcdec
3731
3732 CALL_PAL_UNPRIV(0x00B1)
3733CallPal_OpcDecB1:
3734 br r31, osfpal_calpal_opcdec
3735
3736 CALL_PAL_UNPRIV(0x00B2)
3737CallPal_OpcDecB2:
3738 br r31, osfpal_calpal_opcdec
3739
3740 CALL_PAL_UNPRIV(0x00B3)
3741CallPal_OpcDecB3:
3742 br r31, osfpal_calpal_opcdec
3743
3744 CALL_PAL_UNPRIV(0x00B4)
3745CallPal_OpcDecB4:
3746 br r31, osfpal_calpal_opcdec
3747
3748 CALL_PAL_UNPRIV(0x00B5)
3749CallPal_OpcDecB5:
3750 br r31, osfpal_calpal_opcdec
3751
3752 CALL_PAL_UNPRIV(0x00B6)
3753CallPal_OpcDecB6:
3754 br r31, osfpal_calpal_opcdec
3755
3756 CALL_PAL_UNPRIV(0x00B7)
3757CallPal_OpcDecB7:
3758 br r31, osfpal_calpal_opcdec
3759
3760 CALL_PAL_UNPRIV(0x00B8)
3761CallPal_OpcDecB8:
3762 br r31, osfpal_calpal_opcdec
3763
3764 CALL_PAL_UNPRIV(0x00B9)
3765CallPal_OpcDecB9:
3766 br r31, osfpal_calpal_opcdec
3767
3768 CALL_PAL_UNPRIV(0x00BA)
3769CallPal_OpcDecBA:
3770 br r31, osfpal_calpal_opcdec
3771
3772 CALL_PAL_UNPRIV(0x00BB)
3773CallPal_OpcDecBB:
3774 br r31, osfpal_calpal_opcdec
3775
3776 CALL_PAL_UNPRIV(0x00BC)
3777CallPal_OpcDecBC:
3778 br r31, osfpal_calpal_opcdec
3779
3780 CALL_PAL_UNPRIV(0x00BD)
3781CallPal_OpcDecBD:
3782 br r31, osfpal_calpal_opcdec
3783
3784 CALL_PAL_UNPRIV(0x00BE)
3785CallPal_OpcDecBE:
3786 br r31, osfpal_calpal_opcdec
3787
3788 CALL_PAL_UNPRIV(0x00BF)
3789CallPal_OpcDecBF:
3790 // MODIFIED BY EGH 2/25/04
3790.1 // 0xBF is not an OPCDEC here: it dispatches to the M5 copypal
3790.2 // implementation instead of the illegal-opcode handler.
3791 br r31, copypal_impl
3792
3792
3793
3794/*======================================================================*/
3795/*                  OSF/1 CALL_PAL CONTINUATION AREA                    */
3796/*======================================================================*/
3797
	// Overflow area for handlers too large for their fixed-size
	// CALL_PAL entry slots; placed at offset 0x4000 in the PAL image.
3798	.text	2
3799
3800	. = 0x4000
3801
3802
3803// Continuation of MTPR_PERFMON
3804	ALIGN_BLOCK
3805			// "real" performance monitoring code
3806// mux ctl
//
// perfmon_muxctl: select the performance-counter input muxes.
//   In:  r17 = caller-supplied value holding new SEL0/SEL1/SEL2 fields
//              (pmctr positions) plus the Cbox bc_ctl pm_mux_sel field.
//   Out: branches to perfmon_success (r0 = 1, hw_rei).
//   Clobbers: r0, r8, r14, r16, r25.
// Updates the Ibox PMCTR IPR mux-select fields, then read-modify-writes
// the Cbox BC_CTL register via its software shadow copy kept in the
// per-CPU impure area (BC_CTL is write-only, hence the shadow).
3807perfmon_muxctl:
3808	lda	r8, 1(r31)			// get a 1
3809	sll	r8, pmctr_v_sel0, r8		// move to sel0 position
3810	or	r8, ((0xf<<pmctr_v_sel1) | (0xf<<pmctr_v_sel2)), r8	// build mux select mask
3811	and	r17, r8, r25			// isolate pmctr mux select bits
3812	mfpr	r0, ev5__pmctr
3813	bic	r0, r8, r0			// clear old mux select bits
3814	or	r0,r25, r25			// or in new mux select bits
3815	mtpr	r25, ev5__pmctr
3816
3817	// ok, now tackle cbox mux selects
3818	ldah	r14, 0xfff0(r31)
3819	zap	r14, 0xE0, r14			// Get Cbox IPR base
3820//orig	get_bc_ctl_shadow	r16		// bc_ctl returned in lower longword
3821// adapted from ev5_pal_macros.mar
3822	mfpr	r16, pt_impure
3823	lda	r16, CNS_Q_IPR(r16)
3824	RESTORE_SHADOW(r16,CNS_Q_BC_CTL,r16);
3825
3826	lda	r8, 0x3F(r31)			// build mux select mask
3827	sll	r8, bc_ctl_v_pm_mux_sel, r8
3828
3829	and	r17, r8, r25			// isolate bc_ctl mux select bits
3830	bic	r16, r8, r16			// isolate old mux select bits
3831	or	r16, r25, r25			// create new bc_ctl
3832	mb					// clear out cbox for future ipr write
3833	stq_p	r25, ev5__bc_ctl(r14)		// store to cbox ipr
3834	mb					// clear out cbox for future ipr write
3835
3836//orig	update_bc_ctl_shadow	r25, r16	// r25=value, r16-overwritten with adjusted impure ptr
3837// adapted from ev5_pal_macros.mar
	// Keep the impure-area shadow consistent with the value just
	// written to the hardware register.
3838	mfpr	r16, pt_impure
3839	lda	r16, CNS_Q_IPR(r16)
3840	SAVE_SHADOW(r25,CNS_Q_BC_CTL,r16);
3841
3842	br	r31, perfmon_success
3843
3844
3845// requested to disable perf monitoring
//
// perfmon_dis: disable selected performance counters.
//   In:  r17 = bitmask, bit0/bit1/bit2 request disabling ctr0/ctr1/ctr2
//              (r17 is shifted right and consumed as each bit is tested).
//   Out: branches to perfmon_success (r0 = 1, hw_rei).
//   Clobbers: r8, r14, r17, r25.
// Disabling a counter means clearing its 2-bit CTL field in the Ibox
// PMCTR IPR; the impure-area PM_CTL shadow is then updated to match
// (shadow only needed on EV5 pass1 — harmless on pass2+, see below).
3846perfmon_dis:
3847	mfpr	r14, ev5__pmctr		// read ibox pmctr ipr
3848perfmon_dis_ctr0:			// and begin with ctr0
3849	blbc	r17, perfmon_dis_ctr1	// do not disable ctr0
3850	lda	r8, 3(r31)
3851	sll	r8, pmctr_v_ctl0, r8
3852	bic	r14, r8, r14		// disable ctr0
3853perfmon_dis_ctr1:
3854	srl	r17, 1, r17
3855	blbc	r17, perfmon_dis_ctr2	// do not disable ctr1
3856	lda	r8, 3(r31)
3857	sll	r8, pmctr_v_ctl1, r8
3858	bic	r14, r8, r14		// disable ctr1
3859perfmon_dis_ctr2:
3860	srl	r17, 1, r17
3861	blbc	r17, perfmon_dis_update	// do not disable ctr2
3862	lda	r8, 3(r31)
3863	sll	r8, pmctr_v_ctl2, r8
3864	bic	r14, r8, r14		// disable ctr2
3865perfmon_dis_update:
3866	mtpr	r14, ev5__pmctr		// update pmctr ipr
3867//;the following code is not needed for ev5 pass2 and later, but doesn't hurt anything to leave in
3868// adapted from ev5_pal_macros.mar
3869//orig	get_pmctr_ctl	r8, r25		// pmctr_ctl bit in r8.  adjusted impure pointer in r25
3870	mfpr	r25, pt_impure
3871	lda	r25, CNS_Q_IPR(r25)
3872	RESTORE_SHADOW(r8,CNS_Q_PM_CTL,r25);
3873
	// Merge the new hardware CTL bits into the shadow copy:
	// mask covers all three CTL fields (ctl2 is the lowest of them).
3874	lda	r17, 0x3F(r31)		// build mask
3875	sll	r17, pmctr_v_ctl2, r17	// shift mask to correct position
3876	and	r14, r17, r14		// isolate ctl bits
3877	bic	r8, r17, r8		// clear out old ctl bits
3878	or	r14, r8, r14		// create shadow ctl bits
3879//orig	store_reg1	pmctr_ctl, r14, r25, ipr=1	// update pmctr_ctl register
3880//adjusted impure pointer still in r25
3881	SAVE_SHADOW(r14,CNS_Q_PM_CTL,r25);
3882
3883	br	r31, perfmon_success
3884
3885
3886// requested to enable perf monitoring
3887//;the following code can be greatly simplified for pass2, but should work fine as is.
//
// perfmon_enclr / perfmon_en: enable selected performance counters,
// optionally clearing their count fields first.
//   In:  r17 = bitmask, bit0/bit1/bit2 request enabling ctr0/ctr1/ctr2
//              (shifted right and consumed as each bit is tested).
//   Out: branches to perfmon_success (r0 = 1, hw_rei).
//   Clobbers: r8, r9, r12, r13, r14, r16, r17, r25.
// perfmon_enclr sets r9=1 (also clear the counter value fields);
// perfmon_en sets r9=0 (enable without clearing).  The enable state is
// driven by the process's PME bit from the OSF PCB: it is copied into
// ICSR<PMP> (the pass2 master enable), and the per-counter CTL fields
// take their frequency-select values from the PM_CTL shadow in the
// impure area.
3888
3889
3890perfmon_enclr:
3891	lda	r9, 1(r31)		// set enclr flag
3892	br	perfmon_en_cont
3893
3894perfmon_en:
3895	bis	r31, r31, r9		// clear enclr flag
3896
3897perfmon_en_cont:
3898	mfpr	r8, pt_pcbb		// get PCB base
3899//orig	get_pmctr_ctl r25, r25
3900	mfpr	r25, pt_impure
3901	lda	r25, CNS_Q_IPR(r25)
3902	RESTORE_SHADOW(r25,CNS_Q_PM_CTL,r25);
3903
3904	ldq_p	r16, osfpcb_q_fen(r8)	// read DAT/PME/FEN quadword
3905	mfpr	r14, ev5__pmctr		// read ibox pmctr ipr
3906	srl	r16, osfpcb_v_pme, r16	// get pme bit
3907	mfpr	r13, icsr
3908	and	r16, 1, r16		// isolate pme bit
3909
3910	// this code only needed in pass2 and later
	// Mirror the process's PME bit into ICSR<PMP>, which gates
	// performance monitoring on pass2 hardware.
3911	lda	r12, 1<<icsr_v_pmp(r31)		// pb
3912	bic	r13, r12, r13		// clear pmp bit
3913	sll	r16, icsr_v_pmp, r12	// move pme bit to icsr<pmp> position
3914	or	r12, r13, r13		// new icsr with icsr<pmp> bit set/clear
3915	mtpr	r13, icsr		// update icsr
3916
3917	bis	r31, 1, r16		// set r16<0> on pass2 to update pmctr always (icsr provides real enable)
3918
3919	sll	r25, 6, r25		// shift frequency bits into pmctr_v_ctl positions
3920	bis	r14, r31, r13		// copy pmctr
3921
	// For each requested counter: optionally clear its count field
	// (r9 set), then install the frequency-select bits into its CTL
	// field.  r14 = pmctr with enables, r13 = pmctr without.
3922perfmon_en_ctr0:			// and begin with ctr0
3923	blbc	r17, perfmon_en_ctr1	// do not enable ctr0
3924
3925	blbc	r9, perfmon_en_noclr0	// enclr flag set, clear ctr0 field
3926	lda	r8, 0xffff(r31)
3927	zapnot	r8, 3, r8		// ctr0<15:0> mask
3928	sll	r8, pmctr_v_ctr0, r8
3929	bic	r14, r8, r14		// clear ctr bits
3930	bic	r13, r8, r13		// clear ctr bits
3931
3932perfmon_en_noclr0:
3933//orig	get_addr	r8, 3<<pmctr_v_ctl0, r31
3934	LDLI(r8, (3<<pmctr_v_ctl0))
3935	and 	r25, r8, r12		//isolate frequency select bits for ctr0
3936	bic	r14, r8, r14		// clear ctl0 bits in preparation for enabling
3937	or	r14,r12,r14		// or in new ctl0 bits
3938
3939perfmon_en_ctr1:			// enable ctr1
3940	srl	r17, 1, r17		// get ctr1 enable
3941	blbc	r17, perfmon_en_ctr2	// do not enable ctr1
3942
3943	blbc	r9, perfmon_en_noclr1	// if enclr flag set, clear ctr1 field
3944	lda	r8, 0xffff(r31)
3945	zapnot	r8, 3, r8		// ctr1<15:0> mask
3946	sll	r8, pmctr_v_ctr1, r8
3947	bic	r14, r8, r14		// clear ctr bits
3948	bic	r13, r8, r13		// clear ctr bits
3949
3950perfmon_en_noclr1:
3951//orig	get_addr	r8, 3<<pmctr_v_ctl1, r31
3952	LDLI(r8, (3<<pmctr_v_ctl1))
3953	and	r25, r8, r12		//isolate frequency select bits for ctr1
3954	bic	r14, r8, r14		// clear ctl1 bits in preparation for enabling
3955	or	r14,r12,r14		// or in new ctl1 bits
3956
3957perfmon_en_ctr2: 			// enable ctr2
3958	srl	r17, 1, r17		// get ctr2 enable
3959	blbc	r17, perfmon_en_return	// do not enable ctr2 - return
3960
3961	blbc	r9, perfmon_en_noclr2	// if enclr flag set, clear ctr2 field
3962	lda	r8, 0x3FFF(r31)		// ctr2<13:0> mask
3963	sll	r8, pmctr_v_ctr2, r8
3964	bic	r14, r8, r14		// clear ctr bits
3965	bic	r13, r8, r13		// clear ctr bits
3966
3967perfmon_en_noclr2:
3968//orig	get_addr	r8, 3<<pmctr_v_ctl2, r31
3969	LDLI(r8, (3<<pmctr_v_ctl2))
3970	and	r25, r8, r12		//isolate frequency select bits for ctr2
3971	bic	r14, r8, r14		// clear ctl2 bits in preparation for enabling
3972	or	r14,r12,r14		// or in new ctl2 bits
3973
3974perfmon_en_return:
3975	cmovlbs	r16, r14, r13		// if pme enabled, move enables into pmctr
3976					// else only do the counter clears
3977	mtpr	r13, ev5__pmctr		// update pmctr ipr
3978
3979//;this code not needed for pass2 and later, but does not hurt to leave it in
	// Fold the new CTL fields back into the PM_CTL shadow kept in
	// the impure area (pass1 compatibility).
3980	lda	r8, 0x3F(r31)
3981//orig	get_pmctr_ctl	r25, r12	// read pmctr ctl; r12=adjusted impure pointer
3982	mfpr	r12, pt_impure
3983	lda	r12, CNS_Q_IPR(r12)
3984	RESTORE_SHADOW(r25,CNS_Q_PM_CTL,r12);
3985
3986	sll	r8, pmctr_v_ctl2, r8	// build ctl mask
3987	and	r8, r14, r14		// isolate new ctl bits
3988	bic	r25, r8, r25		// clear out old ctl value
3989	or	r25, r14, r14		// create new pmctr_ctl
3990//orig	store_reg1	pmctr_ctl, r14, r12, ipr=1
3991	SAVE_SHADOW(r14,CNS_Q_PM_CTL,r12);	// r12 still has the adjusted impure ptr
3992
3993	br	r31, perfmon_success
3994
3995
3996// options...
//
// perfmon_ctl: set performance-monitoring mode options.
//   In:  r17 = new mode bits: KILLU/KILLP/KILLK (pmctr positions) select
//              which processor modes are counted; r17<0> selects
//              "monitor selected processes only" (1) vs all processes (0).
//   Out: branches to perfmon_success (r0 = 1, hw_rei).
//   Clobbers: r0, r8, r12, r14, r25.
3997perfmon_ctl:
3998
3999// set mode
4000//orig	get_pmctr_ctl	r14, r12	// read shadow pmctr ctl; r12=adjusted impure pointer
4001	mfpr	r12, pt_impure
4002	lda	r12, CNS_Q_IPR(r12)
4003	RESTORE_SHADOW(r14,CNS_Q_PM_CTL,r12);
4004
4005	// build mode mask for pmctr register
4006	LDLI(r8, ((1<<pmctr_v_killu) | (1<<pmctr_v_killp) | (1<<pmctr_v_killk)))
4007	mfpr	r0, ev5__pmctr
4008	and	r17, r8, r25		// isolate pmctr mode bits
4009	bic	r0, r8, r0		// clear old mode bits
4010	or	r0, r25, r25		// or in new mode bits
4011	mtpr	r25, ev5__pmctr
4012
4013	// the following code will only be used in pass2, but should
4014	// not hurt anything if run in pass1.
	// ICSR<PMA> = "monitor all processes": set when r17<0>=0,
	// cleared (via cmovlbs) when r17<0>=1.
4015	mfpr	r8, icsr
4016	lda	r25, 1<<icsr_v_pma(r31)	// set icsr<pma> if r17<0>=0
4017	bic	r8, r25, r8		// clear old pma bit
4018	cmovlbs	r17, r31, r25		// and clear icsr<pma> if r17<0>=1
4019	or	r8, r25, r8
4020	mtpr	r8, icsr		// 4 bubbles to hw_rei
4021	mfpr	r31, pt0		// pad icsr write
4022	mfpr	r31, pt0		// pad icsr write
4023
4024	// the following code not needed for pass2 and later, but
4025	// should work anyway.
	// Record the select-processes flag in bit 0 of the PM_CTL shadow.
4026	bis     r14, 1, r14		// set for select processes
4027	blbs	r17, perfmon_sp		// branch if select processes
4028	bic	r14, 1, r14		// all processes
4029perfmon_sp:
4030//orig	store_reg1 pmctr_ctl, r14, r12, ipr=1	// update pmctr_ctl register
4031	SAVE_SHADOW(r14,CNS_Q_PM_CTL,r12); // r12 still has the adjusted impure ptr
4032	br	r31, perfmon_success
4033
4034// counter frequency select
//
// perfmon_freq: record the counters' interrupt-frequency selects in the
// PM_CTL shadow (impure area).  Note this only updates the shadow; the
// hardware CTL fields pick the value up on the next perfmon_en.
//   In:  r17 = 6 bits of frequency-select data, expected already shifted
//              to the shadow's frq field position (masked below).
//   Out: branches to perfmon_success (r0 = 1, hw_rei).
//   Clobbers: r8, r12, r14, r17.
4035perfmon_freq:
4036//orig	get_pmctr_ctl	r14, r12	// read shadow pmctr ctl; r12=adjusted impure pointer
4037	mfpr	r12, pt_impure
4038	lda	r12, CNS_Q_IPR(r12)
4039	RESTORE_SHADOW(r14,CNS_Q_PM_CTL,r12);
4040
4041	lda	r8, 0x3F(r31)
4042//orig	sll	r8, pmctr_ctl_v_frq2, r8	// build mask for frequency select field
4043// I guess this should be a shift of 4 bits from the above control register structure
	// NOTE(review): the shift value 4 below was reverse-engineered by a
	// previous maintainer (see comment above) rather than taken from the
	// original pmctr_ctl_v_frq2 symbol — confirm against the EV5 PALcode
	// sources if this field ever misbehaves.
4044#define	pmctr_ctl_v_frq2_SHIFT 4
4045	sll	r8, pmctr_ctl_v_frq2_SHIFT, r8	// build mask for frequency select field
4046
4047	and	r8, r17, r17
4048	bic	r14, r8, r14		// clear out old frequency select bits
4049
4050	or	r17, r14, r14		// or in new frequency select info
4051//orig	store_reg1	pmctr_ctl, r14, r12, ipr=1	// update pmctr_ctl register
4052	SAVE_SHADOW(r14,CNS_Q_PM_CTL,r12); // r12 still has the adjusted impure ptr
4053
4054	br	r31, perfmon_success
4055
4056// read counters
//
// perfmon_rd: return the raw PMCTR value to the caller.
//   Out: r0 = PMCTR contents with bit 0 forced to 1 as success status
//        (bit 0 of PMCTR is unused by counter data — presumed; confirm
//        against the EV5 PMCTR layout), then hw_rei back to the caller.
4057perfmon_rd:
4058	mfpr	r0, ev5__pmctr
4059	or	r0, 1, r0	// or in return status
4060	hw_rei			// back to user
4061
4062// write counters
//
// perfmon_wr: write new values into the three counter COUNT fields,
// preserving the CTL/mode fields of PMCTR.
//   In:  r17 = quadword holding new counter values at their pmctr
//              positions.
//   Out: falls through to perfmon_success (r0 = 1, hw_rei).
//   Clobbers: r8, r9, r14, r25.
4063perfmon_wr:
4064	mfpr	r14, ev5__pmctr
4065	lda	r8, 0x3FFF(r31)		// ctr2<13:0> mask
4066	sll	r8, pmctr_v_ctr2, r8
4067
	// 32 set bits shifted to pmctr_v_ctr1 — covers the adjacent
	// ctr1<15:0> and ctr0<15:0> fields (original comment said
	// "ctr2,ctr1", which contradicts the shift and the "or ctr2,
	// ctr1, ctr0 mask" comment below; ctr2's mask was built above).
4068	LDLI(r9, (0xFFFFFFFF))	// ctr1<15:0>,ctr0<15:0> mask
4069	sll	r9, pmctr_v_ctr1, r9
4070	or	r8, r9, r8		// or ctr2, ctr1, ctr0 mask
4071	bic	r14, r8, r14		// clear ctr fields
4072	and	r17, r8, r25		// clear all but ctr fields
4073	or	r25, r14, r14		// write ctr fields
4074	mtpr	r14, ev5__pmctr		// update pmctr ipr
4075
4076	mfpr	r31, pt0		// pad pmctr write (needed only to keep PVC happy)
4077
// Common perfmon exit paths: r0 = 1 for success, r0 = 0 for an
// unrecognized perfmon subfunction; hw_rei returns to the caller.
4078perfmon_success:
4079	or	r31, 1, r0		// set success
4080	hw_rei				// back to user
4081
4082perfmon_unknown:
4083	or	r31, r31, r0		// set fail
4084	hw_rei				// back to user
4085
4086
4087//////////////////////////////////////////////////////////
4088// Copy code
4089//////////////////////////////////////////////////////////
4090
4091copypal_impl:
4092 mov r16, r0
4093#ifdef CACHE_COPY
4094#ifndef CACHE_COPY_UNALIGNED
4095 and r16, 63, r8
4096 and r17, 63, r9
4097 bis r8, r9, r8
4098 bne r8, cache_copy_done
4099#endif
4100 bic r18, 63, r8
4101 and r18, 63, r18
4102 beq r8, cache_copy_done
4103cache_loop:
4104 ldf f17, 0(r16)
4105 stf f17, 0(r16)
4106 addq r17, 64, r17
4107 addq r16, 64, r16
4108 subq r8, 64, r8
4109 bne r8, cache_loop
4110cache_copy_done:
4111#endif
4112 ble r18, finished // if len <=0 we are finished
4113 ldq_u r8, 0(r17)
4114 xor r17, r16, r9
4115 and r9, 7, r9
4116 and r16, 7, r10
4117 bne r9, unaligned
4118 beq r10, aligned
4119 ldq_u r9, 0(r16)
4120 addq r18, r10, r18
4121 mskqh r8, r17, r8
4122 mskql r9, r17, r9
4123 bis r8, r9, r8
4124aligned:
4125 subq r18, 1, r10
4126 bic r10, 7, r10
4127 and r18, 7, r18
4128 beq r10, aligned_done
4129loop:
4130 stq_u r8, 0(r16)
4131 ldq_u r8, 8(r17)
4132 subq r10, 8, r10
4133 lda r16,8(r16)
4134 lda r17,8(r17)
4135 bne r10, loop
4136aligned_done:
4137 bne r18, few_left
4138 stq_u r8, 0(r16)
4139 br r31, finished
4140 few_left:
4141 mskql r8, r18, r10
4142 ldq_u r9, 0(r16)
4143 mskqh r9, r18, r9
4144 bis r10, r9, r10
4145 stq_u r10, 0(r16)
4146 br r31, finished
4147unaligned:
4148 addq r17, r18, r25
4149 cmpule r18, 8, r9
4150 bne r9, unaligned_few_left
4151 beq r10, unaligned_dest_aligned
4152 and r16, 7, r10
4153 subq r31, r10, r10
4154 addq r10, 8, r10
4155 ldq_u r9, 7(r17)
4156 extql r8, r17, r8
4157 extqh r9, r17, r9
4158 bis r8, r9, r12
4159 insql r12, r16, r12
4160 ldq_u r13, 0(r16)
4161 mskql r13, r16, r13
4162 bis r12, r13, r12
4163 stq_u r12, 0(r16)
4164 addq r16, r10, r16
4165 addq r17, r10, r17
4166 subq r18, r10, r18
4167 ldq_u r8, 0(r17)
4168unaligned_dest_aligned:
4169 subq r18, 1, r10
4170 bic r10, 7, r10
4171 and r18, 7, r18
4172 beq r10, unaligned_partial_left
4173unaligned_loop:
4174 ldq_u r9, 7(r17)
4175 lda r17, 8(r17)
4176 extql r8, r17, r12
4177 extqh r9, r17, r13
4178 subq r10, 8, r10
4179 bis r12, r13, r13
4180 stq r13, 0(r16)
4181 lda r16, 8(r16)
4182 beq r10, unaligned_second_partial_left
4183 ldq_u r8, 7(r17)
4184 lda r17, 8(r17)
4185 extql r9, r17, r12
4186 extqh r8, r17, r13
4187 bis r12, r13, r13
4188 subq r10, 8, r10
4189 stq r13, 0(r16)
4190 lda r16, 8(r16)
4191 bne r10, unaligned_loop
4192unaligned_partial_left:
4193 mov r8, r9
4194unaligned_second_partial_left:
4195 ldq_u r8, -1(r25)
4196 extql r9, r17, r9
4197 extqh r8, r17, r8
4198 bis r8, r9, r8
4199 bne r18, few_left
4200 stq_u r8, 0(r16)
4201 br r31, finished
4202unaligned_few_left:
4203 ldq_u r9, -1(r25)
4204 extql r8, r17, r8
4205 extqh r9, r17, r9
4206 bis r8, r9, r8
4207 insqh r8, r16, r9
4208 insql r8, r16, r8
4209 lda r12, -1(r31)
4210 mskql r12, r18, r13
4211 cmovne r13, r13, r12
4212 insqh r12, r16, r13
4213 insql r12, r16, r12
4214 addq r16, r18, r10
4215 ldq_u r14, 0(r16)
4216 ldq_u r25, -1(r10)
4217 bic r14, r12, r14
4218 bic r25, r13, r25
4219 and r8, r12, r8
4220 and r9, r13, r9
4221 bis r8, r14, r8
4222 bis r9, r25, r9
4223 stq_u r9, -1(r10)
4224 stq_u r8, 0(r16)
4225finished:
4226 hw_rei