/*
 * Copyright (c) 2003-2006 The Regents of The University of Michigan
 * Copyright (c) 1992-1995 Hewlett-Packard Development Company
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Ali G. Saidi
 *          Nathan L. Binkert
 */
32
/*
 * Copyright 1992, 1993, 1994, 1995 Hewlett-Packard Development
 * Company, L.P.
 *
 * Permission is hereby granted, free of charge, to any person
 * obtaining a copy of this software and associated documentation
 * files (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy,
 * modify, merge, publish, distribute, sublicense, and/or sell copies
 * of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
56
57// modified to use the Hudson style "impure.h" instead of ev5_impure.sdl
58// since we don't have a mechanism to expand the data structures.... pb Nov/95
59#include "ev5_defs.h"
60#include "ev5_impure.h"
61#include "ev5_alpha_defs.h"
62#include "ev5_paldef.h"
63#include "ev5_osfalpha_defs.h"
64#include "fromHudsonMacros.h"
65#include "fromHudsonOsf.h"
66#include "dc21164FromGasSources.h"
67
68#define DEBUGSTORE(c) nop
69
70#define DEBUG_EXC_ADDR()\
71 bsr r25, put_exc_addr; \
72 DEBUGSTORE(13) ; \
73 DEBUGSTORE(10)
74
75// This is the fix for the user-mode super page references causing the
76// machine to crash.
77#define hw_rei_spe hw_rei
78
79#define vmaj 1
80#define vmin 18
81#define vms_pal 1
82#define osf_pal 2
83#define pal_type osf_pal
84#define osfpal_version_l ((pal_type<<16) | (vmaj<<8) | (vmin<<0))
85
86
87///////////////////////////
88// PALtemp register usage
89///////////////////////////
90
91// The EV5 Ibox holds 24 PALtemp registers. This maps the OSF PAL usage
92// for these PALtemps:
93//
94// pt0 local scratch
95// pt1 local scratch
96// pt2 entUna pt_entUna
97// pt3 CPU specific impure area pointer pt_impure
98// pt4 memory management temp
99// pt5 memory management temp
100// pt6 memory management temp
101// pt7 entIF pt_entIF
102// pt8 intmask pt_intmask
103// pt9 entSys pt_entSys
104// pt10
105// pt11 entInt pt_entInt
106// pt12 entArith pt_entArith
107// pt13 reserved for system specific PAL
108// pt14 reserved for system specific PAL
109// pt15 reserved for system specific PAL
110// pt16 MISC: scratch ! WHAMI<7:0> ! 0 0 0 MCES<4:0> pt_misc, pt_whami,
111// pt_mces
112// pt17 sysval pt_sysval
113// pt18 usp pt_usp
114// pt19 ksp pt_ksp
115// pt20 PTBR pt_ptbr
116// pt21 entMM pt_entMM
117// pt22 kgp pt_kgp
118// pt23 PCBB pt_pcbb
119//
120//
121
122
123/////////////////////////////
124// PALshadow register usage
125/////////////////////////////
126
127//
128// EV5 shadows R8-R14 and R25 when in PALmode and ICSR<shadow_enable> = 1.
129// This maps the OSF PAL usage of R8 - R14 and R25:
130//
131// r8 ITBmiss/DTBmiss scratch
132// r9 ITBmiss/DTBmiss scratch
133// r10 ITBmiss/DTBmiss scratch
134// r11 PS
135// r12 local scratch
136// r13 local scratch
137// r14 local scratch
138// r25 local scratch
139//
140
141
142
143// .sbttl "PALcode configuration options"
144
145// There are a number of options that may be assembled into this version of
146// PALcode. They should be adjusted in a prefix assembly file (i.e. do not edit
147// the following). The options that can be adjusted cause the resultant PALcode
148// to reflect the desired target system.
149
150// multiprocessor support can be enabled for a max of n processors by
151// setting the following to the number of processors on the system.
152// Note that this is really the max cpuid.
153
154#define max_cpuid 1
155#ifndef max_cpuid
156#define max_cpuid 8
157#endif
158
159#define osf_svmin 1
160#define osfpal_version_h ((max_cpuid<<16) | (osf_svmin<<0))
161
162//
163// RESET - Reset Trap Entry Point
164//
165// RESET - offset 0000
166// Entry:
167// Vectored into via hardware trap on reset, or branched to
168// on swppal.
169//
170// r0 = whami
171// r1 = pal_base
172// r2 = base of scratch area
173// r3 = halt code
174//
175//
176// Function:
177//
178//
179
180 .text 0
181 . = 0x0000
182 .globl _start
183 .globl Pal_Base
184_start:
185Pal_Base:
186 HDW_VECTOR(PAL_RESET_ENTRY)
187Trap_Reset:
188 nop
189 /*
190 * store into r1
191 */
192 br r1,sys_reset
193
194 // Specify PAL version info as a constant
195 // at a known location (reset + 8).
196
197 .long osfpal_version_l // <pal_type@16> ! <vmaj@8> ! <vmin@0>
198 .long osfpal_version_h // <max_cpuid@16> ! <osf_svmin@0>
199 .long 0
200 .long 0
201pal_impure_start:
202 .quad 0
203pal_debug_ptr:
204 .quad 0 // reserved for debug pointer ; 20
205
206
207//
208// IACCVIO - Istream Access Violation Trap Entry Point
209//
210// IACCVIO - offset 0080
211// Entry:
212// Vectored into via hardware trap on Istream access violation or sign check error on PC.
213//
214// Function:
215// Build stack frame
216// a0 <- Faulting VA
217// a1 <- MMCSR (1 for ACV)
218// a2 <- -1 (for ifetch fault)
219// vector via entMM
220//
221
222 HDW_VECTOR(PAL_IACCVIO_ENTRY)
223Trap_Iaccvio:
224 DEBUGSTORE(0x42)
225 sll r11, 63-osfps_v_mode, r25 // Shift mode up to MS bit
226 mtpr r31, ev5__ps // Set Ibox current mode to kernel
227
228 bis r11, r31, r12 // Save PS
229 bge r25, TRAP_IACCVIO_10_ // no stack swap needed if cm=kern
230
231
232 mtpr r31, ev5__dtb_cm // Set Mbox current mode to kernel -
233 // no virt ref for next 2 cycles
234 mtpr r30, pt_usp // save user stack
235
236 bis r31, r31, r12 // Set new PS
237 mfpr r30, pt_ksp
238
239TRAP_IACCVIO_10_:
240 lda sp, 0-osfsf_c_size(sp)// allocate stack space
241 mfpr r14, exc_addr // get pc
242
243 stq r16, osfsf_a0(sp) // save regs
244 bic r14, 3, r16 // pass pc/va as a0
245
246 stq r17, osfsf_a1(sp) // a1
247 or r31, mmcsr_c_acv, r17 // pass mm_csr as a1
248
249 stq r18, osfsf_a2(sp) // a2
250 mfpr r13, pt_entmm // get entry point
251
252 stq r11, osfsf_ps(sp) // save old ps
253 bis r12, r31, r11 // update ps
254
255 stq r16, osfsf_pc(sp) // save pc
256 stq r29, osfsf_gp(sp) // save gp
257
258 mtpr r13, exc_addr // load exc_addr with entMM
259 // 1 cycle to hw_rei
260 mfpr r29, pt_kgp // get the kgp
261
262 subq r31, 1, r18 // pass flag of istream, as a2
263 hw_rei_spe
264
265
266//
267// INTERRUPT - Interrupt Trap Entry Point
268//
269// INTERRUPT - offset 0100
270// Entry:
271// Vectored into via trap on hardware interrupt
272//
273// Function:
274// check for halt interrupt
275// check for passive release (current ipl geq requestor)
276// if necessary, switch to kernel mode push stack frame,
277// update ps (including current mode and ipl copies), sp, and gp
278// pass the interrupt info to the system module
279//
280//
281 HDW_VECTOR(PAL_INTERRUPT_ENTRY)
282Trap_Interrupt:
283 mfpr r13, ev5__intid // Fetch level of interruptor
284 mfpr r25, ev5__isr // Fetch interrupt summary register
285
286 srl r25, isr_v_hlt, r9 // Get HLT bit
287 mfpr r14, ev5__ipl
288
289 mtpr r31, ev5__dtb_cm // Set Mbox current mode to kern
290 blbs r9, sys_halt_interrupt // halt_interrupt if HLT bit set
291
292 cmple r13, r14, r8 // R8 = 1 if intid .less than or eql. ipl
293 bne r8, sys_passive_release // Passive release is current rupt is lt or eq ipl
294
295 and r11, osfps_m_mode, r10 // get mode bit
296 beq r10, TRAP_INTERRUPT_10_ // Skip stack swap in kernel
297
298 mtpr r30, pt_usp // save user stack
299 mfpr r30, pt_ksp // get kern stack
300
301TRAP_INTERRUPT_10_:
302 lda sp, (0-osfsf_c_size)(sp)// allocate stack space
303 mfpr r14, exc_addr // get pc
304
305 stq r11, osfsf_ps(sp) // save ps
306 stq r14, osfsf_pc(sp) // save pc
307
308 stq r29, osfsf_gp(sp) // push gp
309 stq r16, osfsf_a0(sp) // a0
310
311// pvc_violate 354 // ps is cleared anyway, if store to stack faults.
312 mtpr r31, ev5__ps // Set Ibox current mode to kernel
313 stq r17, osfsf_a1(sp) // a1
314
315 stq r18, osfsf_a2(sp) // a2
316 subq r13, 0x11, r12 // Start to translate from EV5IPL->OSFIPL
317
318 srl r12, 1, r8 // 1d, 1e: ipl 6. 1f: ipl 7.
319 subq r13, 0x1d, r9 // Check for 1d, 1e, 1f
320
321 cmovge r9, r8, r12 // if .ge. 1d, then take shifted value
322 bis r12, r31, r11 // set new ps
323
324 mfpr r12, pt_intmask
325 and r11, osfps_m_ipl, r14 // Isolate just new ipl (not really needed, since all non-ipl bits zeroed already)
326
327 /*
328 * Lance had space problems. We don't.
329 */
330 extbl r12, r14, r14 // Translate new OSFIPL->EV5IPL
331 mfpr r29, pt_kgp // update gp
332 mtpr r14, ev5__ipl // load the new IPL into Ibox
333 br r31, sys_interrupt // Go handle interrupt
334
335
336
337//
338// ITBMISS - Istream TBmiss Trap Entry Point
339//
340// ITBMISS - offset 0180
341// Entry:
342// Vectored into via hardware trap on Istream translation buffer miss.
343//
344// Function:
345// Do a virtual fetch of the PTE, and fill the ITB if the PTE is valid.
346// Can trap into DTBMISS_DOUBLE.
347// This routine can use the PALshadow registers r8, r9, and r10
348//
349//
350
351 HDW_VECTOR(PAL_ITB_MISS_ENTRY)
352Trap_Itbmiss:
353 // Real MM mapping
354 nop
355 mfpr r8, ev5__ifault_va_form // Get virtual address of PTE.
356
357 nop
358 mfpr r10, exc_addr // Get PC of faulting instruction in case of DTBmiss.
359
360pal_itb_ldq:
361 ld_vpte r8, 0(r8) // Get PTE, traps to DTBMISS_DOUBLE in case of TBmiss
362 mtpr r10, exc_addr // Restore exc_address if there was a trap.
363
364 mfpr r31, ev5__va // Unlock VA in case there was a double miss
365 nop
366
367 and r8, osfpte_m_foe, r25 // Look for FOE set.
368 blbc r8, invalid_ipte_handler // PTE not valid.
369
370 nop
371 bne r25, foe_ipte_handler // FOE is set
372
373 nop
374 mtpr r8, ev5__itb_pte // Ibox remembers the VA, load the PTE into the ITB.
375
376 hw_rei_stall //
377
378
379//
380// DTBMISS_SINGLE - Dstream Single TBmiss Trap Entry Point
381//
382// DTBMISS_SINGLE - offset 0200
383// Entry:
384// Vectored into via hardware trap on Dstream single translation
385// buffer miss.
386//
387// Function:
388// Do a virtual fetch of the PTE, and fill the DTB if the PTE is valid.
389// Can trap into DTBMISS_DOUBLE.
390// This routine can use the PALshadow registers r8, r9, and r10
391//
392
393 HDW_VECTOR(PAL_DTB_MISS_ENTRY)
394Trap_Dtbmiss_Single:
395 mfpr r8, ev5__va_form // Get virtual address of PTE - 1 cycle delay. E0.
396 mfpr r10, exc_addr // Get PC of faulting instruction in case of error. E1.
397
398// DEBUGSTORE(0x45)
399// DEBUG_EXC_ADDR()
400 // Real MM mapping
401 mfpr r9, ev5__mm_stat // Get read/write bit. E0.
402 mtpr r10, pt6 // Stash exc_addr away
403
404pal_dtb_ldq:
405 ld_vpte r8, 0(r8) // Get PTE, traps to DTBMISS_DOUBLE in case of TBmiss
406 nop // Pad MF VA
407
408 mfpr r10, ev5__va // Get original faulting VA for TB load. E0.
409 nop
410
411 mtpr r8, ev5__dtb_pte // Write DTB PTE part. E0.
412 blbc r8, invalid_dpte_handler // Handle invalid PTE
413
414 mtpr r10, ev5__dtb_tag // Write DTB TAG part, completes DTB load. No virt ref for 3 cycles.
415 mfpr r10, pt6
416
417 // Following 2 instructions take 2 cycles
418 mtpr r10, exc_addr // Return linkage in case we trapped. E1.
419 mfpr r31, pt0 // Pad the write to dtb_tag
420
421 hw_rei // Done, return
422
423
424//
425// DTBMISS_DOUBLE - Dstream Double TBmiss Trap Entry Point
426//
427//
428// DTBMISS_DOUBLE - offset 0280
429// Entry:
430// Vectored into via hardware trap on Double TBmiss from single
431// miss flows.
432//
433// r8 - faulting VA
434// r9 - original MMstat
435// r10 - original exc_addr (both itb,dtb miss)
436// pt6 - original exc_addr (dtb miss flow only)
437// VA IPR - locked with original faulting VA
438//
439// Function:
440// Get PTE, if valid load TB and return.
441// If not valid then take TNV/ACV exception.
442//
443// pt4 and pt5 are reserved for this flow.
444//
445//
446//
447
448 HDW_VECTOR(PAL_DOUBLE_MISS_ENTRY)
449Trap_Dtbmiss_double:
450 mtpr r8, pt4 // save r8 to do exc_addr check
451 mfpr r8, exc_addr
452 blbc r8, Trap_Dtbmiss_Single //if not in palmode, should be in the single routine, dummy!
453 mfpr r8, pt4 // restore r8
454 nop
455 mtpr r22, pt5 // Get some scratch space. E1.
456 // Due to virtual scheme, we can skip the first lookup and go
457 // right to fetch of level 2 PTE
458 sll r8, (64-((2*page_seg_size_bits)+page_offset_size_bits)), r22 // Clean off upper bits of VA
459 mtpr r21, pt4 // Get some scratch space. E1.
460
461 srl r22, 61-page_seg_size_bits, r22 // Get Va<seg1>*8
462 mfpr r21, pt_ptbr // Get physical address of the page table.
463
464 nop
465 addq r21, r22, r21 // Index into page table for level 2 PTE.
466
467 sll r8, (64-((1*page_seg_size_bits)+page_offset_size_bits)), r22 // Clean off upper bits of VA
468 ldq_p r21, 0(r21) // Get level 2 PTE (addr<2:0> ignored)
469
470 srl r22, 61-page_seg_size_bits, r22 // Get Va<seg1>*8
471 blbc r21, double_pte_inv // Check for Invalid PTE.
472
473 srl r21, 32, r21 // extract PFN from PTE
474 sll r21, page_offset_size_bits, r21 // get PFN * 2^13 for add to <seg3>*8
475
476 addq r21, r22, r21 // Index into page table for level 3 PTE.
477 nop
478
479 ldq_p r21, 0(r21) // Get level 3 PTE (addr<2:0> ignored)
480 blbc r21, double_pte_inv // Check for invalid PTE.
481
482 mtpr r21, ev5__dtb_pte // Write the PTE. E0.
483 mfpr r22, pt5 // Restore scratch register
484
485 mtpr r8, ev5__dtb_tag // Write the TAG. E0. No virtual references in subsequent 3 cycles.
486 mfpr r21, pt4 // Restore scratch register
487
488 nop // Pad write to tag.
489 nop
490
491 nop // Pad write to tag.
492 nop
493
494 hw_rei
495
496
497
498//
499// UNALIGN -- Dstream unalign trap
500//
501// UNALIGN - offset 0300
502// Entry:
503// Vectored into via hardware trap on unaligned Dstream reference.
504//
505// Function:
506// Build stack frame
507// a0 <- Faulting VA
508// a1 <- Opcode
509// a2 <- src/dst register number
510// vector via entUna
511//
512
513 HDW_VECTOR(PAL_UNALIGN_ENTRY)
514Trap_Unalign:
515/* DEBUGSTORE(0x47)*/
516 sll r11, 63-osfps_v_mode, r25 // Shift mode up to MS bit
517 mtpr r31, ev5__ps // Set Ibox current mode to kernel
518
519 mfpr r8, ev5__mm_stat // Get mmstat --ok to use r8, no tbmiss
520 mfpr r14, exc_addr // get pc
521
522 srl r8, mm_stat_v_ra, r13 // Shift Ra field to ls bits
523 blbs r14, pal_pal_bug_check // Bugcheck if unaligned in PAL
524
525 blbs r8, UNALIGN_NO_DISMISS // lsb only set on store or fetch_m
526 // not set, must be a load
527 and r13, 0x1F, r8 // isolate ra
528
529 cmpeq r8, 0x1F, r8 // check for r31/F31
530 bne r8, dfault_fetch_ldr31_err // if its a load to r31 or f31 -- dismiss the fault
531
532UNALIGN_NO_DISMISS:
533 bis r11, r31, r12 // Save PS
534 bge r25, UNALIGN_NO_DISMISS_10_ // no stack swap needed if cm=kern
535
536
537 mtpr r31, ev5__dtb_cm // Set Mbox current mode to kernel -
538 // no virt ref for next 2 cycles
539 mtpr r30, pt_usp // save user stack
540
541 bis r31, r31, r12 // Set new PS
542 mfpr r30, pt_ksp
543
544UNALIGN_NO_DISMISS_10_:
545 mfpr r25, ev5__va // Unlock VA
546 lda sp, 0-osfsf_c_size(sp)// allocate stack space
547
548 mtpr r25, pt0 // Stash VA
549 stq r18, osfsf_a2(sp) // a2
550
551 stq r11, osfsf_ps(sp) // save old ps
552 srl r13, mm_stat_v_opcode-mm_stat_v_ra, r25// Isolate opcode
553
554 stq r29, osfsf_gp(sp) // save gp
555 addq r14, 4, r14 // inc PC past the ld/st
556
557 stq r17, osfsf_a1(sp) // a1
558 and r25, mm_stat_m_opcode, r17// Clean opocde for a1
559
560 stq r16, osfsf_a0(sp) // save regs
561 mfpr r16, pt0 // a0 <- va/unlock
562
563 stq r14, osfsf_pc(sp) // save pc
564 mfpr r25, pt_entuna // get entry point
565
566
567 bis r12, r31, r11 // update ps
568 br r31, unalign_trap_cont
569
570
571//
572// DFAULT - Dstream Fault Trap Entry Point
573//
574// DFAULT - offset 0380
575// Entry:
576// Vectored into via hardware trap on dstream fault or sign check
577// error on DVA.
578//
579// Function:
580// Ignore faults on FETCH/FETCH_M
581// Check for DFAULT in PAL
582// Build stack frame
583// a0 <- Faulting VA
584// a1 <- MMCSR (1 for ACV, 2 for FOR, 4 for FOW)
585// a2 <- R/W
586// vector via entMM
587//
588//
589 HDW_VECTOR(PAL_D_FAULT_ENTRY)
590Trap_Dfault:
591// DEBUGSTORE(0x48)
592 sll r11, 63-osfps_v_mode, r25 // Shift mode up to MS bit
593 mtpr r31, ev5__ps // Set Ibox current mode to kernel
594
595 mfpr r13, ev5__mm_stat // Get mmstat
596 mfpr r8, exc_addr // get pc, preserve r14
597
598 srl r13, mm_stat_v_opcode, r9 // Shift opcode field to ls bits
599 blbs r8, dfault_in_pal
600
601 bis r8, r31, r14 // move exc_addr to correct place
602 bis r11, r31, r12 // Save PS
603
604 mtpr r31, ev5__dtb_cm // Set Mbox current mode to kernel -
605 // no virt ref for next 2 cycles
606 and r9, mm_stat_m_opcode, r9 // Clean all but opcode
607
608 cmpeq r9, evx_opc_sync, r9 // Is the opcode fetch/fetchm?
609 bne r9, dfault_fetch_ldr31_err // Yes, dismiss the fault
610
611 //dismiss exception if load to r31/f31
612 blbs r13, dfault_no_dismiss // mm_stat<0> set on store or fetchm
613
614 // not a store or fetch, must be a load
615 srl r13, mm_stat_v_ra, r9 // Shift rnum to low bits
616
617 and r9, 0x1F, r9 // isolate rnum
618 nop
619
620 cmpeq r9, 0x1F, r9 // Is the rnum r31 or f31?
621 bne r9, dfault_fetch_ldr31_err // Yes, dismiss the fault
622
623dfault_no_dismiss:
624 and r13, 0xf, r13 // Clean extra bits in mm_stat
625 bge r25, dfault_trap_cont // no stack swap needed if cm=kern
626
627
628 mtpr r30, pt_usp // save user stack
629 bis r31, r31, r12 // Set new PS
630
631 mfpr r30, pt_ksp
632 br r31, dfault_trap_cont
633
634
635//
636// MCHK - Machine Check Trap Entry Point
637//
638// MCHK - offset 0400
639// Entry:
640// Vectored into via hardware trap on machine check.
641//
642// Function:
643//
644//
645
646 HDW_VECTOR(PAL_MCHK_ENTRY)
647Trap_Mchk:
648 DEBUGSTORE(0x49)
649 mtpr r31, ic_flush_ctl // Flush the Icache
650 br r31, sys_machine_check
651
652
653//
654// OPCDEC - Illegal Opcode Trap Entry Point
655//
656// OPCDEC - offset 0480
657// Entry:
658// Vectored into via hardware trap on illegal opcode.
659//
660// Build stack frame
661// a0 <- code
662// a1 <- unpred
663// a2 <- unpred
664// vector via entIF
665//
666//
667
668 HDW_VECTOR(PAL_OPCDEC_ENTRY)
669Trap_Opcdec:
670 DEBUGSTORE(0x4a)
671//simos DEBUG_EXC_ADDR()
672 sll r11, 63-osfps_v_mode, r25 // Shift mode up to MS bit
673 mtpr r31, ev5__ps // Set Ibox current mode to kernel
674
675 mfpr r14, exc_addr // get pc
676 blbs r14, pal_pal_bug_check // check opcdec in palmode
677
678 bis r11, r31, r12 // Save PS
679 bge r25, TRAP_OPCDEC_10_ // no stack swap needed if cm=kern
680
681
682 mtpr r31, ev5__dtb_cm // Set Mbox current mode to kernel -
683 // no virt ref for next 2 cycles
684 mtpr r30, pt_usp // save user stack
685
686 bis r31, r31, r12 // Set new PS
687 mfpr r30, pt_ksp
688
689TRAP_OPCDEC_10_:
690 lda sp, 0-osfsf_c_size(sp)// allocate stack space
691 addq r14, 4, r14 // inc pc
692
693 stq r16, osfsf_a0(sp) // save regs
694 bis r31, osf_a0_opdec, r16 // set a0
695
696 stq r11, osfsf_ps(sp) // save old ps
697 mfpr r13, pt_entif // get entry point
698
699 stq r18, osfsf_a2(sp) // a2
700 stq r17, osfsf_a1(sp) // a1
701
702 stq r29, osfsf_gp(sp) // save gp
703 stq r14, osfsf_pc(sp) // save pc
704
705 bis r12, r31, r11 // update ps
706 mtpr r13, exc_addr // load exc_addr with entIF
707 // 1 cycle to hw_rei, E1
708
709 mfpr r29, pt_kgp // get the kgp, E1
710
711 hw_rei_spe // done, E1
712
713
714//
715// ARITH - Arithmetic Exception Trap Entry Point
716//
717// ARITH - offset 0500
718// Entry:
//	Vectored into via hardware trap on arithmetic exception.
720//
721// Function:
722// Build stack frame
723// a0 <- exc_sum
724// a1 <- exc_mask
725// a2 <- unpred
726// vector via entArith
727//
728//
729 HDW_VECTOR(PAL_ARITH_ENTRY)
730Trap_Arith:
731 DEBUGSTORE(0x4b)
732 and r11, osfps_m_mode, r12 // get mode bit
733 mfpr r31, ev5__va // unlock mbox
734
735 bis r11, r31, r25 // save ps
736 mfpr r14, exc_addr // get pc
737
738 nop
739 blbs r14, pal_pal_bug_check // arith trap from PAL
740
741 mtpr r31, ev5__dtb_cm // Set Mbox current mode to kernel -
742 // no virt ref for next 2 cycles
743 beq r12, TRAP_ARITH_10_ // if zero we are in kern now
744
745 bis r31, r31, r25 // set the new ps
746 mtpr r30, pt_usp // save user stack
747
748 nop
749 mfpr r30, pt_ksp // get kern stack
750
751TRAP_ARITH_10_: lda sp, 0-osfsf_c_size(sp) // allocate stack space
752 mtpr r31, ev5__ps // Set Ibox current mode to kernel
753
754 nop // Pad current mode write and stq
755 mfpr r13, ev5__exc_sum // get the exc_sum
756
757 mfpr r12, pt_entarith
758 stq r14, osfsf_pc(sp) // save pc
759
760 stq r17, osfsf_a1(sp)
761 mfpr r17, ev5__exc_mask // Get exception register mask IPR - no mtpr exc_sum in next cycle
762
763 stq r11, osfsf_ps(sp) // save ps
764 bis r25, r31, r11 // set new ps
765
766 stq r16, osfsf_a0(sp) // save regs
767 srl r13, exc_sum_v_swc, r16 // shift data to correct position
768
769 stq r18, osfsf_a2(sp)
770// pvc_violate 354 // ok, but make sure reads of exc_mask/sum are not in same trap shadow
771 mtpr r31, ev5__exc_sum // Unlock exc_sum and exc_mask
772
773 stq r29, osfsf_gp(sp)
774 mtpr r12, exc_addr // Set new PC - 1 bubble to hw_rei - E1
775
776 mfpr r29, pt_kgp // get the kern gp - E1
777 hw_rei_spe // done - E1
778
779
780//
781// FEN - Illegal Floating Point Operation Trap Entry Point
782//
783// FEN - offset 0580
784// Entry:
785// Vectored into via hardware trap on illegal FP op.
786//
787// Function:
788// Build stack frame
789// a0 <- code
790// a1 <- unpred
791// a2 <- unpred
792// vector via entIF
793//
794//
795
796 HDW_VECTOR(PAL_FEN_ENTRY)
797Trap_Fen:
798 sll r11, 63-osfps_v_mode, r25 // Shift mode up to MS bit
799 mtpr r31, ev5__ps // Set Ibox current mode to kernel
800
801 mfpr r14, exc_addr // get pc
802 blbs r14, pal_pal_bug_check // check opcdec in palmode
803
804 mfpr r13, ev5__icsr
805 nop
806
807 bis r11, r31, r12 // Save PS
808 bge r25, TRAP_FEN_10_ // no stack swap needed if cm=kern
809
810 mtpr r31, ev5__dtb_cm // Set Mbox current mode to kernel -
811 // no virt ref for next 2 cycles
812 mtpr r30, pt_usp // save user stack
813
814 bis r31, r31, r12 // Set new PS
815 mfpr r30, pt_ksp
816
817TRAP_FEN_10_:
818 lda sp, 0-osfsf_c_size(sp)// allocate stack space
819 srl r13, icsr_v_fpe, r25 // Shift FP enable to bit 0
820
821
822 stq r16, osfsf_a0(sp) // save regs
823 mfpr r13, pt_entif // get entry point
824
825 stq r18, osfsf_a2(sp) // a2
826 stq r11, osfsf_ps(sp) // save old ps
827
828 stq r29, osfsf_gp(sp) // save gp
829 bis r12, r31, r11 // set new ps
830
831 stq r17, osfsf_a1(sp) // a1
832 blbs r25,fen_to_opcdec // If FP is enabled, this is really OPCDEC.
833
834 bis r31, osf_a0_fen, r16 // set a0
835 stq r14, osfsf_pc(sp) // save pc
836
837 mtpr r13, exc_addr // load exc_addr with entIF
838 // 1 cycle to hw_rei -E1
839
840 mfpr r29, pt_kgp // get the kgp -E1
841
842 hw_rei_spe // done -E1
843
844// FEN trap was taken, but the fault is really opcdec.
845 ALIGN_BRANCH
846fen_to_opcdec:
847 addq r14, 4, r14 // save PC+4
848 bis r31, osf_a0_opdec, r16 // set a0
849
850 stq r14, osfsf_pc(sp) // save pc
851 mtpr r13, exc_addr // load exc_addr with entIF
852 // 1 cycle to hw_rei
853
854 mfpr r29, pt_kgp // get the kgp
855 hw_rei_spe // done
856
857
858
859//////////////////////////////////////////////////////////////////////////////
860// Misc handlers - Start area for misc code.
861//////////////////////////////////////////////////////////////////////////////
862
863//
864// dfault_trap_cont
865// A dfault trap has been taken. The sp has been updated if necessary.
//	Push a stack frame and vector via entMM.
867//
868// Current state:
869// r12 - new PS
870// r13 - MMstat
871// VA - locked
872//
873//
874 ALIGN_BLOCK
875dfault_trap_cont:
876 lda sp, 0-osfsf_c_size(sp)// allocate stack space
877 mfpr r25, ev5__va // Fetch VA/unlock
878
879 stq r18, osfsf_a2(sp) // a2
880 and r13, 1, r18 // Clean r/w bit for a2
881
882 stq r16, osfsf_a0(sp) // save regs
883 bis r25, r31, r16 // a0 <- va
884
885 stq r17, osfsf_a1(sp) // a1
886 srl r13, 1, r17 // shift fault bits to right position
887
888 stq r11, osfsf_ps(sp) // save old ps
889 bis r12, r31, r11 // update ps
890
891 stq r14, osfsf_pc(sp) // save pc
892 mfpr r25, pt_entmm // get entry point
893
894 stq r29, osfsf_gp(sp) // save gp
895 cmovlbs r17, 1, r17 // a2. acv overrides fox.
896
897 mtpr r25, exc_addr // load exc_addr with entMM
898 // 1 cycle to hw_rei
899 mfpr r29, pt_kgp // get the kgp
900
901 hw_rei_spe // done
902
903//
904//unalign_trap_cont
905// An unalign trap has been taken. Just need to finish up a few things.
906//
907// Current state:
908// r25 - entUna
909// r13 - shifted MMstat
910//
911//
912 ALIGN_BLOCK
913unalign_trap_cont:
914 mtpr r25, exc_addr // load exc_addr with entUna
915 // 1 cycle to hw_rei
916
917
918 mfpr r29, pt_kgp // get the kgp
919 and r13, mm_stat_m_ra, r18 // Clean Ra for a2
920
921 hw_rei_spe // done
922
923
924
925//
926// dfault_in_pal
927// Dfault trap was taken, exc_addr points to a PAL PC.
928// r9 - mmstat<opcode> right justified
929// r8 - exception address
930//
931// These are the cases:
932// opcode was STQ -- from a stack builder, KSP not valid halt
933// r14 - original exc_addr
934// r11 - original PS
935// opcode was STL_C -- rti or retsys clear lock_flag by stack write,
936// KSP not valid halt
937// r11 - original PS
938// r14 - original exc_addr
939// opcode was LDQ -- retsys or rti stack read, KSP not valid halt
940// r11 - original PS
941// r14 - original exc_addr
942// opcode was HW_LD -- itbmiss or dtbmiss, bugcheck due to fault on page tables
943// r10 - original exc_addr
944// r11 - original PS
945//
946//
947//
948 ALIGN_BLOCK
949dfault_in_pal:
950 DEBUGSTORE(0x50)
951 bic r8, 3, r8 // Clean PC
952 mfpr r9, pal_base
953
954 mfpr r31, va // unlock VA
955
956 // if not real_mm, should never get here from miss flows
957
958 subq r9, r8, r8 // pal_base - offset
959
960 lda r9, pal_itb_ldq-pal_base(r8)
961 nop
962
963 beq r9, dfault_do_bugcheck
964 lda r9, pal_dtb_ldq-pal_base(r8)
965
966 beq r9, dfault_do_bugcheck
967
968//
969// KSP invalid halt case --
970ksp_inval_halt:
971 DEBUGSTORE(76)
972 bic r11, osfps_m_mode, r11 // set ps to kernel mode
973 mtpr r0, pt0
974
975 mtpr r31, dtb_cm // Make sure that the CM IPRs are all kernel mode
976 mtpr r31, ips
977
978 mtpr r14, exc_addr // Set PC to instruction that caused trouble
979 bsr r0, pal_update_pcb // update the pcb
980
981 lda r0, hlt_c_ksp_inval(r31) // set halt code to hw halt
982 br r31, sys_enter_console // enter the console
983
984 ALIGN_BRANCH
985dfault_do_bugcheck:
986 bis r10, r31, r14 // bugcheck expects exc_addr in r14
987 br r31, pal_pal_bug_check
988
989
990//
991// dfault_fetch_ldr31_err - ignore faults on fetch(m) and loads to r31/f31
992// On entry -
993// r14 - exc_addr
994// VA is locked
995//
996//
997 ALIGN_BLOCK
998dfault_fetch_ldr31_err:
999 mtpr r11, ev5__dtb_cm
1000 mtpr r11, ev5__ps // Make sure ps hasn't changed
1001
1002 mfpr r31, va // unlock the mbox
1003 addq r14, 4, r14 // inc the pc to skip the fetch
1004
1005 mtpr r14, exc_addr // give ibox new PC
1006 mfpr r31, pt0 // pad exc_addr write
1007
1008 hw_rei
1009
1010
1011
1012 ALIGN_BLOCK
1013//
1014// sys_from_kern
1015// callsys from kernel mode - OS bugcheck machine check
1016//
1017//
1018sys_from_kern:
1019 mfpr r14, exc_addr // PC points to call_pal
1020 subq r14, 4, r14
1021
1022 lda r25, mchk_c_os_bugcheck(r31) // fetch mchk code
1023 br r31, pal_pal_mchk
1024
1025
// Continuation of long call_pal flows
//
// wrent_tbl
//      Table to write *int in paltemps.
//      4 instructions/entry -- the wrent dispatcher computes the target
//      address from the entry-point selector, so every entry must stay
//      exactly 4 instructions (16 bytes) long.
//      r16 has new value
//
//      Entry order: entInt, entArith, entMM, entIF, entUna, entSys.
//
        ALIGN_BLOCK
wrent_tbl:
// 0 - entInt (interrupt entry)
//orig pvc_jsr wrent, dest=1
        nop
        mtpr    r16, pt_entint

        mfpr    r31, pt0                // Pad for mt->mf paltemp rule
        hw_rei


// 1 - entArith (arithmetic trap entry)
//orig pvc_jsr wrent, dest=1
        nop
        mtpr    r16, pt_entarith

        mfpr    r31, pt0                // Pad for mt->mf paltemp rule
        hw_rei


// 2 - entMM (memory-management fault entry)
//orig pvc_jsr wrent, dest=1
        nop
        mtpr    r16, pt_entmm

        mfpr    r31, pt0                // Pad for mt->mf paltemp rule
        hw_rei


// 3 - entIF (instruction fault entry)
//orig pvc_jsr wrent, dest=1
        nop
        mtpr    r16, pt_entif

        mfpr    r31, pt0                // Pad for mt->mf paltemp rule
        hw_rei


// 4 - entUna (unaligned access entry)
//orig pvc_jsr wrent, dest=1
        nop
        mtpr    r16, pt_entuna

        mfpr    r31, pt0                // Pad for mt->mf paltemp rule
        hw_rei


// 5 - entSys (syscall entry)
//orig pvc_jsr wrent, dest=1
        nop
        mtpr    r16, pt_entsys

        mfpr    r31, pt0                // Pad for mt->mf paltemp rule
        hw_rei
1082
        ALIGN_BLOCK
//
// tbi_tbl
//      Table to do tbi instructions
//      4 instructions per entry -- the tbi dispatcher indexes this table
//      by the tbi type code (-2..3), so each entry must remain exactly
//      4 instructions (16 bytes) long.
//      r17 holds the VA argument for the single-entry variants.
//
tbi_tbl:
        // -2 tbia - invalidate all TB entries
//orig pvc_jsr tbi, dest=1
        mtpr    r31, ev5__dtb_ia        // Flush DTB
        mtpr    r31, ev5__itb_ia        // Flush ITB

        hw_rei_stall

        nop                             // Pad table

        // -1 tbiap - invalidate all per-process (non-ASM) entries
//orig pvc_jsr tbi, dest=1
        mtpr    r31, ev5__dtb_iap       // Flush DTB
        mtpr    r31, ev5__itb_iap       // Flush ITB

        hw_rei_stall

        nop                             // Pad table


        // 0 unused
//orig pvc_jsr tbi, dest=1
        hw_rei                          // Pad table
        nop
        nop
        nop


        // 1 tbisi - invalidate single istream entry
//orig pvc_jsr tbi, dest=1

        nop
        nop
        mtpr    r17, ev5__itb_is        // Flush ITB
        hw_rei_stall

        // 2 tbisd - invalidate single dstream entry
//orig pvc_jsr tbi, dest=1
        mtpr    r17, ev5__dtb_is        // Flush DTB.
        nop

        nop
        hw_rei_stall


        // 3 tbis - invalidate single entry in both TBs
//orig pvc_jsr tbi, dest=1
        mtpr    r17, ev5__dtb_is        // Flush DTB
        br      r31, tbi_finish         // branch keeps the entry at 16 bytes
        ALIGN_BRANCH
tbi_finish:
        mtpr    r17, ev5__itb_is        // Flush ITB
        hw_rei_stall
1142
1143
1144
        ALIGN_BLOCK
//
// bpt_bchk_common:
//      Finish up the bpt/bchk instructions
//
// On entry (set up by the dispatching bpt/bchk flow -- NOTE(review):
// confirm against that flow, it is outside this routine):
//      r12 - PS to push
//      r14 - PC to push
//      r18 - a2 value
//      sp  - kernel stack with frame already allocated; a0/a1 slots
//            presumably already stored by the caller
// Completes the stack frame, vectors through entIF, and reloads the
// kernel GP before returning to native mode.
//
bpt_bchk_common:
        stq     r18, osfsf_a2(sp)       // a2
        mfpr    r13, pt_entif           // get entry point

        stq     r12, osfsf_ps(sp)       // save old ps
        stq     r14, osfsf_pc(sp)       // save pc

        stq     r29, osfsf_gp(sp)       // save gp
        mtpr    r13, exc_addr           // load exc_addr with entIF
                                        // 1 cycle to hw_rei

        mfpr    r29, pt_kgp             // get the kgp


        hw_rei_spe                      // done
1165
1166
        ALIGN_BLOCK
//
// rti_to_user
//      Finish up the rti instruction when returning to user mode:
//      set user mode in both boxes, drop IPL to 0, save the kernel SP
//      and reload the user SP.  r11 = new PS, r25 = kernel SP to save.
//
rti_to_user:
        mtpr    r11, ev5__dtb_cm        // set Mbox current mode - no virt ref for 2 cycles
        mtpr    r11, ev5__ps            // set Ibox current mode - 2 bubble to hw_rei

        mtpr    r31, ev5__ipl           // set the ipl (user mode => IPL 0). No hw_rei for 2 cycles
        mtpr    r25, pt_ksp             // save off incase RTI to user

        mfpr    r30, pt_usp
        hw_rei_spe                      // and back
1181
1182
        ALIGN_BLOCK
//
// rti_to_kern
//      Finish up the rti instruction when staying in kernel mode:
//      extract the new IPL from the popped PS (r12), look up the byte-wide
//      interrupt mask for that IPL in pt_intmask, and resume on the
//      kernel stack (r25).
//
rti_to_kern:
        and     r12, osfps_m_ipl, r11   // clean ps - only the IPL field survives
        mfpr    r12, pt_intmask         // get int mask

        extbl   r12, r11, r12           // get mask for this ipl (table of 8 bytes, indexed by IPL)
        mtpr    r25, pt_ksp             // save off incase RTI to user

        mtpr    r12, ev5__ipl           // set the new ipl.
        or      r25, r31, sp            // sp

//orig pvc_violate 217                  // possible hidden mt->mf ipl not a problem in callpals
        hw_rei
1200
        ALIGN_BLOCK
//
// swpctx_cont
//      Finish up the swpctx instruction: install the new process's FEN/PME
//      bits into ICSR, the new ASN into both TBs, the new PTBR, the new
//      cycle-counter offset, and the new user/kernel stack pointers.
//
// On entry (set up by the main swpctx flow -- NOTE(review): inferred from
// the uses below; confirm against the dispatch code, which is outside this
// routine):
//      r16 - physical address of the new PCB
//      r25 - current ICSR value
//      r24 - mask of ICSR<FPE,PMP>
//      r12 - new FEN bit (bit 0)
//      r22 - PCB quadword containing the PME field
//      r23 - new ASN in <63:32>, new CC value in <31:0>
//      r13 - current cycle counter
//

swpctx_cont:

        bic     r25, r24, r25           // clean icsr<FPE,PMP>
        sll     r12, icsr_v_fpe, r12    // shift new fen to pos

        ldq_p   r14, osfpcb_q_mmptr(r16)// get new mmptr
        srl     r22, osfpcb_v_pme, r22  // get pme down to bit 0

        or      r25, r12, r25           // icsr with new fen
        srl     r23, 32, r24            // move asn to low asn pos

        and     r22, 1, r22
        sll     r24, itb_asn_v_asn, r12

        sll     r22, icsr_v_pmp, r22
        nop

        or      r25, r22, r25           // icsr with new pme

        sll     r24, dtb_asn_v_asn, r24

        subl    r23, r13, r13           // gen new cc offset
        mtpr    r12, itb_asn            // no hw_rei_stall in 0,1,2,3,4

        mtpr    r24, dtb_asn            // Load up new ASN
        mtpr    r25, icsr               // write the icsr

        sll     r14, page_offset_size_bits, r14 // Move PTBR into internal position.
        ldq_p   r25, osfpcb_q_usp(r16)  // get new usp

        insll   r13, 4, r13             // >> 32
//orig pvc_violate 379                  // ldq_p can't trap except replay. only problem if mf same ipr in same shadow
        mtpr    r14, pt_ptbr            // load the new ptbr

        mtpr    r13, cc                 // set new offset
        ldq_p   r30, osfpcb_q_ksp(r16)  // get new ksp

//orig pvc_violate 379                  // ldq_p can't trap except replay. only problem if mf same ipr in same shadow
        mtpr    r25, pt_usp             // save usp

no_pm_change_10_: hw_rei_stall          // back we go
1248
1249 ALIGN_BLOCK
1250//
1251// swppal_cont - finish up the swppal call_pal
1252//
1253
1254swppal_cont:
1255 mfpr r2, pt_misc // get misc bits
1256 sll r0, pt_misc_v_switch, r0 // get the "I've switched" bit
1257 or r2, r0, r2 // set the bit
1258 mtpr r31, ev5__alt_mode // ensure alt_mode set to 0 (kernel)
1259 mtpr r2, pt_misc // update the chip
1260
1261 or r3, r31, r4
1262 mfpr r3, pt_impure // pass pointer to the impure area in r3
1263//orig fix_impure_ipr r3 // adjust impure pointer for ipr read
1264//orig restore_reg1 bc_ctl, r1, r3, ipr=1 // pass cns_bc_ctl in r1
1265//orig restore_reg1 bc_config, r2, r3, ipr=1 // pass cns_bc_config in r2
1266//orig unfix_impure_ipr r3 // restore impure pointer
1267 lda r3, CNS_Q_IPR(r3)
1268 RESTORE_SHADOW(r1,CNS_Q_BC_CTL,r3);
1269 RESTORE_SHADOW(r1,CNS_Q_BC_CFG,r3);
1270 lda r3, -CNS_Q_IPR(r3)
1271
1272 or r31, r31, r0 // set status to success
1273// pvc_violate 1007
1274 jmp r31, (r4) // and call our friend, it's her problem now
1275
1276
1277swppal_fail:
1278 addq r0, 1, r0 // set unknown pal or not loaded
1279 hw_rei // and return
1280
1281
1282// .sbttl "Memory management"
1283
        ALIGN_BLOCK
//
//foe_ipte_handler
// IFOE detected on level 3 pte, sort out FOE vs ACV
//
// on entry:
//      with
//      R8  = pte
//      R10 = pc
//
// Function
//      Determine TNV vs ACV vs FOE. Build stack and dispatch
//      Will not be here if TNV.
//      If coming from user mode, swap to the kernel stack first; the
//      PTE's user protection bits are shifted down so the kernel-mode
//      test below works for either origin mode.
//

foe_ipte_handler:
        sll     r11, 63-osfps_v_mode, r25 // Shift mode up to MS bit
        mtpr    r31, ev5__ps            // Set Ibox current mode to kernel

        bis     r11, r31, r12           // Save PS for stack write
        bge     r25, foe_ipte_handler_10_ // no stack swap needed if cm=kern


        mtpr    r31, ev5__dtb_cm        // Set Mbox current mode to kernel -
                                        // no virt ref for next 2 cycles
        mtpr    r30, pt_usp             // save user stack

        bis     r31, r31, r11           // Set new PS
        mfpr    r30, pt_ksp

        srl     r8, osfpte_v_ure-osfpte_v_kre, r8 // move pte user bits to kern
        nop

foe_ipte_handler_10_:   srl r8, osfpte_v_kre, r25 // get kre to <0>
        lda     sp, 0-osfsf_c_size(sp)  // allocate stack space

        or      r10, r31, r14           // Save pc/va in case TBmiss or fault on stack
        mfpr    r13, pt_entmm           // get entry point

        stq     r16, osfsf_a0(sp)       // a0
        or      r14, r31, r16           // pass pc/va as a0

        stq     r17, osfsf_a1(sp)       // a1
        nop

        stq     r18, osfsf_a2(sp)       // a2
        lda     r17, mmcsr_c_acv(r31)   // assume ACV

        stq     r16, osfsf_pc(sp)       // save pc
        cmovlbs r25, mmcsr_c_foe, r17   // otherwise FOE (kre set => access was legal)

        stq     r12, osfsf_ps(sp)       // save ps
        subq    r31, 1, r18             // pass flag of istream (-1) as a2

        stq     r29, osfsf_gp(sp)
        mtpr    r13, exc_addr           // set vector address

        mfpr    r29, pt_kgp             // load kgp
        hw_rei_spe                      // out to exec
1343
        ALIGN_BLOCK
//
//invalid_ipte_handler
// TNV detected on level 3 pte, sort out TNV vs ACV
//
// on entry:
//      with
//      R8  = pte
//      R10 = pc
//
// Function
//      Determine TNV vs ACV. Build stack and dispatch.
//      Mirrors foe_ipte_handler: swap to the kernel stack if needed,
//      then derive the MMCSR code from the PTE's kernel-read-enable bit.
//

invalid_ipte_handler:
        sll     r11, 63-osfps_v_mode, r25 // Shift mode up to MS bit
        mtpr    r31, ev5__ps            // Set Ibox current mode to kernel

        bis     r11, r31, r12           // Save PS for stack write
        bge     r25, invalid_ipte_handler_10_ // no stack swap needed if cm=kern


        mtpr    r31, ev5__dtb_cm        // Set Mbox current mode to kernel -
                                        // no virt ref for next 2 cycles
        mtpr    r30, pt_usp             // save user stack

        bis     r31, r31, r11           // Set new PS
        mfpr    r30, pt_ksp

        srl     r8, osfpte_v_ure-osfpte_v_kre, r8 // move pte user bits to kern
        nop

invalid_ipte_handler_10_:       srl r8, osfpte_v_kre, r25 // get kre to <0>
        lda     sp, 0-osfsf_c_size(sp)  // allocate stack space

        or      r10, r31, r14           // Save pc/va in case TBmiss on stack
        mfpr    r13, pt_entmm           // get entry point

        stq     r16, osfsf_a0(sp)       // a0
        or      r14, r31, r16           // pass pc/va as a0

        stq     r17, osfsf_a1(sp)       // a1
        nop

        stq     r18, osfsf_a2(sp)       // a2
        and     r25, 1, r17             // Isolate kre

        stq     r16, osfsf_pc(sp)       // save pc
        xor     r17, 1, r17             // map to acv/tnv as a1 (kre=1 => TNV, kre=0 => ACV)

        stq     r12, osfsf_ps(sp)       // save ps
        subq    r31, 1, r18             // pass flag of istream (-1) as a2

        stq     r29, osfsf_gp(sp)
        mtpr    r13, exc_addr           // set vector address

        mfpr    r29, pt_kgp             // load kgp
        hw_rei_spe                      // out to exec
1402
1403
1404
1405
        ALIGN_BLOCK
//
//invalid_dpte_handler
// INVALID detected on level 3 pte, sort out TNV vs ACV
//
// on entry:
//      with
//      R10 = va
//      R8  = pte
//      R9  = mm_stat
//      PT6 = pc
//
// Function
//      Determine TNV vs ACV. Build stack and dispatch
//      First dismiss the exception entirely if the faulting instruction
//      was a fetch/fetch_m or a load to r31/f31 (no architectural effect),
//      and divert to a KSP-invalid halt if the original reference was
//      made from PAL mode.
//


invalid_dpte_handler:
        mfpr    r12, pt6
        blbs    r12, tnv_in_pal         // Special handler if original faulting reference was in PALmode

        bis     r12, r31, r14           // save PC in case of tbmiss or fault
        srl     r9, mm_stat_v_opcode, r25 // shift opc to <0>

        mtpr    r11, pt0                // Save PS for stack write
        and     r25, mm_stat_m_opcode, r25 // isolate opcode

        cmpeq   r25, evx_opc_sync, r25  // is it FETCH/FETCH_M?
        blbs    r25, nmiss_fetch_ldr31_err // yes

        //dismiss exception if load to r31/f31
        blbs    r9, invalid_dpte_no_dismiss // mm_stat<0> set on store or fetchm

        // not a store or fetch, must be a load
        srl     r9, mm_stat_v_ra, r25   // Shift rnum to low bits

        and     r25, 0x1F, r25          // isolate rnum
        nop

        cmpeq   r25, 0x1F, r25          // Is the rnum r31 or f31?
        bne     r25, nmiss_fetch_ldr31_err // Yes, dismiss the fault

invalid_dpte_no_dismiss:
        sll     r11, 63-osfps_v_mode, r25 // Shift mode up to MS bit
        mtpr    r31, ev5__ps            // Set Ibox current mode to kernel

        mtpr    r31, ev5__dtb_cm        // Set Mbox current mode to kernel -
                                        // no virt ref for next 2 cycles
        bge     r25, invalid_dpte_no_dismiss_10_ // no stack swap needed if cm=kern

        srl     r8, osfpte_v_ure-osfpte_v_kre, r8 // move pte user bits to kern
        mtpr    r30, pt_usp             // save user stack

        bis     r31, r31, r11           // Set new PS
        mfpr    r30, pt_ksp

invalid_dpte_no_dismiss_10_:    srl r8, osfpte_v_kre, r12 // get kre to <0>
        lda     sp, 0-osfsf_c_size(sp)  // allocate stack space

        or      r10, r31, r25           // Save va in case TBmiss on stack
        and     r9, 1, r13              // save r/w flag (mm_stat<0>: 1 = write)

        stq     r16, osfsf_a0(sp)       // a0
        or      r25, r31, r16           // pass va as a0

        stq     r17, osfsf_a1(sp)       // a1
        or      r31, mmcsr_c_acv, r17   // assume acv

        srl     r12, osfpte_v_kwe-osfpte_v_kre, r25 // get write enable to <0>
        stq     r29, osfsf_gp(sp)

        stq     r18, osfsf_a2(sp)       // a2
        cmovlbs r13, r25, r12           // if write access move acv based on write enable

        or      r13, r31, r18           // pass flag of dstream access and read vs write
        mfpr    r25, pt0                // get ps

        stq     r14, osfsf_pc(sp)       // save pc
        mfpr    r13, pt_entmm           // get entry point

        stq     r25, osfsf_ps(sp)       // save ps
        mtpr    r13, exc_addr           // set vector address

        mfpr    r29, pt_kgp             // load kgp
        cmovlbs r12, mmcsr_c_tnv, r17   // make p2 be tnv if access ok else acv

        hw_rei_spe                      // out to exec
1493
1494//
1495//
1496// We come here if we are erring on a dtb_miss, and the instr is a
1497// fetch, fetch_m, of load to r31/f31.
1498// The PC is incremented, and we return to the program.
1499// essentially ignoring the instruction and error.
1500//
1501//
1502 ALIGN_BLOCK
1503nmiss_fetch_ldr31_err:
1504 mfpr r12, pt6
1505 addq r12, 4, r12 // bump pc to pc+4
1506
1507 mtpr r12, exc_addr // and set entry point
1508 mfpr r31, pt0 // pad exc_addr write
1509
1510 hw_rei //
1511
        ALIGN_BLOCK
//
// double_pte_inv
//      We had a single tbmiss which turned into a double tbmiss which found
//      an invalid PTE.  Return to single miss with a fake pte, and the invalid
//      single miss flow will report the error.
//
// on entry:
//      r21  PTE
//      r22  available
//      VA IPR locked with original fault VA
//      pt4  saved r21
//      pt5  saved r22
//      pt6  original exc_addr
//
// on return to tbmiss flow:
//      r8   fake PTE (all protection bits if the level-2 PTE allowed
//           kernel read, otherwise all zero so the single-miss flow
//           reports ACV)
//
//
//
double_pte_inv:
        srl     r21, osfpte_v_kre, r21  // get the kre bit to <0>
        mfpr    r22, exc_addr           // get the pc

        lda     r22, 4(r22)             // inc the pc
        lda     r8, osfpte_m_prot(r31)  // make a fake pte with xre and xwe set

        cmovlbc r21, r31, r8            // set to all 0 for acv if pte<kre> is 0
        mtpr    r22, exc_addr           // set for rei

        mfpr    r21, pt4                // restore regs
        mfpr    r22, pt5                // restore regs

        hw_rei                          // back to tb miss
1546
        ALIGN_BLOCK
//
//tnv_in_pal
//      The only places in pal that ld or store are the
//      stack builders, rti or retsys.  Any of these mean we
//      need to take a ksp not valid halt.
//
//
tnv_in_pal:


        br      r31, ksp_inval_halt
1559
1560
1561// .sbttl "Icache flush routines"
1562
        ALIGN_BLOCK
//
// Common Icache flush routine.
//
// After writing IC_FLUSH_CTL, enough NOPs must issue to drain every
// already-fetched instruction before anything that depends on the new
// icache contents runs.  The 44 NOPs counted below include the 2 in
// one_cycle_and_hw_rei, which this routine falls through into.
//
pal_ic_flush:
        nop
        mtpr    r31, ev5__ic_flush_ctl  // Icache flush - E1
        nop
        nop

// Now, do 44 NOPs. 3RFB prefetches (24) + IC buffer,IB,slot,issue (20)
        nop
        nop
        nop
        nop

        nop
        nop
        nop
        nop

        nop
        nop                             // 10

        nop
        nop
        nop
        nop

        nop
        nop
        nop
        nop

        nop
        nop                             // 20

        nop
        nop
        nop
        nop

        nop
        nop
        nop
        nop

        nop
        nop                             // 30
        nop
        nop
        nop
        nop

        nop
        nop
        nop
        nop

        nop
        nop                             // 40

        nop
        nop

// one_cycle_and_hw_rei - also used as a stand-alone "wait one cycle then
// return" target; pal_ic_flush falls through here for NOPs 43 and 44.
one_cycle_and_hw_rei:
        nop
        nop

        hw_rei_stall
1635
        ALIGN_BLOCK
//
//osfpal_calpal_opcdec
//  Here for all opcdec CALL_PALs
//
//      Build stack frame
//      a0 <- code
//      a1 <- unpred
//      a2 <- unpred
//      vector via entIF
//
//      Swaps to the kernel stack first if the CALL_PAL came from user mode.
//

osfpal_calpal_opcdec:
        sll     r11, 63-osfps_v_mode, r25 // Shift mode up to MS bit
        mtpr    r31, ev5__ps            // Set Ibox current mode to kernel

        mfpr    r14, exc_addr           // get pc
        nop

        bis     r11, r31, r12           // Save PS for stack write
        bge     r25, osfpal_calpal_opcdec_10_ // no stack swap needed if cm=kern


        mtpr    r31, ev5__dtb_cm        // Set Mbox current mode to kernel -
                                        // no virt ref for next 2 cycles
        mtpr    r30, pt_usp             // save user stack

        bis     r31, r31, r11           // Set new PS
        mfpr    r30, pt_ksp

osfpal_calpal_opcdec_10_:
        lda     sp, 0-osfsf_c_size(sp)  // allocate stack space
        nop

        stq     r16, osfsf_a0(sp)       // save regs
        bis     r31, osf_a0_opdec, r16  // set a0

        stq     r18, osfsf_a2(sp)       // a2
        mfpr    r13, pt_entif           // get entry point

        stq     r12, osfsf_ps(sp)       // save old ps
        stq     r17, osfsf_a1(sp)       // a1

        stq     r14, osfsf_pc(sp)       // save pc
        nop

        stq     r29, osfsf_gp(sp)       // save gp
        mtpr    r13, exc_addr           // load exc_addr with entIF
                                        // 1 cycle to hw_rei

        mfpr    r29, pt_kgp             // get the kgp


        hw_rei_spe                      // done
1691
1692
1693
1694
1695
1696//
1697//pal_update_pcb
1698// Update the PCB with the current SP, AST, and CC info
1699//
1700// r0 - return linkage
1701//
1702 ALIGN_BLOCK
1703
1704pal_update_pcb:
1705 mfpr r12, pt_pcbb // get pcbb
1706 and r11, osfps_m_mode, r25 // get mode
1707 beq r25, pal_update_pcb_10_ // in kern? no need to update user sp
1708 mtpr r30, pt_usp // save user stack
1709 stq_p r30, osfpcb_q_usp(r12) // store usp
1710 br r31, pal_update_pcb_20_ // join common
1711pal_update_pcb_10_: stq_p r30, osfpcb_q_ksp(r12) // store ksp
1712pal_update_pcb_20_: rpcc r13 // get cyccounter
1713 srl r13, 32, r14 // move offset
1714 addl r13, r14, r14 // merge for new time
1715 stl_p r14, osfpcb_l_cc(r12) // save time
1716
1717//orig pvc_jsr updpcb, bsr=1, dest=1
1718 ret r31, (r0)
1719
1720
1721//
1722// pal_save_state
1723//
1724// Function
1725// All chip state saved, all PT's, SR's FR's, IPR's
1726//
1727//
1728// Regs' on entry...
1729//
1730// R0 = halt code
1731// pt0 = r0
1732// R1 = pointer to impure
1733// pt4 = r1
1734// R3 = return addr
1735// pt5 = r3
1736//
1737// register usage:
1738// r0 = halt_code
1739// r1 = addr of impure area
1740// r3 = return_address
1741// r4 = scratch
1742//
1743//
1744
1745 ALIGN_BLOCK
1746 .globl pal_save_state
1747pal_save_state:
1748//
1749//
1750// start of implementation independent save routine
1751//
1752// the impure area is larger than the addressibility of hw_ld and hw_st
1753// therefore, we need to play some games: The impure area
1754// is informally divided into the "machine independent" part and the
1755// "machine dependent" part. The state that will be saved in the
1756// "machine independent" part are gpr's, fpr's, hlt, flag, mchkflag (use (un)fix_impure_gpr macros).
1757// All others will be in the "machine dependent" part (use (un)fix_impure_ipr macros).
1758// The impure pointer will need to be adjusted by a different offset for each. The store/restore_reg
1759// macros will automagically adjust the offset correctly.
1760//
1761
1762// The distributed code is commented out and followed by corresponding SRC code.
1763// Beware: SAVE_IPR and RESTORE_IPR blow away r0(v0)
1764
1765//orig fix_impure_gpr r1 // adjust impure area pointer for stores to "gpr" part of impure area
1766 lda r1, 0x200(r1) // Point to center of CPU segment
1767//orig store_reg1 flag, r31, r1, ipr=1 // clear dump area flag
1768 SAVE_GPR(r31,CNS_Q_FLAG,r1) // Clear the valid flag
1769//orig store_reg1 hlt, r0, r1, ipr=1
1770 SAVE_GPR(r0,CNS_Q_HALT,r1) // Save the halt code
1771
1772 mfpr r0, pt0 // get r0 back //orig
1773//orig store_reg1 0, r0, r1 // save r0
1774 SAVE_GPR(r0,CNS_Q_GPR+0x00,r1) // Save r0
1775
1776 mfpr r0, pt4 // get r1 back //orig
1777//orig store_reg1 1, r0, r1 // save r1
1778 SAVE_GPR(r0,CNS_Q_GPR+0x08,r1) // Save r1
1779
1780//orig store_reg 2 // save r2
1781 SAVE_GPR(r2,CNS_Q_GPR+0x10,r1) // Save r2
1782
1783 mfpr r0, pt5 // get r3 back //orig
1784//orig store_reg1 3, r0, r1 // save r3
1785 SAVE_GPR(r0,CNS_Q_GPR+0x18,r1) // Save r3
1786
1787 // reason code has been saved
1788 // r0 has been saved
1789 // r1 has been saved
1790 // r2 has been saved
1791 // r3 has been saved
1792 // pt0, pt4, pt5 have been lost
1793
1794 //
1795 // Get out of shadow mode
1796 //
1797
1798 mfpr r2, icsr // Get icsr
1799 ldah r0, (1<<(icsr_v_sde-16))(r31)
1800 bic r2, r0, r0 // ICSR with SDE clear
1801 mtpr r0, icsr // Turn off SDE
1802
1803 mfpr r31, pt0 // SDE bubble cycle 1
1804 mfpr r31, pt0 // SDE bubble cycle 2
1805 mfpr r31, pt0 // SDE bubble cycle 3
1806 nop
1807
1808
1809 // save integer regs R4-r31
1810 SAVE_GPR(r4,CNS_Q_GPR+0x20,r1)
1811 SAVE_GPR(r5,CNS_Q_GPR+0x28,r1)
1812 SAVE_GPR(r6,CNS_Q_GPR+0x30,r1)
1813 SAVE_GPR(r7,CNS_Q_GPR+0x38,r1)
1814 SAVE_GPR(r8,CNS_Q_GPR+0x40,r1)
1815 SAVE_GPR(r9,CNS_Q_GPR+0x48,r1)
1816 SAVE_GPR(r10,CNS_Q_GPR+0x50,r1)
1817 SAVE_GPR(r11,CNS_Q_GPR+0x58,r1)
1818 SAVE_GPR(r12,CNS_Q_GPR+0x60,r1)
1819 SAVE_GPR(r13,CNS_Q_GPR+0x68,r1)
1820 SAVE_GPR(r14,CNS_Q_GPR+0x70,r1)
1821 SAVE_GPR(r15,CNS_Q_GPR+0x78,r1)
1822 SAVE_GPR(r16,CNS_Q_GPR+0x80,r1)
1823 SAVE_GPR(r17,CNS_Q_GPR+0x88,r1)
1824 SAVE_GPR(r18,CNS_Q_GPR+0x90,r1)
1825 SAVE_GPR(r19,CNS_Q_GPR+0x98,r1)
1826 SAVE_GPR(r20,CNS_Q_GPR+0xA0,r1)
1827 SAVE_GPR(r21,CNS_Q_GPR+0xA8,r1)
1828 SAVE_GPR(r22,CNS_Q_GPR+0xB0,r1)
1829 SAVE_GPR(r23,CNS_Q_GPR+0xB8,r1)
1830 SAVE_GPR(r24,CNS_Q_GPR+0xC0,r1)
1831 SAVE_GPR(r25,CNS_Q_GPR+0xC8,r1)
1832 SAVE_GPR(r26,CNS_Q_GPR+0xD0,r1)
1833 SAVE_GPR(r27,CNS_Q_GPR+0xD8,r1)
1834 SAVE_GPR(r28,CNS_Q_GPR+0xE0,r1)
1835 SAVE_GPR(r29,CNS_Q_GPR+0xE8,r1)
1836 SAVE_GPR(r30,CNS_Q_GPR+0xF0,r1)
1837 SAVE_GPR(r31,CNS_Q_GPR+0xF8,r1)
1838
1839 // save all paltemp regs except pt0
1840
1841//orig unfix_impure_gpr r1 // adjust impure area pointer for gpr stores
1842//orig fix_impure_ipr r1 // adjust impure area pointer for pt stores
1843
1844 lda r1, -0x200(r1) // Restore the impure base address.
1845 lda r1, CNS_Q_IPR(r1) // Point to the base of IPR area.
1846 SAVE_IPR(pt0,CNS_Q_PT+0x00,r1) // the osf code didn't save/restore palTemp 0 ?? pboyle
1847 SAVE_IPR(pt1,CNS_Q_PT+0x08,r1)
1848 SAVE_IPR(pt2,CNS_Q_PT+0x10,r1)
1849 SAVE_IPR(pt3,CNS_Q_PT+0x18,r1)
1850 SAVE_IPR(pt4,CNS_Q_PT+0x20,r1)
1851 SAVE_IPR(pt5,CNS_Q_PT+0x28,r1)
1852 SAVE_IPR(pt6,CNS_Q_PT+0x30,r1)
1853 SAVE_IPR(pt7,CNS_Q_PT+0x38,r1)
1854 SAVE_IPR(pt8,CNS_Q_PT+0x40,r1)
1855 SAVE_IPR(pt9,CNS_Q_PT+0x48,r1)
1856 SAVE_IPR(pt10,CNS_Q_PT+0x50,r1)
1857 SAVE_IPR(pt11,CNS_Q_PT+0x58,r1)
1858 SAVE_IPR(pt12,CNS_Q_PT+0x60,r1)
1859 SAVE_IPR(pt13,CNS_Q_PT+0x68,r1)
1860 SAVE_IPR(pt14,CNS_Q_PT+0x70,r1)
1861 SAVE_IPR(pt15,CNS_Q_PT+0x78,r1)
1862 SAVE_IPR(pt16,CNS_Q_PT+0x80,r1)
1863 SAVE_IPR(pt17,CNS_Q_PT+0x88,r1)
1864 SAVE_IPR(pt18,CNS_Q_PT+0x90,r1)
1865 SAVE_IPR(pt19,CNS_Q_PT+0x98,r1)
1866 SAVE_IPR(pt20,CNS_Q_PT+0xA0,r1)
1867 SAVE_IPR(pt21,CNS_Q_PT+0xA8,r1)
1868 SAVE_IPR(pt22,CNS_Q_PT+0xB0,r1)
1869 SAVE_IPR(pt23,CNS_Q_PT+0xB8,r1)
1870
1871 // Restore shadow mode
1872 mfpr r31, pt0 // pad write to icsr out of shadow of store (trap does not abort write)
1873 mfpr r31, pt0
1874 mtpr r2, icsr // Restore original ICSR
1875
1876 mfpr r31, pt0 // SDE bubble cycle 1
1877 mfpr r31, pt0 // SDE bubble cycle 2
1878 mfpr r31, pt0 // SDE bubble cycle 3
1879 nop
1880
1881 // save all integer shadow regs
1882 SAVE_SHADOW( r8,CNS_Q_SHADOW+0x00,r1) // also called p0...p7 in the Hudson code
1883 SAVE_SHADOW( r9,CNS_Q_SHADOW+0x08,r1)
1884 SAVE_SHADOW(r10,CNS_Q_SHADOW+0x10,r1)
1885 SAVE_SHADOW(r11,CNS_Q_SHADOW+0x18,r1)
1886 SAVE_SHADOW(r12,CNS_Q_SHADOW+0x20,r1)
1887 SAVE_SHADOW(r13,CNS_Q_SHADOW+0x28,r1)
1888 SAVE_SHADOW(r14,CNS_Q_SHADOW+0x30,r1)
1889 SAVE_SHADOW(r25,CNS_Q_SHADOW+0x38,r1)
1890
1891 SAVE_IPR(excAddr,CNS_Q_EXC_ADDR,r1)
1892 SAVE_IPR(palBase,CNS_Q_PAL_BASE,r1)
1893 SAVE_IPR(mmStat,CNS_Q_MM_STAT,r1)
1894 SAVE_IPR(va,CNS_Q_VA,r1)
1895 SAVE_IPR(icsr,CNS_Q_ICSR,r1)
1896 SAVE_IPR(ipl,CNS_Q_IPL,r1)
1897 SAVE_IPR(ips,CNS_Q_IPS,r1)
1898 SAVE_IPR(itbAsn,CNS_Q_ITB_ASN,r1)
1899 SAVE_IPR(aster,CNS_Q_ASTER,r1)
1900 SAVE_IPR(astrr,CNS_Q_ASTRR,r1)
1901 SAVE_IPR(sirr,CNS_Q_SIRR,r1)
1902 SAVE_IPR(isr,CNS_Q_ISR,r1)
1903 SAVE_IPR(iVptBr,CNS_Q_IVPTBR,r1)
1904 SAVE_IPR(mcsr,CNS_Q_MCSR,r1)
1905 SAVE_IPR(dcMode,CNS_Q_DC_MODE,r1)
1906
1907//orig pvc_violate 379 // mf maf_mode after a store ok (pvc doesn't distinguish ld from st)
1908//orig store_reg maf_mode, ipr=1 // save ipr -- no mbox instructions for
1909//orig // PVC violation applies only to
1910pvc$osf35$379: // loads. HW_ST ok here, so ignore
1911 SAVE_IPR(mafMode,CNS_Q_MAF_MODE,r1) // MBOX INST->MF MAF_MODE IN 0,1,2
1912
1913
1914 //the following iprs are informational only -- will not be restored
1915
1916 SAVE_IPR(icPerr,CNS_Q_ICPERR_STAT,r1)
1917 SAVE_IPR(PmCtr,CNS_Q_PM_CTR,r1)
1918 SAVE_IPR(intId,CNS_Q_INT_ID,r1)
1919 SAVE_IPR(excSum,CNS_Q_EXC_SUM,r1)
1920 SAVE_IPR(excMask,CNS_Q_EXC_MASK,r1)
1921 ldah r14, 0xFFF0(zero)
1922 zap r14, 0xE0, r14 // Get base address of CBOX IPRs
1923 NOP // Pad mfpr dcPerr out of shadow of
1924 NOP // last store
1925 NOP
1926 SAVE_IPR(dcPerr,CNS_Q_DCPERR_STAT,r1)
1927
1928 // read cbox ipr state
1929
1930 mb
1931 ldq_p r2, scCtl(r14)
1932 ldq_p r13, ldLock(r14)
1933 ldq_p r4, scAddr(r14)
1934 ldq_p r5, eiAddr(r14)
1935 ldq_p r6, bcTagAddr(r14)
1936 ldq_p r7, fillSyn(r14)
1937 bis r5, r4, zero // Make sure all loads complete before
1938 bis r7, r6, zero // reading registers that unlock them.
1939 ldq_p r8, scStat(r14) // Unlocks scAddr.
1940 ldq_p r9, eiStat(r14) // Unlocks eiAddr, bcTagAddr, fillSyn.
1941 ldq_p zero, eiStat(r14) // Make sure it is really unlocked.
1942 mb
1943
1944 // save cbox ipr state
1945 SAVE_SHADOW(r2,CNS_Q_SC_CTL,r1);
1946 SAVE_SHADOW(r13,CNS_Q_LD_LOCK,r1);
1947 SAVE_SHADOW(r4,CNS_Q_SC_ADDR,r1);
1948 SAVE_SHADOW(r5,CNS_Q_EI_ADDR,r1);
1949 SAVE_SHADOW(r6,CNS_Q_BC_TAG_ADDR,r1);
1950 SAVE_SHADOW(r7,CNS_Q_FILL_SYN,r1);
1951 SAVE_SHADOW(r8,CNS_Q_SC_STAT,r1);
1952 SAVE_SHADOW(r9,CNS_Q_EI_STAT,r1);
1953 //bc_config? sl_rcv?
1954
1955// restore impure base
1956//orig unfix_impure_ipr r1
1957 lda r1, -CNS_Q_IPR(r1)
1958
1959// save all floating regs
1960 mfpr r0, icsr // get icsr
1961 or r31, 1, r2 // get a one
1962 sll r2, icsr_v_fpe, r2 // Shift it into ICSR<FPE> position
1963 or r2, r0, r0 // set FEN on
1964 mtpr r0, icsr // write to icsr, enabling FEN
1965
1966// map the save area virtually
1967 mtpr r31, dtbIa // Clear all DTB entries
1968 srl r1, va_s_off, r0 // Clean off byte-within-page offset
1969 sll r0, pte_v_pfn, r0 // Shift to form PFN
1970 lda r0, pte_m_prot(r0) // Set all read/write enable bits
1971 mtpr r0, dtbPte // Load the PTE and set valid
1972 mtpr r1, dtbTag // Write the PTE and tag into the DTB
1973
1974
1975// map the next page too - in case the impure area crosses a page boundary
1976 lda r4, (1<<va_s_off)(r1) // Generate address for next page
1977 srl r4, va_s_off, r0 // Clean off byte-within-page offset
1978 sll r0, pte_v_pfn, r0 // Shift to form PFN
1979 lda r0, pte_m_prot(r0) // Set all read/write enable bits
1980 mtpr r0, dtbPte // Load the PTE and set valid
1981 mtpr r4, dtbTag // Write the PTE and tag into the DTB
1982
1983 sll r31, 0, r31 // stall cycle 1
1984 sll r31, 0, r31 // stall cycle 2
1985 sll r31, 0, r31 // stall cycle 3
1986 nop
1987
1988// add offset for saving fpr regs
1989//orig fix_impure_gpr r1
1990 lda r1, 0x200(r1) // Point to center of CPU segment
1991
1992// now save the regs - F0-F31
1993 mf_fpcr f0 // original
1994
1995 SAVE_FPR(f0,CNS_Q_FPR+0x00,r1)
1996 SAVE_FPR(f1,CNS_Q_FPR+0x08,r1)
1997 SAVE_FPR(f2,CNS_Q_FPR+0x10,r1)
1998 SAVE_FPR(f3,CNS_Q_FPR+0x18,r1)
1999 SAVE_FPR(f4,CNS_Q_FPR+0x20,r1)
2000 SAVE_FPR(f5,CNS_Q_FPR+0x28,r1)
2001 SAVE_FPR(f6,CNS_Q_FPR+0x30,r1)
2002 SAVE_FPR(f7,CNS_Q_FPR+0x38,r1)
2003 SAVE_FPR(f8,CNS_Q_FPR+0x40,r1)
2004 SAVE_FPR(f9,CNS_Q_FPR+0x48,r1)
2005 SAVE_FPR(f10,CNS_Q_FPR+0x50,r1)
2006 SAVE_FPR(f11,CNS_Q_FPR+0x58,r1)
2007 SAVE_FPR(f12,CNS_Q_FPR+0x60,r1)
2008 SAVE_FPR(f13,CNS_Q_FPR+0x68,r1)
2009 SAVE_FPR(f14,CNS_Q_FPR+0x70,r1)
2010 SAVE_FPR(f15,CNS_Q_FPR+0x78,r1)
2011 SAVE_FPR(f16,CNS_Q_FPR+0x80,r1)
2012 SAVE_FPR(f17,CNS_Q_FPR+0x88,r1)
2013 SAVE_FPR(f18,CNS_Q_FPR+0x90,r1)
2014 SAVE_FPR(f19,CNS_Q_FPR+0x98,r1)
2015 SAVE_FPR(f20,CNS_Q_FPR+0xA0,r1)
2016 SAVE_FPR(f21,CNS_Q_FPR+0xA8,r1)
2017 SAVE_FPR(f22,CNS_Q_FPR+0xB0,r1)
2018 SAVE_FPR(f23,CNS_Q_FPR+0xB8,r1)
2019 SAVE_FPR(f24,CNS_Q_FPR+0xC0,r1)
2020 SAVE_FPR(f25,CNS_Q_FPR+0xC8,r1)
2021 SAVE_FPR(f26,CNS_Q_FPR+0xD0,r1)
2022 SAVE_FPR(f27,CNS_Q_FPR+0xD8,r1)
2023 SAVE_FPR(f28,CNS_Q_FPR+0xE0,r1)
2024 SAVE_FPR(f29,CNS_Q_FPR+0xE8,r1)
2025 SAVE_FPR(f30,CNS_Q_FPR+0xF0,r1)
2026 SAVE_FPR(f31,CNS_Q_FPR+0xF8,r1)
2027
2028//switch impure offset from gpr to ipr---
2029//orig unfix_impure_gpr r1
2030//orig fix_impure_ipr r1
2031//orig store_reg1 fpcsr, f0, r1, fpcsr=1
2032
2033 SAVE_FPR(f0,CNS_Q_FPCSR,r1) // fpcsr loaded above into f0 -- can it reach
2034 lda r1, -0x200(r1) // Restore the impure base address
2035
2036// and back to gpr ---
2037//orig unfix_impure_ipr r1
2038//orig fix_impure_gpr r1
2039
2040//orig lda r0, cns_mchksize(r31) // get size of mchk area
2041//orig store_reg1 mchkflag, r0, r1, ipr=1
2042//orig mb
2043
2044 lda r1, CNS_Q_IPR(r1) // Point to base of IPR area again
2045 // save this using the IPR base (it is closer) not the GRP base as they used...pb
2046 lda r0, MACHINE_CHECK_SIZE(r31) // get size of mchk area
2047 SAVE_SHADOW(r0,CNS_Q_MCHK,r1);
2048 mb
2049
2050//orig or r31, 1, r0 // get a one
2051//orig store_reg1 flag, r0, r1, ipr=1 // set dump area flag
2052//orig mb
2053
2054 lda r1, -CNS_Q_IPR(r1) // back to the base
2055 lda r1, 0x200(r1) // Point to center of CPU segment
2056 or r31, 1, r0 // get a one
2057 SAVE_GPR(r0,CNS_Q_FLAG,r1) // // set dump area valid flag
2058 mb
2059
2060 // restore impure area base
2061//orig unfix_impure_gpr r1
2062 lda r1, -0x200(r1) // Point to center of CPU segment
2063
2064 mtpr r31, dtb_ia // clear the dtb
2065 mtpr r31, itb_ia // clear the itb
2066
2067//orig pvc_jsr savsta, bsr=1, dest=1
2068 ret r31, (r3) // and back we go
2069
2070
2071
2072// .sbttl "PAL_RESTORE_STATE"
2073//
2074//
2075// Pal_restore_state
2076//
2077//
2078// register usage:
2079// r1 = addr of impure area
2080// r3 = return_address
2081// all other regs are scratchable, as they are about to
2082// be reloaded from ram.
2083//
2084// Function:
2085// All chip state restored, all SRs, FRs, PTs, IPRs
2086// *** except R1, R3, PT0, PT4, PT5 ***
2087//
2088//
2089 ALIGN_BLOCK
2090pal_restore_state:
2091
2092//need to restore sc_ctl,bc_ctl,bc_config??? if so, need to figure out a safe way to do so.
2093
2094// map the console io area virtually
2095 mtpr r31, dtbIa // Clear all DTB entries
2096 srl r1, va_s_off, r0 // Clean off byte-within-page offset
2097 sll r0, pte_v_pfn, r0 // Shift to form PFN
2098 lda r0, pte_m_prot(r0) // Set all read/write enable bits
2099 mtpr r0, dtbPte // Load the PTE and set valid
2100 mtpr r1, dtbTag // Write the PTE and tag into the DTB
2101
2102
2103// map the next page too, in case impure area crosses page boundary
2104 lda r4, (1<<VA_S_OFF)(r1) // Generate address for next page
2105 srl r4, va_s_off, r0 // Clean off byte-within-page offset
2106 sll r0, pte_v_pfn, r0 // Shift to form PFN
2107 lda r0, pte_m_prot(r0) // Set all read/write enable bits
2108 mtpr r0, dtbPte // Load the PTE and set valid
2109 mtpr r4, dtbTag // Write the PTE and tag into the DTB
2110
2111// save all floating regs
2112 mfpr r0, icsr // Get current ICSR
2113 bis zero, 1, r2 // Get a '1'
2114 or r2, (1<<(icsr_v_sde-icsr_v_fpe)), r2
2115 sll r2, icsr_v_fpe, r2 // Shift bits into position
2116 bis r2, r2, r0 // Set ICSR<SDE> and ICSR<FPE>
2117 mtpr r0, icsr // Update the chip
2118
2119 mfpr r31, pt0 // FPE bubble cycle 1 //orig
2120 mfpr r31, pt0 // FPE bubble cycle 2 //orig
2121 mfpr r31, pt0 // FPE bubble cycle 3 //orig
2122
2123//orig fix_impure_ipr r1
2124//orig restore_reg1 fpcsr, f0, r1, fpcsr=1
2125//orig mt_fpcr f0
2126//orig
2127//orig unfix_impure_ipr r1
2128//orig fix_impure_gpr r1 // adjust impure pointer offset for gpr access
2129 lda r1, 200(r1) // Point to base of IPR area again
2130 RESTORE_FPR(f0,CNS_Q_FPCSR,r1) // can it reach?? pb
2131 mt_fpcr f0 // original
2132
2133 lda r1, 0x200(r1) // point to center of CPU segment
2134
2135// restore all floating regs
2136 RESTORE_FPR(f0,CNS_Q_FPR+0x00,r1)
2137 RESTORE_FPR(f1,CNS_Q_FPR+0x08,r1)
2138 RESTORE_FPR(f2,CNS_Q_FPR+0x10,r1)
2139 RESTORE_FPR(f3,CNS_Q_FPR+0x18,r1)
2140 RESTORE_FPR(f4,CNS_Q_FPR+0x20,r1)
2141 RESTORE_FPR(f5,CNS_Q_FPR+0x28,r1)
2142 RESTORE_FPR(f6,CNS_Q_FPR+0x30,r1)
2143 RESTORE_FPR(f7,CNS_Q_FPR+0x38,r1)
2144 RESTORE_FPR(f8,CNS_Q_FPR+0x40,r1)
2145 RESTORE_FPR(f9,CNS_Q_FPR+0x48,r1)
2146 RESTORE_FPR(f10,CNS_Q_FPR+0x50,r1)
2147 RESTORE_FPR(f11,CNS_Q_FPR+0x58,r1)
2148 RESTORE_FPR(f12,CNS_Q_FPR+0x60,r1)
2149 RESTORE_FPR(f13,CNS_Q_FPR+0x68,r1)
2150 RESTORE_FPR(f14,CNS_Q_FPR+0x70,r1)
2151 RESTORE_FPR(f15,CNS_Q_FPR+0x78,r1)
2152 RESTORE_FPR(f16,CNS_Q_FPR+0x80,r1)
2153 RESTORE_FPR(f17,CNS_Q_FPR+0x88,r1)
2154 RESTORE_FPR(f18,CNS_Q_FPR+0x90,r1)
2155 RESTORE_FPR(f19,CNS_Q_FPR+0x98,r1)
2156 RESTORE_FPR(f20,CNS_Q_FPR+0xA0,r1)
2157 RESTORE_FPR(f21,CNS_Q_FPR+0xA8,r1)
2158 RESTORE_FPR(f22,CNS_Q_FPR+0xB0,r1)
2159 RESTORE_FPR(f23,CNS_Q_FPR+0xB8,r1)
2160 RESTORE_FPR(f24,CNS_Q_FPR+0xC0,r1)
2161 RESTORE_FPR(f25,CNS_Q_FPR+0xC8,r1)
2162 RESTORE_FPR(f26,CNS_Q_FPR+0xD0,r1)
2163 RESTORE_FPR(f27,CNS_Q_FPR+0xD8,r1)
2164 RESTORE_FPR(f28,CNS_Q_FPR+0xE0,r1)
2165 RESTORE_FPR(f29,CNS_Q_FPR+0xE8,r1)
2166 RESTORE_FPR(f30,CNS_Q_FPR+0xF0,r1)
2167 RESTORE_FPR(f31,CNS_Q_FPR+0xF8,r1)
2168
2169// switch impure pointer from gpr to ipr area --
2170//orig unfix_impure_gpr r1
2171//orig fix_impure_ipr r1
2172 lda r1, -0x200(r1) // Restore base address of impure area.
2173 lda r1, CNS_Q_IPR(r1) // Point to base of IPR area.
2174
2175// restore all pal regs
2176 RESTORE_IPR(pt0,CNS_Q_PT+0x00,r1) // the osf code didn't save/restore palTemp 0 ?? pboyle
2177 RESTORE_IPR(pt1,CNS_Q_PT+0x08,r1)
2178 RESTORE_IPR(pt2,CNS_Q_PT+0x10,r1)
2179 RESTORE_IPR(pt3,CNS_Q_PT+0x18,r1)
2180 RESTORE_IPR(pt4,CNS_Q_PT+0x20,r1)
2181 RESTORE_IPR(pt5,CNS_Q_PT+0x28,r1)
2182 RESTORE_IPR(pt6,CNS_Q_PT+0x30,r1)
2183 RESTORE_IPR(pt7,CNS_Q_PT+0x38,r1)
2184 RESTORE_IPR(pt8,CNS_Q_PT+0x40,r1)
2185 RESTORE_IPR(pt9,CNS_Q_PT+0x48,r1)
2186 RESTORE_IPR(pt10,CNS_Q_PT+0x50,r1)
2187 RESTORE_IPR(pt11,CNS_Q_PT+0x58,r1)
2188 RESTORE_IPR(pt12,CNS_Q_PT+0x60,r1)
2189 RESTORE_IPR(pt13,CNS_Q_PT+0x68,r1)
2190 RESTORE_IPR(pt14,CNS_Q_PT+0x70,r1)
2191 RESTORE_IPR(pt15,CNS_Q_PT+0x78,r1)
2192 RESTORE_IPR(pt16,CNS_Q_PT+0x80,r1)
2193 RESTORE_IPR(pt17,CNS_Q_PT+0x88,r1)
2194 RESTORE_IPR(pt18,CNS_Q_PT+0x90,r1)
2195 RESTORE_IPR(pt19,CNS_Q_PT+0x98,r1)
2196 RESTORE_IPR(pt20,CNS_Q_PT+0xA0,r1)
2197 RESTORE_IPR(pt21,CNS_Q_PT+0xA8,r1)
2198 RESTORE_IPR(pt22,CNS_Q_PT+0xB0,r1)
2199 RESTORE_IPR(pt23,CNS_Q_PT+0xB8,r1)
2200
2201
2202//orig restore_reg exc_addr, ipr=1 // restore ipr
2203//orig restore_reg pal_base, ipr=1 // restore ipr
2204//orig restore_reg ipl, ipr=1 // restore ipr
2205//orig restore_reg ps, ipr=1 // restore ipr
2206//orig mtpr r0, dtb_cm // set current mode in mbox too
2207//orig restore_reg itb_asn, ipr=1
2208//orig srl r0, itb_asn_v_asn, r0
2209//orig sll r0, dtb_asn_v_asn, r0
2210//orig mtpr r0, dtb_asn // set ASN in Mbox too
2211//orig restore_reg ivptbr, ipr=1
2212//orig mtpr r0, mvptbr // use ivptbr value to restore mvptbr
2213//orig restore_reg mcsr, ipr=1
2214//orig restore_reg aster, ipr=1
2215//orig restore_reg astrr, ipr=1
2216//orig restore_reg sirr, ipr=1
2217//orig restore_reg maf_mode, ipr=1 // no mbox instruction for 3 cycles
2218//orig mfpr r31, pt0 // (may issue with mt maf_mode)
2219//orig mfpr r31, pt0 // bubble cycle 1
2220//orig mfpr r31, pt0 // bubble cycle 2
2221//orig mfpr r31, pt0 // bubble cycle 3
2222//orig mfpr r31, pt0 // (may issue with following ld)
2223
2224 // r0 gets the value of RESTORE_IPR in the macro and this code uses this side effect (gag)
2225 RESTORE_IPR(excAddr,CNS_Q_EXC_ADDR,r1)
2226 RESTORE_IPR(palBase,CNS_Q_PAL_BASE,r1)
2227 RESTORE_IPR(ipl,CNS_Q_IPL,r1)
2228 RESTORE_IPR(ips,CNS_Q_IPS,r1)
2229 mtpr r0, dtbCm // Set Mbox current mode too.
2230 RESTORE_IPR(itbAsn,CNS_Q_ITB_ASN,r1)
2231 srl r0, 4, r0
2232 sll r0, 57, r0
2233 mtpr r0, dtbAsn // Set Mbox ASN too
2234 RESTORE_IPR(iVptBr,CNS_Q_IVPTBR,r1)
2235 mtpr r0, mVptBr // Set Mbox VptBr too
2236 RESTORE_IPR(mcsr,CNS_Q_MCSR,r1)
2237 RESTORE_IPR(aster,CNS_Q_ASTER,r1)
2238 RESTORE_IPR(astrr,CNS_Q_ASTRR,r1)
2239 RESTORE_IPR(sirr,CNS_Q_SIRR,r1)
2240 RESTORE_IPR(mafMode,CNS_Q_MAF_MODE,r1)
2241 STALL
2242 STALL
2243 STALL
2244 STALL
2245 STALL
2246
2247
2248 // restore all integer shadow regs
2249 RESTORE_SHADOW( r8,CNS_Q_SHADOW+0x00,r1) // also called p0...p7 in the Hudson code
2250 RESTORE_SHADOW( r9,CNS_Q_SHADOW+0x08,r1)
2251 RESTORE_SHADOW(r10,CNS_Q_SHADOW+0x10,r1)
2252 RESTORE_SHADOW(r11,CNS_Q_SHADOW+0x18,r1)
2253 RESTORE_SHADOW(r12,CNS_Q_SHADOW+0x20,r1)
2254 RESTORE_SHADOW(r13,CNS_Q_SHADOW+0x28,r1)
2255 RESTORE_SHADOW(r14,CNS_Q_SHADOW+0x30,r1)
2256 RESTORE_SHADOW(r25,CNS_Q_SHADOW+0x38,r1)
2257 RESTORE_IPR(dcMode,CNS_Q_DC_MODE,r1)
2258
2259 //
2260 // Get out of shadow mode
2261 //
2262
2263 mfpr r31, pt0 // pad last load to icsr write (in case of replay, icsr will be written anyway)
2264 mfpr r31, pt0 // ""
2265 mfpr r0, icsr // Get icsr
2266 ldah r2, (1<<(ICSR_V_SDE-16))(r31) // Get a one in SHADOW_ENABLE bit location
2267 bic r0, r2, r2 // ICSR with SDE clear
2268 mtpr r2, icsr // Turn off SDE - no palshadow rd/wr for 3 bubble cycles
2269
2270 mfpr r31, pt0 // SDE bubble cycle 1
2271 mfpr r31, pt0 // SDE bubble cycle 2
2272 mfpr r31, pt0 // SDE bubble cycle 3
2273 nop
2274
2275// switch impure pointer from ipr to gpr area --
2276//orig unfix_impure_ipr r1
2277//orig fix_impure_gpr r1
2278
2279// Restore GPRs (r0, r2 are restored later, r1 and r3 are trashed) ...
2280
2281 lda r1, -CNS_Q_IPR(r1) // Restore base address of impure area
2282 lda r1, 0x200(r1) // Point to center of CPU segment
2283
2284 // restore all integer regs
2285 RESTORE_GPR(r4,CNS_Q_GPR+0x20,r1)
2286 RESTORE_GPR(r5,CNS_Q_GPR+0x28,r1)
2287 RESTORE_GPR(r6,CNS_Q_GPR+0x30,r1)
2288 RESTORE_GPR(r7,CNS_Q_GPR+0x38,r1)
2289 RESTORE_GPR(r8,CNS_Q_GPR+0x40,r1)
2290 RESTORE_GPR(r9,CNS_Q_GPR+0x48,r1)
2291 RESTORE_GPR(r10,CNS_Q_GPR+0x50,r1)
2292 RESTORE_GPR(r11,CNS_Q_GPR+0x58,r1)
2293 RESTORE_GPR(r12,CNS_Q_GPR+0x60,r1)
2294 RESTORE_GPR(r13,CNS_Q_GPR+0x68,r1)
2295 RESTORE_GPR(r14,CNS_Q_GPR+0x70,r1)
2296 RESTORE_GPR(r15,CNS_Q_GPR+0x78,r1)
2297 RESTORE_GPR(r16,CNS_Q_GPR+0x80,r1)
2298 RESTORE_GPR(r17,CNS_Q_GPR+0x88,r1)
2299 RESTORE_GPR(r18,CNS_Q_GPR+0x90,r1)
2300 RESTORE_GPR(r19,CNS_Q_GPR+0x98,r1)
2301 RESTORE_GPR(r20,CNS_Q_GPR+0xA0,r1)
2302 RESTORE_GPR(r21,CNS_Q_GPR+0xA8,r1)
2303 RESTORE_GPR(r22,CNS_Q_GPR+0xB0,r1)
2304 RESTORE_GPR(r23,CNS_Q_GPR+0xB8,r1)
2305 RESTORE_GPR(r24,CNS_Q_GPR+0xC0,r1)
2306 RESTORE_GPR(r25,CNS_Q_GPR+0xC8,r1)
2307 RESTORE_GPR(r26,CNS_Q_GPR+0xD0,r1)
2308 RESTORE_GPR(r27,CNS_Q_GPR+0xD8,r1)
2309 RESTORE_GPR(r28,CNS_Q_GPR+0xE0,r1)
2310 RESTORE_GPR(r29,CNS_Q_GPR+0xE8,r1)
2311 RESTORE_GPR(r30,CNS_Q_GPR+0xF0,r1)
2312 RESTORE_GPR(r31,CNS_Q_GPR+0xF8,r1)
2313
2314//orig // switch impure pointer from gpr to ipr area --
2315//orig unfix_impure_gpr r1
2316//orig fix_impure_ipr r1
2317//orig restore_reg icsr, ipr=1 // restore original icsr- 4 bubbles to hw_rei
2318
2319 lda t0, -0x200(t0) // Restore base address of impure area.
2320 lda t0, CNS_Q_IPR(t0) // Point to base of IPR area again.
2321 RESTORE_IPR(icsr,CNS_Q_ICSR,r1)
2322
2323//orig // and back again --
2324//orig unfix_impure_ipr r1
2325//orig fix_impure_gpr r1
2326//orig store_reg1 flag, r31, r1, ipr=1 // clear dump area valid flag
2327//orig mb
2328
2329 lda t0, -CNS_Q_IPR(t0) // Back to base of impure area again,
2330 lda t0, 0x200(t0) // and back to center of CPU segment
2331 SAVE_GPR(r31,CNS_Q_FLAG,r1) // Clear the dump area valid flag
2332 mb
2333
2334//orig // and back we go
2335//orig// restore_reg 3
2336//orig restore_reg 2
2337//orig// restore_reg 1
2338//orig restore_reg 0
2339//orig // restore impure area base
2340//orig unfix_impure_gpr r1
2341
2342 RESTORE_GPR(r2,CNS_Q_GPR+0x10,r1)
2343 RESTORE_GPR(r0,CNS_Q_GPR+0x00,r1)
2344 lda r1, -0x200(r1) // Restore impure base address
2345
2346 mfpr r31, pt0 // stall for ldq_p above //orig
2347
2348 mtpr r31, dtb_ia // clear the tb //orig
2349 mtpr r31, itb_ia // clear the itb //orig
2350
2351//orig pvc_jsr rststa, bsr=1, dest=1
2352 ret r31, (r3) // back we go //orig
2353
2354
2355//
2356// pal_pal_bug_check -- code has found a bugcheck situation.
2357// Set things up and join common machine check flow.
2358//
2359// Input:
2360// r14 - exc_addr
2361//
2362// On exit:
2363// pt0 - saved r0
2364// pt1 - saved r1
2365// pt4 - saved r4
2366// pt5 - saved r5
2367// pt6 - saved r6
2368// pt10 - saved exc_addr
2369// pt_misc<47:32> - mchk code
2370// pt_misc<31:16> - scb vector
2371// r14 - base of Cbox IPRs in IO space
2372// MCES<mchk> is set
2373//
2374
	ALIGN_BLOCK
	.globl pal_pal_bug_check_from_int
//
// Entry from the interrupt flow: the exception stack frame has already
// been pushed, so 1 is added to the mchk code to tell the common
// machine-check flow not to push another frame.
//
pal_pal_bug_check_from_int:
	DEBUGSTORE(0x79)
//simos	DEBUG_EXC_ADDR()
	DEBUGSTORE(0x20)
//simos	bsr	r25, put_hex
	lda	r25, mchk_c_bugcheck(r31)
	addq	r25, 1, r25			// set flag indicating we came from interrupt and stack is already pushed
	br	r31, pal_pal_mchk
	nop

// Entry from straight-line PAL code: no frame pushed yet.
pal_pal_bug_check:
	lda	r25, mchk_c_bugcheck(r31)

// Common flow: assemble pt_misc = mchk_code!scbv!whami!mces, stash
// scratch registers in PALtemps, point r14 at the Cbox IPR base in IO
// space, then join the common machine-check path.
pal_pal_mchk:
	sll	r25, 32, r25			// Move mchk code to position

	mtpr	r14, pt10			// Stash exc_addr
	mtpr	r14, exc_addr

	mfpr	r12, pt_misc			// Get MCES and scratch
	zap	r12, 0x3c, r12			// clear old mchk-code<47:32> and scbv<31:16> fields

	or	r12, r25, r12			// Combine mchk code
	lda	r25, scb_v_procmchk(r31)	// Get SCB vector

	sll	r25, 16, r25			// Move SCBv to position
	or	r12, r25, r25			// Combine SCBv

	mtpr	r0, pt0				// Stash for scratch
	bis	r25, mces_m_mchk, r25		// Set MCES<MCHK> bit

	mtpr	r25, pt_misc			// Save mchk code!scbv!whami!mces
	ldah	r14, 0xfff0(r31)

	mtpr	r1, pt1				// Stash for scratch
	zap	r14, 0xE0, r14			// Get Cbox IPR base

	mtpr	r4, pt4
	mtpr	r5, pt5

	mtpr	r6, pt6
	blbs	r12, sys_double_machine_check	// old MCES<MCHK> still set: MCHK halt if double machine check

	br	r31, sys_mchk_collect_iprs	// Join common machine check flow
2421
2422
2423
2424// align_to_call_pal_section
2425// Align to address of first call_pal entry point - 2000
2426
2427//
2428// HALT - PALcode for HALT instruction
2429//
2430// Entry:
2431// Vectored into via hardware PALcode instruction dispatch.
2432//
2433// Function:
2434// GO to console code
2435//
2436//
2437
	.text	1
// . = 0x2000
	CALL_PAL_PRIV(PAL_HALT_ENTRY)
// HALT: back exc_addr up to point at the HALT instruction itself, save
// the process context via pal_update_pcb, then enter the console with a
// software-halt code in r0.
call_pal_halt:
	mfpr	r31, pt0		// Pad exc_addr read
	mfpr	r31, pt0

	mfpr	r12, exc_addr		// get PC
	subq	r12, 4, r12		// Point to the HALT

	mtpr	r12, exc_addr
	mtpr	r0, pt0			// preserve caller's r0 before it is used as link/halt code

//orig	pvc_jsr	updpcb, bsr=1
	bsr	r0, pal_update_pcb	// update the pcb
	lda	r0, hlt_c_sw_halt(r31)	// set halt code to sw halt
	br	r31, sys_enter_console	// enter the console
2455
2456//
2457// CFLUSH - PALcode for CFLUSH instruction
2458//
2459// Entry:
2460// Vectored into via hardware PALcode instruction dispatch.
2461//
2462// R16 - contains the PFN of the page to be flushed
2463//
2464// Function:
2465// Flush all Dstream caches of 1 entire page
2466// The CFLUSH routine is in the system specific module.
2467//
2468//
2469
	CALL_PAL_PRIV(PAL_CFLUSH_ENTRY)
// CFLUSH: r16 = PFN of the page to flush from the Dstream caches.
// Implementation lives in the system-specific module.
Call_Pal_Cflush:
	br	r31, sys_cflush
2473
2474//
2475// DRAINA - PALcode for DRAINA instruction
2476//
2477// Entry:
2478// Vectored into via hardware PALcode instruction dispatch.
2479// Implicit TRAPB performed by hardware.
2480//
2481// Function:
2482// Stall instruction issue until all prior instructions are guaranteed to
2483// complete without incurring aborts. For the EV5 implementation, this
2484// means waiting until all pending DREADS are returned.
2485//
2486//
2487
	CALL_PAL_PRIV(PAL_DRAINA_ENTRY)
// DRAINA: spin until the Mbox reports no DREADs pending.  The wait is
// bounded by a countdown so a wedged Mbox ends in a halt rather than a
// silent hang.
Call_Pal_Draina:
	ldah	r14, 0x100(r31)		// Init counter.  Value?
	nop

DRAINA_LOOP:
	subq	r14, 1, r14		// Decrement counter
	mfpr	r13, ev5__maf_mode	// Fetch status bit

	srl	r13, maf_mode_v_dread_pending, r13
	ble	r14, DRAINA_LOOP_TOO_LONG	// counter expired - give up and halt

	nop
	blbs	r13, DRAINA_LOOP	// Wait until all DREADS clear

	hw_rei

DRAINA_LOOP_TOO_LONG:
	br	r31, call_pal_halt
2507
// CALL_PAL OPCDECs
// Unassigned privileged CALL_PAL function codes 0x03..0x08 all vector
// to the common OPCDEC (illegal operand) handler.

	CALL_PAL_PRIV(0x0003)
CallPal_OpcDec03:
	br	r31, osfpal_calpal_opcdec

	CALL_PAL_PRIV(0x0004)
CallPal_OpcDec04:
	br	r31, osfpal_calpal_opcdec

	CALL_PAL_PRIV(0x0005)
CallPal_OpcDec05:
	br	r31, osfpal_calpal_opcdec

	CALL_PAL_PRIV(0x0006)
CallPal_OpcDec06:
	br	r31, osfpal_calpal_opcdec

	CALL_PAL_PRIV(0x0007)
CallPal_OpcDec07:
	br	r31, osfpal_calpal_opcdec

	CALL_PAL_PRIV(0x0008)
CallPal_OpcDec08:
	br	r31, osfpal_calpal_opcdec
2533
2534//
2535// CSERVE - PALcode for CSERVE instruction
2536//
2537// Entry:
2538// Vectored into via hardware PALcode instruction dispatch.
2539//
2540// Function:
2541// Various functions for private use of console software
2542//
2543// option selector in r0
2544// arguments in r16....
2545// The CSERVE routine is in the system specific module.
2546//
2547//
2548
	CALL_PAL_PRIV(PAL_CSERVE_ENTRY)
// CSERVE: console-private functions; option selector in r0, arguments
// in r16...  Implementation lives in the system-specific module.
Call_Pal_Cserve:
	br	r31, sys_cserve
2552
2553//
2554// swppal - PALcode for swppal instruction
2555//
2556// Entry:
//	Vectored into via hardware PALcode instruction dispatch.
2559// R16 contains the new PAL identifier
2560// R17:R21 contain implementation-specific entry parameters
2561//
2562// R0 receives status:
2563// 0 success (PAL was switched)
2564// 1 unknown PAL variant
2565// 2 known PAL variant, but PAL not loaded
2566//
2567//
2568// Function:
2569// Swap control to another PAL.
2570//
2571
	CALL_PAL_PRIV(PAL_SWPPAL_ENTRY)
// SWPPAL: r16 is either a known PAL variant number (value <= 255) or
// the address of a new PAL image.  r0 status: 0 = switched, 1 = unknown
// variant, 2 = variant known but not loaded.  Only variant 2 (OSF) is
// accepted here; everything else goes to swppal_fail/swppal_cont.
Call_Pal_Swppal:
	cmpule	r16, 255, r0		// see if a kibble was passed
	cmoveq	r16, r16, r0		// if r16=0 then a valid address (ECO 59)

	or	r16, r31, r3		// set r3 incase this is a address
	blbc	r0, swppal_cont		// nope, try it as an address

	cmpeq	r16, 2, r0		// is it our friend OSF?
	blbc	r0, swppal_fail		// nope, don't know this fellow

	br	r2, CALL_PAL_SWPPAL_10_	// tis our buddy OSF

//	.global	osfpal_hw_entry_reset
//	.weak	osfpal_hw_entry_reset
//	.long	<osfpal_hw_entry_reset-pal_start>
//orig	halt				// don't know how to get the address here - kludge ok, load pal at 0
	.long	0			// ?? hack upon hack...pb
					// inline data word read via the ldl_p below

CALL_PAL_SWPPAL_10_:	ldl_p	r3, 0(r2)	// fetch target addr
//	ble	r3, swppal_fail		; if OSF not linked in say not loaded.
	mfpr	r2, pal_base		// fetch pal base

	addq	r2, r3, r3		// add pal base
	lda	r2, 0x3FFF(r31)		// get pal base checker mask

	and	r3, r2, r2		// any funky bits set?
	cmpeq	r2, 0, r0		// r0 = 1 iff target is 16KB-aligned

	blbc	r0, swppal_fail		// return unknown if bad bit set.
	br	r31, swppal_cont
2603
2604// .sbttl "CALL_PAL OPCDECs"
2605
// Unassigned privileged function codes 0x0B-0x0C -> OPCDEC handler.
	CALL_PAL_PRIV(0x000B)
CallPal_OpcDec0B:
	br	r31, osfpal_calpal_opcdec

	CALL_PAL_PRIV(0x000C)
CallPal_OpcDec0C:
	br	r31, osfpal_calpal_opcdec
2613
2614//
2615// wripir - PALcode for wripir instruction
2616//
2617// Entry:
2618// Vectored into via hardware PALcode instruction dispatch.
2619// r16 = processor number to interrupt
2620//
2621// Function:
2622// IPIR <- R16
2623// Handled in system-specific code
2624//
2625// Exit:
2626// interprocessor interrupt is recorded on the target processor
2627// and is initiated when the proper enabling conditions are present.
2628//
2629
	CALL_PAL_PRIV(PAL_WRIPIR_ENTRY)
// WRIPIR: r16 = processor number to interrupt; handled in
// system-specific code.  NOTE(review): label is spelled "Wrpir" rather
// than "Wripir"; left untouched in case it is referenced elsewhere.
Call_Pal_Wrpir:
	br	r31, sys_wripir
2633
2634// .sbttl "CALL_PAL OPCDECs"
2635
// Unassigned privileged function codes 0x0E-0x0F -> OPCDEC handler.
	CALL_PAL_PRIV(0x000E)
CallPal_OpcDec0E:
	br	r31, osfpal_calpal_opcdec

	CALL_PAL_PRIV(0x000F)
CallPal_OpcDec0F:
	br	r31, osfpal_calpal_opcdec
2643
2644//
2645// rdmces - PALcode for rdmces instruction
2646//
2647// Entry:
2648// Vectored into via hardware PALcode instruction dispatch.
2649//
2650// Function:
2651// R0 <- ZEXT(MCES)
2652//
2653
	CALL_PAL_PRIV(PAL_RDMCES_ENTRY)
// RDMCES: v0 <- zero-extended MCES, which lives in the low bits of
// PALtemp pt_misc/pt_mces.
Call_Pal_Rdmces:
	mfpr	r0, pt_mces		// Read from PALtemp
	and	r0, mces_m_all, r0	// Clear other bits

	hw_rei
2660
2661//
2662// wrmces - PALcode for wrmces instruction
2663//
2664// Entry:
2665// Vectored into via hardware PALcode instruction dispatch.
2666//
2667// Function:
2668// If {R16<0> EQ 1} then MCES<0> <- 0 (MCHK)
2669// If {R16<1> EQ 1} then MCES<1> <- 0 (SCE)
2670// If {R16<2> EQ 1} then MCES<2> <- 0 (PCE)
2671// MCES<3> <- R16<3> (DPC)
2672// MCES<4> <- R16<4> (DSC)
2673//
2674//
2675
	CALL_PAL_PRIV(PAL_WRMCES_ENTRY)
// WRMCES: MCHK/SCE/PCE are write-1-to-clear (a 1 in a0 clears the MCES
// bit); DPC and DSC are copied straight from a0<3> and a0<4>.
Call_Pal_Wrmces:
	and	r16, ((1<<mces_v_mchk) | (1<<mces_v_sce) | (1<<mces_v_pce)), r13	// Isolate MCHK, SCE, PCE
	mfpr	r14, pt_mces		// Get current value

	ornot	r31, r13, r13		// Flip all the bits
	and	r16, ((1<<mces_v_dpc) | (1<<mces_v_dsc)), r17

	and	r14, r13, r1		// Update MCHK, SCE, PCE
	bic	r1, ((1<<mces_v_dpc) | (1<<mces_v_dsc)), r1	// Clear old DPC, DSC

	or	r1, r17, r1		// Update DPC and DSC
	mtpr	r1, pt_mces		// Write MCES back

	nop				// Pad to fix PT write->read restriction

	nop
	hw_rei
2694
2695
2696
// CALL_PAL OPCDECs
// Unassigned privileged CALL_PAL function codes 0x12..0x2A all vector
// to the common OPCDEC handler.

	CALL_PAL_PRIV(0x0012)
CallPal_OpcDec12:
	br	r31, osfpal_calpal_opcdec

	CALL_PAL_PRIV(0x0013)
CallPal_OpcDec13:
	br	r31, osfpal_calpal_opcdec

	CALL_PAL_PRIV(0x0014)
CallPal_OpcDec14:
	br	r31, osfpal_calpal_opcdec

	CALL_PAL_PRIV(0x0015)
CallPal_OpcDec15:
	br	r31, osfpal_calpal_opcdec

	CALL_PAL_PRIV(0x0016)
CallPal_OpcDec16:
	br	r31, osfpal_calpal_opcdec

	CALL_PAL_PRIV(0x0017)
CallPal_OpcDec17:
	br	r31, osfpal_calpal_opcdec

	CALL_PAL_PRIV(0x0018)
CallPal_OpcDec18:
	br	r31, osfpal_calpal_opcdec

	CALL_PAL_PRIV(0x0019)
CallPal_OpcDec19:
	br	r31, osfpal_calpal_opcdec

	CALL_PAL_PRIV(0x001A)
CallPal_OpcDec1A:
	br	r31, osfpal_calpal_opcdec

	CALL_PAL_PRIV(0x001B)
CallPal_OpcDec1B:
	br	r31, osfpal_calpal_opcdec

	CALL_PAL_PRIV(0x001C)
CallPal_OpcDec1C:
	br	r31, osfpal_calpal_opcdec

	CALL_PAL_PRIV(0x001D)
CallPal_OpcDec1D:
	br	r31, osfpal_calpal_opcdec

	CALL_PAL_PRIV(0x001E)
CallPal_OpcDec1E:
	br	r31, osfpal_calpal_opcdec

	CALL_PAL_PRIV(0x001F)
CallPal_OpcDec1F:
	br	r31, osfpal_calpal_opcdec

	CALL_PAL_PRIV(0x0020)
CallPal_OpcDec20:
	br	r31, osfpal_calpal_opcdec

	CALL_PAL_PRIV(0x0021)
CallPal_OpcDec21:
	br	r31, osfpal_calpal_opcdec

	CALL_PAL_PRIV(0x0022)
CallPal_OpcDec22:
	br	r31, osfpal_calpal_opcdec

	CALL_PAL_PRIV(0x0023)
CallPal_OpcDec23:
	br	r31, osfpal_calpal_opcdec

	CALL_PAL_PRIV(0x0024)
CallPal_OpcDec24:
	br	r31, osfpal_calpal_opcdec

	CALL_PAL_PRIV(0x0025)
CallPal_OpcDec25:
	br	r31, osfpal_calpal_opcdec

	CALL_PAL_PRIV(0x0026)
CallPal_OpcDec26:
	br	r31, osfpal_calpal_opcdec

	CALL_PAL_PRIV(0x0027)
CallPal_OpcDec27:
	br	r31, osfpal_calpal_opcdec

	CALL_PAL_PRIV(0x0028)
CallPal_OpcDec28:
	br	r31, osfpal_calpal_opcdec

	CALL_PAL_PRIV(0x0029)
CallPal_OpcDec29:
	br	r31, osfpal_calpal_opcdec

	CALL_PAL_PRIV(0x002A)
CallPal_OpcDec2A:
	br	r31, osfpal_calpal_opcdec
2798
2799//
2800// wrfen - PALcode for wrfen instruction
2801//
2802// Entry:
2803// Vectored into via hardware PALcode instruction dispatch.
2804//
2805// Function:
2806// a0<0> -> ICSR<FPE>
2807// Store new FEN in PCB
2808// Final value of t0 (r1), t8..t10 (r22..r24) and a0 (r16)
2809// are UNPREDICTABLE
2810//
2811// Issue: What about pending FP loads when FEN goes from on->off????
2812//
2813
	CALL_PAL_PRIV(PAL_WRFEN_ENTRY)
// WRFEN: copy a0<0> into ICSR<FPE> and store the new FEN value into the
// current PCB.  Per the header above, t0 (r1), t8..t10 and a0 end up
// UNPREDICTABLE.
Call_Pal_Wrfen:
	or	r31, 1, r13		// Get a one
	mfpr	r1, ev5__icsr		// Get current FPE

	sll	r13, icsr_v_fpe, r13	// shift 1 to icsr<fpe> spot, e0
	and	r16, 1, r16		// clean new fen

	sll	r16, icsr_v_fpe, r12	// shift new fen to correct bit position
	bic	r1, r13, r1		// zero icsr<fpe>

	or	r1, r12, r1		// Or new FEN into ICSR
	mfpr	r12, pt_pcbb		// Get PCBB - E1

	mtpr	r1, ev5__icsr		// write new ICSR.  3 Bubble cycles to HW_REI
	stl_p	r16, osfpcb_q_fen(r12)	// Store FEN in PCB.

	mfpr	r31, pt0		// Pad ICSR<FPE> write.
	mfpr	r31, pt0

	mfpr	r31, pt0
//	pvc_violate 225			// cuz PVC can't distinguish which bits changed
	hw_rei
2837
2838
// Unassigned privileged function code 0x2C -> OPCDEC handler.
	CALL_PAL_PRIV(0x002C)
CallPal_OpcDec2C:
	br	r31, osfpal_calpal_opcdec
2842
2843//
2844// wrvptpr - PALcode for wrvptpr instruction
2845//
2846// Entry:
2847// Vectored into via hardware PALcode instruction dispatch.
2848//
2849// Function:
2850// vptptr <- a0 (r16)
2851//
2852
	CALL_PAL_PRIV(PAL_WRVPTPTR_ENTRY)
// WRVPTPTR: write a0 into both the Mbox and Ibox copies of the virtual
// page table base register.
Call_Pal_Wrvptptr:
	mtpr	r16, ev5__mvptbr	// Load Mbox copy
	mtpr	r16, ev5__ivptbr	// Load Ibox copy
	nop				// Pad IPR write
	nop
	hw_rei
2860
// Unassigned privileged function codes 0x2E-0x2F -> OPCDEC handler.
	CALL_PAL_PRIV(0x002E)
CallPal_OpcDec2E:
	br	r31, osfpal_calpal_opcdec

	CALL_PAL_PRIV(0x002F)
CallPal_OpcDec2F:
	br	r31, osfpal_calpal_opcdec
2868
2869
2870//
2871// swpctx - PALcode for swpctx instruction
2872//
2873// Entry:
2874// hardware dispatch via callPal instruction
2875// R16 -> new pcb
2876//
2877// Function:
2878// dynamic state moved to old pcb
2879// new state loaded from new pcb
2880// pcbb pointer set
2881// old pcbb returned in R0
2882//
2883// Note: need to add perf monitor stuff
2884//
2885
	CALL_PAL_PRIV(PAL_SWPCTX_ENTRY)
// SWPCTX: save dynamic state (ksp, usp, accumulated cycle count) into
// the old PCB, start loading state from the new PCB at r16, and return
// the old pcbb in r0.  Continues at swpctx_cont (out of call_pal space)
// with r12 = new fen, r23 = new cc/asn, r24/r25 as set up below.
Call_Pal_Swpctx:
	rpcc	r13			// get cyccounter
	mfpr	r0, pt_pcbb		// get pcbb

	ldq_p	r22, osfpcb_q_fen(r16)	// get new fen/pme
	ldq_p	r23, osfpcb_l_cc(r16)	// get new cc/asn (quad load at the cc offset -- presumably spans both 32-bit fields; confirm PCB layout)

	srl	r13, 32, r25		// move offset
	mfpr	r24, pt_usp		// get usp

	stq_p	r30, osfpcb_q_ksp(r0)	// store old ksp
//	pvc_violate 379			// stq_p can't trap except replay.  only problem if mf same ipr in same shadow.
	mtpr	r16, pt_pcbb		// set new pcbb

	stq_p	r24, osfpcb_q_usp(r0)	// store usp
	addl	r13, r25, r25		// merge for new time

	stl_p	r25, osfpcb_l_cc(r0)	// save time
	ldah	r24, (1<<(icsr_v_fpe-16))(r31)

	and	r22, 1, r12		// isolate fen
	mfpr	r25, icsr		// get current icsr

	lda	r24, (1<<icsr_v_pmp)(r24)	// r24 = mask of ICSR<FPE> | ICSR<PMP>
	br	r31, swpctx_cont
2912
2913//
2914// wrval - PALcode for wrval instruction
2915//
2916// Entry:
2917// Vectored into via hardware PALcode instruction dispatch.
2918//
2919// Function:
2920// sysvalue <- a0 (r16)
2921//
2922
	CALL_PAL_PRIV(PAL_WRVAL_ENTRY)
// WRVAL: sysvalue <- a0; nops pad the paltemp write.
Call_Pal_Wrval:
	nop
	mtpr	r16, pt_sysval		// Pad paltemp write
	nop
	nop
	hw_rei
2930
2931//
2932// rdval - PALcode for rdval instruction
2933//
2934// Entry:
2935// Vectored into via hardware PALcode instruction dispatch.
2936//
2937// Function:
2938// v0 (r0) <- sysvalue
2939//
2940
	CALL_PAL_PRIV(PAL_RDVAL_ENTRY)
// RDVAL: v0 <- sysvalue (PALtemp pt_sysval).
Call_Pal_Rdval:
	nop
	mfpr	r0, pt_sysval
	nop
	hw_rei
2947
2948//
2949// tbi - PALcode for tbi instruction
2950//
2951// Entry:
2952// Vectored into via hardware PALcode instruction dispatch.
2953//
2954// Function:
2955// TB invalidate
2956// r16/a0 = TBI type
2957// r17/a1 = Va for TBISx instructions
2958//
2959
	CALL_PAL_PRIV(PAL_TBI_ENTRY)
// TBI: r16 = TBI type, r17 = VA for the TBISx forms.  The type is
// biased by 2 and used as an index into the 16-byte-per-entry dispatch
// table at tbi_tbl; out-of-range types just hw_rei.
Call_Pal_Tbi:
	addq	r16, 2, r16			// change range to 0-5
	br	r23, CALL_PAL_tbi_10_		// get our address

CALL_PAL_tbi_10_: cmpult r16, 6, r22		// see if in range
	lda	r23, tbi_tbl-CALL_PAL_tbi_10_(r23)	// set base to start of table
	sll	r16, 4, r16			// * 16
	blbc	r22, CALL_PAL_tbi_30_		// go rei, if not

	addq	r23, r16, r23			// addr of our code
//orig	pvc_jsr	tbi
	jmp	r31, (r23)			// and go do it

CALL_PAL_tbi_30_:
	hw_rei
	nop
2977
2978//
2979// wrent - PALcode for wrent instruction
2980//
2981// Entry:
2982// Vectored into via hardware PALcode instruction dispatch.
2983//
2984// Function:
2985// Update ent* in paltemps
2986// r16/a0 = Address of entry routine
2987// r17/a1 = Entry Number 0..5
2988//
2989// r22, r23 trashed
2990//
2991
	CALL_PAL_PRIV(PAL_WRENT_ENTRY)
// WRENT: r16 = address of entry routine, r17 = entry number 0..5.
// Dispatches through the 16-byte-per-entry wrent_tbl to store the
// cleaned address into the right ent* PALtemp.  r22, r23 trashed.
Call_Pal_Wrent:
	cmpult	r17, 6, r22			// see if in range
	br	r23, CALL_PAL_wrent_10_		// get our address

CALL_PAL_wrent_10_: bic	r16, 3, r16		// clean pc
	blbc	r22, CALL_PAL_wrent_30_		// go rei, if not in range

	lda	r23, wrent_tbl-CALL_PAL_wrent_10_(r23)	// set base to start of table
	sll	r17, 4, r17			// *16

	addq	r17, r23, r23			// Get address in table
//orig	pvc_jsr	wrent
	jmp	r31, (r23)			// and go do it

CALL_PAL_wrent_30_:
	hw_rei					// out of range, just return
3009
3010//
3011// swpipl - PALcode for swpipl instruction
3012//
3013// Entry:
3014// Vectored into via hardware PALcode instruction dispatch.
3015//
3016// Function:
3017// v0 (r0) <- PS<IPL>
3018// PS<IPL> <- a0<2:0> (r16)
3019//
3020// t8 (r22) is scratch
3021//
3022
	CALL_PAL_PRIV(PAL_SWPIPL_ENTRY)
// SWPIPL: v0 <- old PS<IPL>, PS<IPL> <- a0<2:0>.  r11 is the PALshadow
// PS; pt_intmask holds one hardware-mask byte per software ipl level.
Call_Pal_Swpipl:
	and	r16, osfps_m_ipl, r16	// clean New ipl
	mfpr	r22, pt_intmask		// get int mask

	extbl	r22, r16, r22		// get mask for this ipl
	bis	r11, r31, r0		// return old ipl

	bis	r16, r31, r11		// set new ps
	mtpr	r22, ev5__ipl		// set new mask

	mfpr	r31, pt0		// pad ipl write
	mfpr	r31, pt0		// pad ipl write

	hw_rei				// back
3038
3039//
3040// rdps - PALcode for rdps instruction
3041//
3042// Entry:
3043// Vectored into via hardware PALcode instruction dispatch.
3044//
3045// Function:
3046// v0 (r0) <- ps
3047//
3048
	CALL_PAL_PRIV(PAL_RDPS_ENTRY)
// RDPS: v0 <- PS (kept in PALshadow register r11).
Call_Pal_Rdps:
	bis	r11, r31, r0		// Fetch PALshadow PS
	nop				// Must be 2 cycles long
	hw_rei
3054
3055//
3056// wrkgp - PALcode for wrkgp instruction
3057//
3058// Entry:
3059// Vectored into via hardware PALcode instruction dispatch.
3060//
3061// Function:
3062// kgp <- a0 (r16)
3063//
3064
	CALL_PAL_PRIV(PAL_WRKGP_ENTRY)
// WRKGP: kernel global pointer <- a0.
Call_Pal_Wrkgp:
	nop
	mtpr	r16, pt_kgp
	nop				// Pad for pt write->read restriction
	nop
	hw_rei
3072
3073//
3074// wrusp - PALcode for wrusp instruction
3075//
3076// Entry:
3077// Vectored into via hardware PALcode instruction dispatch.
3078//
3079// Function:
3080// usp <- a0 (r16)
3081//
3082
	CALL_PAL_PRIV(PAL_WRUSP_ENTRY)
// WRUSP: user stack pointer <- a0.
Call_Pal_Wrusp:
	nop
	mtpr	r16, pt_usp
	nop				// Pad possible pt write->read restriction
	nop
	hw_rei
3090
3091//
3092// wrperfmon - PALcode for wrperfmon instruction
3093//
3094// Entry:
3095// Vectored into via hardware PALcode instruction dispatch.
3096//
3097//
3098// Function:
3099// Various control functions for the onchip performance counters
3100//
3101// option selector in r16
3102// option argument in r17
3103// returned status in r0
3104//
3105//
3106// r16 = 0 Disable performance monitoring for one or more cpu's
3107// r17 = 0 disable no counters
3108// r17 = bitmask disable counters specified in bit mask (1=disable)
3109//
3110// r16 = 1 Enable performance monitoring for one or more cpu's
3111// r17 = 0 enable no counters
3112// r17 = bitmask enable counters specified in bit mask (1=enable)
3113//
3114// r16 = 2 Mux select for one or more cpu's
3115// r17 = Mux selection (cpu specific)
3116// <24:19> bc_ctl<pm_mux_sel> field (see spec)
3117// <31>,<7:4>,<3:0> pmctr <sel0>,<sel1>,<sel2> fields (see spec)
3118//
3119// r16 = 3 Options
3120// r17 = (cpu specific)
3121// <0> = 0 log all processes
3122// <0> = 1 log only selected processes
3123// <30,9,8> mode select - ku,kp,kk
3124//
3125// r16 = 4 Interrupt frequency select
3126// r17 = (cpu specific) indicates interrupt frequencies desired for each
3127// counter, with "zero interrupts" being an option
3128// frequency info in r17 bits as defined by PMCTR_CTL<FRQx> below
3129//
3130// r16 = 5 Read Counters
3131// r17 = na
3132// r0 = value (same format as ev5 pmctr)
3133// <0> = 0 Read failed
3134// <0> = 1 Read succeeded
3135//
3136// r16 = 6 Write Counters
3137// r17 = value (same format as ev5 pmctr; all counters written simultaneously)
3138//
3139// r16 = 7 Enable performance monitoring for one or more cpu's and reset counter to 0
3140// r17 = 0 enable no counters
3141// r17 = bitmask enable & clear counters specified in bit mask (1=enable & clear)
3142//
3143//=============================================================================
3144//Assumptions:
3145//PMCTR_CTL:
3146//
3147// <15:14> CTL0 -- encoded frequency select and enable - CTR0
3148// <13:12> CTL1 -- " - CTR1
3149// <11:10> CTL2 -- " - CTR2
3150//
3151// <9:8> FRQ0 -- frequency select for CTR0 (no enable info)
3152// <7:6> FRQ1 -- frequency select for CTR1
3153// <5:4> FRQ2 -- frequency select for CTR2
3154//
3155// <0> all vs. select processes (0=all,1=select)
3156//
3157// where
3158// FRQx<1:0>
3159// 0 1 disable interrupt
3160// 1 0 frequency = 65536 (16384 for ctr2)
3161// 1 1 frequency = 256
3162// note: FRQx<1:0> = 00 will keep counters from ever being enabled.
3163//
3164//=============================================================================
3165//
	CALL_PAL_PRIV(0x0039)
// unsupported in Hudson code .. pboyle Nov/95
// WRPERFMON: dispatch on the function selector in r16 (encodings are
// documented in the large header comment above); argument in r17,
// status returned in r0 by the target routines.
CALL_PAL_Wrperfmon:
	// "real" performance monitoring code
	cmpeq	r16, 1, r0		// check for enable
	bne	r0, perfmon_en		// br if requested to enable

	cmpeq	r16, 2, r0		// check for mux ctl
	bne	r0, perfmon_muxctl	// br if request to set mux controls

	cmpeq	r16, 3, r0		// check for options
	bne	r0, perfmon_ctl		// br if request to set options

	cmpeq	r16, 4, r0		// check for interrupt frequency select
	bne	r0, perfmon_freq	// br if request to change frequency select

	cmpeq	r16, 5, r0		// check for counter read request
	bne	r0, perfmon_rd		// br if request to read counters

	cmpeq	r16, 6, r0		// check for counter write request
	bne	r0, perfmon_wr		// br if request to write counters

	cmpeq	r16, 7, r0		// check for counter clear/enable request
	bne	r0, perfmon_enclr	// br if request to clear/enable counters

	beq	r16, perfmon_dis	// br if requested to disable (r16=0)
	br	r31, perfmon_unknown	// br if unknown request
3193
3194//
3195// rdusp - PALcode for rdusp instruction
3196//
3197// Entry:
3198// Vectored into via hardware PALcode instruction dispatch.
3199//
3200// Function:
3201// v0 (r0) <- usp
3202//
3203
	CALL_PAL_PRIV(PAL_RDUSP_ENTRY)
// RDUSP: v0 <- user stack pointer (PALtemp pt_usp).
Call_Pal_Rdusp:
	nop
	mfpr	r0, pt_usp
	hw_rei
3209
3210
// Unassigned privileged function code 0x3B -> OPCDEC handler.
	CALL_PAL_PRIV(0x003B)
CallPal_OpcDec3B:
	br	r31, osfpal_calpal_opcdec
3214
3215//
3216// whami - PALcode for whami instruction
3217//
3218// Entry:
3219// Vectored into via hardware PALcode instruction dispatch.
3220//
3221// Function:
3222// v0 (r0) <- whami
3223//
	CALL_PAL_PRIV(PAL_WHAMI_ENTRY)
// WHAMI: v0 <- processor number (byte 1 of PALtemp pt_whami).
Call_Pal_Whami:
	nop
	mfpr	r0, pt_whami		// Get Whami
	extbl	r0, 1, r0		// Isolate just whami bits
	hw_rei
3230
3231//
3232// retsys - PALcode for retsys instruction
3233//
3234// Entry:
3235// Vectored into via hardware PALcode instruction dispatch.
3236// 00(sp) contains return pc
3237// 08(sp) contains r29
3238//
3239// Function:
3240// Return from system call.
3241// mode switched from kern to user.
3242// stacks swapped, ugp, upc restored.
3243// r23, r25 junked
3244//
3245
	CALL_PAL_PRIV(PAL_RETSYS_ENTRY)
// RETSYS: return from system call.  Pops the frame (pc at 00(sp), gp
// at 08(sp)), switches kernel->user (stacks, Ibox/Mbox mode, PS),
// clears the lock and intr flags, and hw_rei_spe's to the cleaned
// return pc.  r23, r25 are junked.
Call_Pal_Retsys:
	lda	r25, osfsf_c_size(sp)	// pop stack
	bis	r25, r31, r14		// touch r25 & r14 to stall mf exc_addr

	mfpr	r14, exc_addr		// save exc_addr in case of fault
	ldq	r23, osfsf_pc(sp)	// get pc

	ldq	r29, osfsf_gp(sp)	// get gp
	stl_c	r31, -4(sp)		// clear lock_flag

	lda	r11, 1<<osfps_v_mode(r31)// new PS:mode=user
	mfpr	r30, pt_usp		// get users stack

	bic	r23, 3, r23		// clean return pc
	mtpr	r31, ev5__ipl		// zero ibox IPL - 2 bubbles to hw_rei

	mtpr	r11, ev5__dtb_cm	// set Mbox current mode - no virt ref for 2 cycles
	mtpr	r11, ev5__ps		// set Ibox current mode - 2 bubble to hw_rei

	mtpr	r23, exc_addr		// set return address - 1 bubble to hw_rei
	mtpr	r25, pt_ksp		// save kern stack

	rc	r31			// clear inter_flag
//	pvc_violate 248			// possible hidden mt->mf pt violation ok in callpal
	hw_rei_spe			// and back
3272
3273
// Unassigned privileged function code 0x3E -> OPCDEC handler.
	CALL_PAL_PRIV(0x003E)
CallPal_OpcDec3E:
	br	r31, osfpal_calpal_opcdec
3277
3278//
3279// rti - PALcode for rti instruction
3280//
3281// Entry:
3282// Vectored into via hardware PALcode instruction dispatch.
3283//
3284// Function:
3285// 00(sp) -> ps
3286// 08(sp) -> pc
3287// 16(sp) -> r29 (gp)
3288// 24(sp) -> r16 (a0)
3289// 32(sp) -> r17 (a1)
//	40(sp) -> r18 (a2)
3291//
3292
	CALL_PAL_PRIV(PAL_RTI_ENTRY)
	/* called once by platform_tlaser */
	.globl Call_Pal_Rti
// RTI: pop the six-quadword interrupt frame (ps, pc, gp, a0, a1, a2 --
// offsets taken relative to the updated sp in r25), clear lock/intr
// flags, then resume in kernel or user mode as saved PS<mode> dictates.
Call_Pal_Rti:
	lda	r25, osfsf_c_size(sp)	// get updated sp
	bis	r25, r31, r14		// touch r14,r25 to stall mf exc_addr

	mfpr	r14, exc_addr		// save PC in case of fault
	rc	r31			// clear intr_flag

	ldq	r12, -6*8(r25)		// get ps
	ldq	r13, -5*8(r25)		// pc

	ldq	r18, -1*8(r25)		// a2
	ldq	r17, -2*8(r25)		// a1

	ldq	r16, -3*8(r25)		// a0
	ldq	r29, -4*8(r25)		// gp

	bic	r13, 3, r13		// clean return pc
	stl_c	r31, -4(r25)		// clear lock_flag

	and	r12, osfps_m_mode, r11	// get mode
	mtpr	r13, exc_addr		// set return address

	beq	r11, rti_to_kern	// br if rti to Kern
	br	r31, rti_to_user	// out of call_pal space
3320
3321
3322///////////////////////////////////////////////////
3323// Start the Unprivileged CALL_PAL Entry Points
3324///////////////////////////////////////////////////
3325
3326//
3327// bpt - PALcode for bpt instruction
3328//
3329// Entry:
3330//	Vectored into via hardware PALcode instruction dispatch.
3331//
3332// Function:
3333//	Build stack frame
3334//	a0 <- code
3335//	a1 <- unpred
3336//	a2 <- unpred
3337//	vector via entIF
3338//
// r11 is the PS PALshadow; shifting its mode bit to the sign position lets
// bge test "came from kernel".  If entered from user mode, swap to the
// kernel stack before pushing the entIF frame (completed in bpt_bchk_common).
3339//
3340//
3341	.text	1
3342//	. = 0x3000
3343	CALL_PAL_UNPRIV(PAL_BPT_ENTRY)
3344Call_Pal_Bpt:
3345	sll	r11, 63-osfps_v_mode, r25 // Shift mode up to MS bit
3346	mtpr	r31, ev5__ps	// Set Ibox current mode to kernel
3347
3348	bis	r11, r31, r12	// Save PS for stack write
3349	bge	r25, CALL_PAL_bpt_10_	// no stack swap needed if cm=kern
3350
3351	mtpr	r31, ev5__dtb_cm	// Set Mbox current mode to kernel -
3352	//     no virt ref for next 2 cycles
3353	mtpr	r30, pt_usp	// save user stack
3354
3355	bis	r31, r31, r11	// Set new PS
3356	mfpr	r30, pt_ksp	// get kernel stack
3357
3358CALL_PAL_bpt_10_:
3359	lda	sp, 0-osfsf_c_size(sp)// allocate stack space
3360	mfpr	r14, exc_addr	// get pc
3361
3362	stq	r16, osfsf_a0(sp)	// save regs
3363	bis	r31, osf_a0_bpt, r16	// set a0
3364
3365	stq	r17, osfsf_a1(sp)	// a1
3366	br	r31, bpt_bchk_common	// out of call_pal space
3367
3368
3369//
3370// bugchk - PALcode for bugchk instruction
3371//
3372// Entry:
3373//	Vectored into via hardware PALcode instruction dispatch.
3374//
3375// Function:
3376//	Build stack frame
3377//	a0 <- code
3378//	a1 <- unpred
3379//	a2 <- unpred
3380//	vector via entIF
3381//
// Identical to Call_Pal_Bpt except that a0 is set to osf_a0_bugchk; the
// common tail (frame completion and dispatch via entIF) is bpt_bchk_common.
3382//
3383//
3384	CALL_PAL_UNPRIV(PAL_BUGCHK_ENTRY)
3385Call_Pal_Bugchk:
3386	sll	r11, 63-osfps_v_mode, r25 // Shift mode up to MS bit
3387	mtpr	r31, ev5__ps	// Set Ibox current mode to kernel
3388
3389	bis	r11, r31, r12	// Save PS for stack write
3390	bge	r25, CALL_PAL_bugchk_10_	// no stack swap needed if cm=kern
3391
3392	mtpr	r31, ev5__dtb_cm	// Set Mbox current mode to kernel -
3393	//     no virt ref for next 2 cycles
3394	mtpr	r30, pt_usp	// save user stack
3395
3396	bis	r31, r31, r11	// Set new PS
3397	mfpr	r30, pt_ksp	// get kernel stack
3398
3399CALL_PAL_bugchk_10_:
3400	lda	sp, 0-osfsf_c_size(sp)// allocate stack space
3401	mfpr	r14, exc_addr	// get pc
3402
3403	stq	r16, osfsf_a0(sp)	// save regs
3404	bis	r31, osf_a0_bugchk, r16	// set a0
3405
3406	stq	r17, osfsf_a1(sp)	// a1
3407	br	r31, bpt_bchk_common	// out of call_pal space
3408
3409
// Unassigned unprivileged CALL_PAL 0x82: vector to the common OPCDEC handler.
3410	CALL_PAL_UNPRIV(0x0082)
3411CallPal_OpcDec82:
3412	br	r31, osfpal_calpal_opcdec
3413
3414//
3415// callsys - PALcode for callsys instruction
3416//
3417// Entry:
3418//	Vectored into via hardware PALcode instruction dispatch.
3419//
3420// Function:
3421// 	Switch mode to kernel and build a callsys stack frame.
3422// 	sp = ksp
3423// 	gp = kgp
3424//	t8 - t10 (r22-r24) trashed
3425//
// Kernel-mode callers are rejected (sys_from_kern); otherwise push a
// minimal frame (gp/ps/pc) on the kernel stack and jump to the kernel's
// entSys handler (pt_entsys).
3426//
3427//
3428	CALL_PAL_UNPRIV(PAL_CALLSYS_ENTRY)
3429Call_Pal_Callsys:
3430
3431	and	r11, osfps_m_mode, r24	// get mode
3432	mfpr	r22, pt_ksp	// get ksp
3433
3434	beq	r24, sys_from_kern 	// sysCall from kern is not allowed
3435	mfpr	r12, pt_entsys	// get address of callSys routine
3436
3437//
3438// from here on we know we are in user going to Kern
3439//
3440	mtpr	r31, ev5__dtb_cm	// set Mbox current mode - no virt ref for 2 cycles
3441	mtpr	r31, ev5__ps	// set Ibox current mode - 2 bubble to hw_rei
3442
3443	bis	r31, r31, r11	// PS=0 (mode=kern)
3444	mfpr	r23, exc_addr	// get pc
3445
3446	mtpr	r30, pt_usp	// save usp
3447	lda	sp, 0-osfsf_c_size(r22)// set new sp
3448
3449	stq	r29, osfsf_gp(sp)	// save user gp/r29
3450	stq	r24, osfsf_ps(sp)	// save ps (r24 = mode field of the entry PS)
3451
3452	stq	r23, osfsf_pc(sp)	// save pc
3453	mtpr	r12, exc_addr	// set address
3454	// 1 cycle to hw_rei
3455
3456	mfpr	r29, pt_kgp	// get the kern gp/r29
3457
3458	hw_rei_spe	// and off we go!
3459
3460
// Unassigned unprivileged CALL_PALs 0x84/0x85: vector to the OPCDEC handler.
3461	CALL_PAL_UNPRIV(0x0084)
3462CallPal_OpcDec84:
3463	br	r31, osfpal_calpal_opcdec
3464
3465	CALL_PAL_UNPRIV(0x0085)
3466CallPal_OpcDec85:
3467	br	r31, osfpal_calpal_opcdec
3468
3469//
3470// imb - PALcode for imb instruction
3471//
3472// Entry:
3473//	Vectored into via hardware PALcode instruction dispatch.
3474//
3475// Function:
3476//	Flush the writebuffer and flush the Icache
3477//
// The mb drains the write buffer; the mfpr of mcsr orders the flush with
// the following Icache flush routine (which ends in its own hw_rei).
3478//
3479//
3480	CALL_PAL_UNPRIV(PAL_IMB_ENTRY)
3481Call_Pal_Imb:
3482	mb	// Clear the writebuffer
3483	mfpr	r31, ev5__mcsr	// Sync with clear
3484	nop	// pad -- TODO confirm scheduling requirement
3485	nop
3486	br	r31, pal_ic_flush	// Flush Icache
3487
3488
3489// CALL_PAL OPCDECs: unassigned unprivileged CALL_PAL numbers 0x87-0x9D.
// Every entry simply vectors to the common OPCDEC (illegal CALL_PAL) handler.
3490
3491	CALL_PAL_UNPRIV(0x0087)
3492CallPal_OpcDec87:
3493	br	r31, osfpal_calpal_opcdec
3494
3495	CALL_PAL_UNPRIV(0x0088)
3496CallPal_OpcDec88:
3497	br	r31, osfpal_calpal_opcdec
3498
3499	CALL_PAL_UNPRIV(0x0089)
3500CallPal_OpcDec89:
3501	br	r31, osfpal_calpal_opcdec
3502
3503	CALL_PAL_UNPRIV(0x008A)
3504CallPal_OpcDec8A:
3505	br	r31, osfpal_calpal_opcdec
3506
3507	CALL_PAL_UNPRIV(0x008B)
3508CallPal_OpcDec8B:
3509	br	r31, osfpal_calpal_opcdec
3510
3511	CALL_PAL_UNPRIV(0x008C)
3512CallPal_OpcDec8C:
3513	br	r31, osfpal_calpal_opcdec
3514
3515	CALL_PAL_UNPRIV(0x008D)
3516CallPal_OpcDec8D:
3517	br	r31, osfpal_calpal_opcdec
3518
3519	CALL_PAL_UNPRIV(0x008E)
3520CallPal_OpcDec8E:
3521	br	r31, osfpal_calpal_opcdec
3522
3523	CALL_PAL_UNPRIV(0x008F)
3524CallPal_OpcDec8F:
3525	br	r31, osfpal_calpal_opcdec
3526
3527	CALL_PAL_UNPRIV(0x0090)
3528CallPal_OpcDec90:
3529	br	r31, osfpal_calpal_opcdec
3530
3531	CALL_PAL_UNPRIV(0x0091)
3532CallPal_OpcDec91:
3533	br	r31, osfpal_calpal_opcdec
3534
3535	CALL_PAL_UNPRIV(0x0092)
3536CallPal_OpcDec92:
3537	br	r31, osfpal_calpal_opcdec
3538
3539	CALL_PAL_UNPRIV(0x0093)
3540CallPal_OpcDec93:
3541	br	r31, osfpal_calpal_opcdec
3542
3543	CALL_PAL_UNPRIV(0x0094)
3544CallPal_OpcDec94:
3545	br	r31, osfpal_calpal_opcdec
3546
3547	CALL_PAL_UNPRIV(0x0095)
3548CallPal_OpcDec95:
3549	br	r31, osfpal_calpal_opcdec
3550
3551	CALL_PAL_UNPRIV(0x0096)
3552CallPal_OpcDec96:
3553	br	r31, osfpal_calpal_opcdec
3554
3555	CALL_PAL_UNPRIV(0x0097)
3556CallPal_OpcDec97:
3557	br	r31, osfpal_calpal_opcdec
3558
3559	CALL_PAL_UNPRIV(0x0098)
3560CallPal_OpcDec98:
3561	br	r31, osfpal_calpal_opcdec
3562
3563	CALL_PAL_UNPRIV(0x0099)
3564CallPal_OpcDec99:
3565	br	r31, osfpal_calpal_opcdec
3566
3567	CALL_PAL_UNPRIV(0x009A)
3568CallPal_OpcDec9A:
3569	br	r31, osfpal_calpal_opcdec
3570
3571	CALL_PAL_UNPRIV(0x009B)
3572CallPal_OpcDec9B:
3573	br	r31, osfpal_calpal_opcdec
3574
3575	CALL_PAL_UNPRIV(0x009C)
3576CallPal_OpcDec9C:
3577	br	r31, osfpal_calpal_opcdec
3578
3579	CALL_PAL_UNPRIV(0x009D)
3580CallPal_OpcDec9D:
3581	br	r31, osfpal_calpal_opcdec
3582
3583//
3584// rdunique - PALcode for rdunique instruction
3585//
3586// Entry:
3587//	Vectored into via hardware PALcode instruction dispatch.
3588//
3589// Function:
3590//	v0 (r0) <- unique
3591//
// Reads the per-process unique value out of the current PCB.
3592//
3593//
3594	CALL_PAL_UNPRIV(PAL_RDUNIQUE_ENTRY)
3595CALL_PALrdunique_:
3596	mfpr	r0, pt_pcbb	// get pcb pointer
3597	ldq_p	r0, osfpcb_q_unique(r0) // read the unique value from the PCB
3598
3599	hw_rei
3600
3601//
3602// wrunique - PALcode for wrunique instruction
3603//
3604// Entry:
3605//	Vectored into via hardware PALcode instruction dispatch.
3606//
3607// Function:
3608//	unique <- a0 (r16)
3609//
// Stores a0 as the per-process unique value in the current PCB.
3610//
3611//
3612CALL_PAL_UNPRIV(PAL_WRUNIQUE_ENTRY)
3613CALL_PAL_Wrunique:
3614	nop
3615	mfpr	r12, pt_pcbb	// get pcb pointer
3616	stq_p	r16, osfpcb_q_unique(r12)// store new value
3617	nop	// Pad palshadow write
3618	hw_rei	// back
3619
3620// CALL_PAL OPCDECs: unassigned unprivileged CALL_PAL numbers 0xA0-0xA9.
// Every entry simply vectors to the common OPCDEC (illegal CALL_PAL) handler.
3621
3622	CALL_PAL_UNPRIV(0x00A0)
3623CallPal_OpcDecA0:
3624	br	r31, osfpal_calpal_opcdec
3625
3626	CALL_PAL_UNPRIV(0x00A1)
3627CallPal_OpcDecA1:
3628	br	r31, osfpal_calpal_opcdec
3629
3630	CALL_PAL_UNPRIV(0x00A2)
3631CallPal_OpcDecA2:
3632	br	r31, osfpal_calpal_opcdec
3633
3634	CALL_PAL_UNPRIV(0x00A3)
3635CallPal_OpcDecA3:
3636	br	r31, osfpal_calpal_opcdec
3637
3638	CALL_PAL_UNPRIV(0x00A4)
3639CallPal_OpcDecA4:
3640	br	r31, osfpal_calpal_opcdec
3641
3642	CALL_PAL_UNPRIV(0x00A5)
3643CallPal_OpcDecA5:
3644	br	r31, osfpal_calpal_opcdec
3645
3646	CALL_PAL_UNPRIV(0x00A6)
3647CallPal_OpcDecA6:
3648	br	r31, osfpal_calpal_opcdec
3649
3650	CALL_PAL_UNPRIV(0x00A7)
3651CallPal_OpcDecA7:
3652	br	r31, osfpal_calpal_opcdec
3653
3654	CALL_PAL_UNPRIV(0x00A8)
3655CallPal_OpcDecA8:
3656	br	r31, osfpal_calpal_opcdec
3657
3658	CALL_PAL_UNPRIV(0x00A9)
3659CallPal_OpcDecA9:
3660	br	r31, osfpal_calpal_opcdec
3661
3662
3663//
3664// gentrap - PALcode for gentrap instruction
3665//
3666// CALL_PAL_gentrap:
3667// Entry:
3668//	Vectored into via hardware PALcode instruction dispatch.
3669//
3670// Function:
3671//	Build stack frame
3672//	a0 <- code
3673//	a1 <- unpred
3674//	a2 <- unpred
3675//	vector via entIF
3676//
// Same shape as Call_Pal_Bpt/Call_Pal_Bugchk with a0 = osf_a0_gentrap;
// the frame is finished and dispatched via entIF in bpt_bchk_common.
3677//
3678
3679	CALL_PAL_UNPRIV(0x00AA)
3680// unsupported in Hudson code .. pboyle Nov/95
3681CALL_PAL_gentrap:
3682	sll	r11, 63-osfps_v_mode, r25 // Shift mode up to MS bit
3683	mtpr	r31, ev5__ps	// Set Ibox current mode to kernel
3684
3685	bis	r11, r31, r12	// Save PS for stack write
3686	bge	r25, CALL_PAL_gentrap_10_	// no stack swap needed if cm=kern
3687
3688	mtpr	r31, ev5__dtb_cm	// Set Mbox current mode to kernel -
3689	//     no virt ref for next 2 cycles
3690	mtpr	r30, pt_usp	// save user stack
3691
3692	bis	r31, r31, r11	// Set new PS
3693	mfpr	r30, pt_ksp	// get kernel stack
3694
3695CALL_PAL_gentrap_10_:
3696	lda	sp, 0-osfsf_c_size(sp)// allocate stack space
3697	mfpr	r14, exc_addr	// get pc
3698
3699	stq	r16, osfsf_a0(sp)	// save regs
3700	bis	r31, osf_a0_gentrap, r16// set a0
3701
3702	stq	r17, osfsf_a1(sp)	// a1
3703	br	r31, bpt_bchk_common	// out of call_pal space
3704
3705
3706// CALL_PAL OPCDECs: unassigned unprivileged CALL_PAL numbers 0xAB-0xBE
// vector to the common OPCDEC handler; 0xBF has been repurposed as a
// memory-copy service (see copypal_impl below).
3707
3708	CALL_PAL_UNPRIV(0x00AB)
3709CallPal_OpcDecAB:
3710	br	r31, osfpal_calpal_opcdec
3711
3712	CALL_PAL_UNPRIV(0x00AC)
3713CallPal_OpcDecAC:
3714	br	r31, osfpal_calpal_opcdec
3715
3716	CALL_PAL_UNPRIV(0x00AD)
3717CallPal_OpcDecAD:
3718	br	r31, osfpal_calpal_opcdec
3719
3720	CALL_PAL_UNPRIV(0x00AE)
3721CallPal_OpcDecAE:
3722	br	r31, osfpal_calpal_opcdec
3723
3724	CALL_PAL_UNPRIV(0x00AF)
3725CallPal_OpcDecAF:
3726	br	r31, osfpal_calpal_opcdec
3727
3728	CALL_PAL_UNPRIV(0x00B0)
3729CallPal_OpcDecB0:
3730	br	r31, osfpal_calpal_opcdec
3731
3732	CALL_PAL_UNPRIV(0x00B1)
3733CallPal_OpcDecB1:
3734	br	r31, osfpal_calpal_opcdec
3735
3736	CALL_PAL_UNPRIV(0x00B2)
3737CallPal_OpcDecB2:
3738	br	r31, osfpal_calpal_opcdec
3739
3740	CALL_PAL_UNPRIV(0x00B3)
3741CallPal_OpcDecB3:
3742	br	r31, osfpal_calpal_opcdec
3743
3744	CALL_PAL_UNPRIV(0x00B4)
3745CallPal_OpcDecB4:
3746	br	r31, osfpal_calpal_opcdec
3747
3748	CALL_PAL_UNPRIV(0x00B5)
3749CallPal_OpcDecB5:
3750	br	r31, osfpal_calpal_opcdec
3751
3752	CALL_PAL_UNPRIV(0x00B6)
3753CallPal_OpcDecB6:
3754	br	r31, osfpal_calpal_opcdec
3755
3756	CALL_PAL_UNPRIV(0x00B7)
3757CallPal_OpcDecB7:
3758	br	r31, osfpal_calpal_opcdec
3759
3760	CALL_PAL_UNPRIV(0x00B8)
3761CallPal_OpcDecB8:
3762	br	r31, osfpal_calpal_opcdec
3763
3764	CALL_PAL_UNPRIV(0x00B9)
3765CallPal_OpcDecB9:
3766	br	r31, osfpal_calpal_opcdec
3767
3768	CALL_PAL_UNPRIV(0x00BA)
3769CallPal_OpcDecBA:
3770	br	r31, osfpal_calpal_opcdec
3771
3772	CALL_PAL_UNPRIV(0x00BB)
3773CallPal_OpcDecBB:
3774	br	r31, osfpal_calpal_opcdec
3775
3776	CALL_PAL_UNPRIV(0x00BC)
3777CallPal_OpcDecBC:
3778	br	r31, osfpal_calpal_opcdec
3779
3780	CALL_PAL_UNPRIV(0x00BD)
3781CallPal_OpcDecBD:
3782	br	r31, osfpal_calpal_opcdec
3783
3784	CALL_PAL_UNPRIV(0x00BE)
3785CallPal_OpcDecBE:
3786	br	r31, osfpal_calpal_opcdec
3787
3788	CALL_PAL_UNPRIV(0x00BF)
3789CallPal_OpcDecBF:
3790	// MODIFIED BY EGH 2/25/04
	// 0xBF no longer OPCDECs: it dispatches to the PAL copy routine.
3791	br	r31, copypal_impl
3792
3793
3794/*======================================================================*/
3795/* OSF/1 CALL_PAL CONTINUATION AREA */
3796/*======================================================================*/
3797
3798 .text 2
3799
3800 . = 0x4000
3801
3802
3803// Continuation of MTPR_PERFMON
3804	ALIGN_BLOCK
3805	// "real" performance monitoring code
3806// mux ctl
// perfmon_muxctl: program the event mux selects.  a1 (r17) carries both the
// pmctr sel0/sel1/sel2 fields and the bc_ctl pm_mux_sel field; the Ibox
// pmctr IPR is updated directly, the Cbox bc_ctl is written via its IPR
// address and its software shadow in the impure area is kept in sync.
3807perfmon_muxctl:
3808	lda	r8, 1(r31) 	// get a 1
3809	sll	r8, pmctr_v_sel0, r8	// move to sel0 position
3810	or	r8, ((0xf<<pmctr_v_sel1) | (0xf<<pmctr_v_sel2)), r8	// build mux select mask
3811	and	r17, r8, r25	// isolate pmctr mux select bits
3812	mfpr	r0, ev5__pmctr
3813	bic	r0, r8, r0	// clear old mux select bits
3814	or	r0,r25, r25	// or in new mux select bits
3815	mtpr	r25, ev5__pmctr
3816
3817	// ok, now tackle cbox mux selects
3818	ldah	r14, 0xfff0(r31)
3819	zap	r14, 0xE0, r14	// Get Cbox IPR base
3820//orig	get_bc_ctl_shadow	r16	// bc_ctl returned in lower longword
3821// adapted from ev5_pal_macros.mar
3822	mfpr	r16, pt_impure
3823	lda	r16, CNS_Q_IPR(r16)
3824	RESTORE_SHADOW(r16,CNS_Q_BC_CTL,r16);
3825
3826	lda	r8, 0x3F(r31)	// build mux select mask
3827	sll	r8, bc_ctl_v_pm_mux_sel, r8
3828
3829	and	r17, r8, r25	// isolate bc_ctl mux select bits
3830	bic	r16, r8, r16	// clear old mux select bits
3831	or	r16, r25, r25	// create new bc_ctl
3832	mb	// clear out cbox for future ipr write
3833	stq_p	r25, ev5__bc_ctl(r14)	// store to cbox ipr
3834	mb	// clear out cbox for future ipr write
3835
3836//orig	update_bc_ctl_shadow	r25, r16	// r25=value, r16-overwritten with adjusted impure ptr
3837// adapted from ev5_pal_macros.mar
3838	mfpr	r16, pt_impure
3839	lda	r16, CNS_Q_IPR(r16)
3840	SAVE_SHADOW(r25,CNS_Q_BC_CTL,r16);
3841
3842	br 	r31, perfmon_success
3843
3844
3845// requested to disable perf monitoring
// a1 (r17): bit0 -> disable ctr0, bit1 -> ctr1, bit2 -> ctr2.  Each selected
// counter's 2-bit ctl field in pmctr is cleared, then the pm_ctl shadow in
// the impure area is brought up to date with the new ctl bits.
3846perfmon_dis:
3847	mfpr	r14, ev5__pmctr		// read ibox pmctr ipr
3848perfmon_dis_ctr0:			// and begin with ctr0
3849	blbc	r17, perfmon_dis_ctr1	// do not disable ctr0
3850	lda	r8, 3(r31)
3851	sll	r8, pmctr_v_ctl0, r8
3852	bic	r14, r8, r14		// disable ctr0
3853perfmon_dis_ctr1:
3854	srl	r17, 1, r17
3855	blbc	r17, perfmon_dis_ctr2	// do not disable ctr1
3856	lda	r8, 3(r31)
3857	sll	r8, pmctr_v_ctl1, r8
3858	bic	r14, r8, r14		// disable ctr1
3859perfmon_dis_ctr2:
3860	srl	r17, 1, r17
3861	blbc	r17, perfmon_dis_update	// do not disable ctr2
3862	lda	r8, 3(r31)
3863	sll	r8, pmctr_v_ctl2, r8
3864	bic	r14, r8, r14		// disable ctr2
3865perfmon_dis_update:
3866	mtpr	r14, ev5__pmctr		// update pmctr ipr
3867//;the following code is not needed for ev5 pass2 and later, but doesn't hurt anything to leave in
3868// adapted from ev5_pal_macros.mar
3869//orig	get_pmctr_ctl	r8, r25		// pmctr_ctl bit in r8.  adjusted impure pointer in r25
3870	mfpr	r25, pt_impure
3871	lda	r25, CNS_Q_IPR(r25)
3872	RESTORE_SHADOW(r8,CNS_Q_PM_CTL,r25);
3873
3874	lda	r17, 0x3F(r31)		// build mask
3875	sll	r17, pmctr_v_ctl2, r17	// shift mask to correct position
3876	and	r14, r17, r14		// isolate ctl bits
3877	bic	r8, r17, r8		// clear out old ctl bits
3878	or	r14, r8, r14		// create shadow ctl bits
3879//orig	store_reg1 pmctr_ctl, r14, r25, ipr=1	// update pmctr_ctl register
3880//adjusted impure pointer still in r25
3881	SAVE_SHADOW(r14,CNS_Q_PM_CTL,r25);
3882
3883	br	r31, perfmon_success
3884
3885
3886// requested to enable perf monitoring
//;the following code can be greatly simplified for pass2, but should work fine as is.
// perfmon_enclr enters with the "clear counters while enabling" flag set
// (r9=1); perfmon_en with it clear.  a1 (r17) selects counters by bit, as
// in perfmon_dis.  The routine also propagates the PCB's PME bit into
// icsr<pmp>, applies the saved frequency selects from the pm_ctl shadow,
// and finally refreshes that shadow with the new ctl field values.
3887
3888
3889perfmon_enclr:
3890	lda	r9, 1(r31)	// set enclr flag
3891	br perfmon_en_cont
3892
3893perfmon_en:
3894	bis	r31, r31, r9	// clear enclr flag
3895
3896perfmon_en_cont:
3897	mfpr	r8, pt_pcbb	// get PCB base
3898//orig	get_pmctr_ctl r25, r25
3899	mfpr	r25, pt_impure
3900	lda	r25, CNS_Q_IPR(r25)
3901	RESTORE_SHADOW(r25,CNS_Q_PM_CTL,r25);
3902
3903	ldq_p	r16, osfpcb_q_fen(r8)	// read DAT/PME/FEN quadword
3904	mfpr	r14, ev5__pmctr		// read ibox pmctr ipr
3905	srl	r16, osfpcb_v_pme, r16	// get pme bit
3906	mfpr	r13, icsr
3907	and	r16, 1, r16		// isolate pme bit
3908
3909	// this code only needed in pass2 and later
3910	lda	r12, 1<<icsr_v_pmp(r31)	// pb
3911	bic	r13, r12, r13		// clear pmp bit
3912	sll	r16, icsr_v_pmp, r12	// move pme bit to icsr<pmp> position
3913	or	r12, r13, r13		// new icsr with icsr<pmp> bit set/clear
3914	mtpr	r13, icsr		// update icsr
3915
3916	bis	r31, 1, r16		// set r16<0> on pass2 to update pmctr always (icsr provides real enable)
3917
3918	sll	r25, 6, r25		// shift frequency bits into pmctr_v_ctl positions
3919	bis	r14, r31, r13		// copy pmctr
3920
3921perfmon_en_ctr0:			// and begin with ctr0
3922	blbc	r17, perfmon_en_ctr1	// do not enable ctr0
3923
3924	blbc	r9, perfmon_en_noclr0	// if enclr flag set, clear ctr0 field
3925	lda	r8, 0xffff(r31)
3926	zapnot	r8, 3, r8		// ctr0<15:0> mask
3927	sll	r8, pmctr_v_ctr0, r8
3928	bic	r14, r8, r14		// clear ctr bits
3929	bic	r13, r8, r13		// clear ctr bits
3930
3931perfmon_en_noclr0:
3932//orig	get_addr	r8, 3<<pmctr_v_ctl0, r31
3933	LDLI(r8, (3<<pmctr_v_ctl0))
3934	and 	r25, r8, r12		//isolate frequency select bits for ctr0
3935	bic	r14, r8, r14		// clear ctl0 bits in preparation for enabling
3936	or	r14,r12,r14		// or in new ctl0 bits
3937
3938perfmon_en_ctr1:			// enable ctr1
3939	srl	r17, 1, r17		// get ctr1 enable
3940	blbc	r17, perfmon_en_ctr2	// do not enable ctr1
3941
3942	blbc	r9, perfmon_en_noclr1	// if enclr flag set, clear ctr1 field
3943	lda	r8, 0xffff(r31)
3944	zapnot	r8, 3, r8		// ctr1<15:0> mask
3945	sll	r8, pmctr_v_ctr1, r8
3946	bic	r14, r8, r14		// clear ctr bits
3947	bic	r13, r8, r13		// clear ctr bits
3948
3949perfmon_en_noclr1:
3950//orig	get_addr	r8, 3<<pmctr_v_ctl1, r31
3951	LDLI(r8, (3<<pmctr_v_ctl1))
3952	and	r25, r8, r12		//isolate frequency select bits for ctr1
3953	bic	r14, r8, r14		// clear ctl1 bits in preparation for enabling
3954	or	r14,r12,r14		// or in new ctl1 bits
3955
3956perfmon_en_ctr2:			// enable ctr2
3957	srl	r17, 1, r17		// get ctr2 enable
3958	blbc	r17, perfmon_en_return	// do not enable ctr2 - return
3959
3960	blbc	r9, perfmon_en_noclr2	// if enclr flag set, clear ctr2 field
3961	lda	r8, 0x3FFF(r31)		// ctr2<13:0> mask
3962	sll	r8, pmctr_v_ctr2, r8
3963	bic	r14, r8, r14		// clear ctr bits
3964	bic	r13, r8, r13		// clear ctr bits
3965
3966perfmon_en_noclr2:
3967//orig	get_addr	r8, 3<<pmctr_v_ctl2, r31
3968	LDLI(r8, (3<<pmctr_v_ctl2))
3969	and	r25, r8, r12		//isolate frequency select bits for ctr2
3970	bic	r14, r8, r14		// clear ctl2 bits in preparation for enabling
3971	or	r14,r12,r14		// or in new ctl2 bits
3972
3973perfmon_en_return:
3974	cmovlbs	r16, r14, r13		// if pme enabled, move enables into pmctr
3975					// else only do the counter clears
3976	mtpr	r13, ev5__pmctr		// update pmctr ipr
3977
3978//;this code not needed for pass2 and later, but does not hurt to leave it in
3979	lda	r8, 0x3F(r31)
3980//orig	get_pmctr_ctl	r25, r12	// read pmctr ctl; r12=adjusted impure pointer
3981	mfpr	r12, pt_impure
3982	lda	r12, CNS_Q_IPR(r12)
3983	RESTORE_SHADOW(r25,CNS_Q_PM_CTL,r12);
3984
3985	sll	r8, pmctr_v_ctl2, r8	// build ctl mask
3986	and	r8, r14, r14		// isolate new ctl bits
3987	bic	r25, r8, r25		// clear out old ctl value
3988	or	r25, r14, r14		// create new pmctr_ctl
3989//orig	store_reg1	pmctr_ctl, r14, r12, ipr=1
3990	SAVE_SHADOW(r14,CNS_Q_PM_CTL,r12); // r12 still has the adjusted impure ptr
3991
3992	br	r31, perfmon_success
3994
3995
3996// options...
// perfmon_ctl: set the monitoring mode.  a1 (r17) supplies the pmctr
// killu/killp/killk mode bits and, in bit 0, the "select processes" flag:
// r17<0>=0 sets icsr<pma> (monitor all processes), r17<0>=1 clears it and
// marks the pm_ctl shadow for per-process monitoring.
3997perfmon_ctl:
3998
3999// set mode
4000//orig	get_pmctr_ctl	r14, r12	// read shadow pmctr ctl; r12=adjusted impure pointer
4001	mfpr	r12, pt_impure
4002	lda	r12, CNS_Q_IPR(r12)
4003	RESTORE_SHADOW(r14,CNS_Q_PM_CTL,r12);
4004
4005	// build mode mask for pmctr register
4006	LDLI(r8, ((1<<pmctr_v_killu) | (1<<pmctr_v_killp) | (1<<pmctr_v_killk)))
4007	mfpr	r0, ev5__pmctr
4008	and	r17, r8, r25		// isolate pmctr mode bits
4009	bic	r0, r8, r0		// clear old mode bits
4010	or	r0, r25, r25		// or in new mode bits
4011	mtpr	r25, ev5__pmctr
4012
4013	// the following code will only be used in pass2, but should
4014	//  not hurt anything if run in pass1.
4015	mfpr	r8, icsr
4016	lda	r25, 1<<icsr_v_pma(r31)	// set icsr<pma> if r17<0>=0
4017	bic	r8, r25, r8		// clear old pma bit
4018	cmovlbs r17, r31, r25		// and clear icsr<pma> if r17<0>=1
4019	or	r8, r25, r8
4020	mtpr	r8, icsr		// 4 bubbles to hw_rei
4021	mfpr	r31, pt0		// pad icsr write
4022	mfpr	r31, pt0		// pad icsr write
4023
4024	// the following code not needed for pass2 and later, but
4025	//  should work anyway.
4026	bis     r14, 1, r14		// set for select processes
4027	blbs	r17, perfmon_sp		// branch if select processes
4028	bic	r14, 1, r14		// all processes
4029perfmon_sp:
4030//orig	store_reg1 pmctr_ctl, r14, r12, ipr=1	// update pmctr_ctl register
4031	SAVE_SHADOW(r14,CNS_Q_PM_CTL,r12); // r12 still has the adjusted impure ptr
4032	br	r31, perfmon_success
4033
4034// counter frequency select
// perfmon_freq: update the counter frequency select bits kept in the
// pm_ctl shadow in the impure area (they are applied to pmctr when the
// counters are next enabled -- see perfmon_en).
4035perfmon_freq:
4036//orig	get_pmctr_ctl	r14, r12	// read shadow pmctr ctl; r12=adjusted impure pointer
4037	mfpr	r12, pt_impure
4038	lda	r12, CNS_Q_IPR(r12)
4039	RESTORE_SHADOW(r14,CNS_Q_PM_CTL,r12);
4040
4041	lda	r8, 0x3F(r31)
4042//orig	sll	r8, pmctr_ctl_v_frq2, r8	// build mask for frequency select field
4043// I guess this should be a shift of 4 bits from the above control register structure
4044#define pmctr_ctl_v_frq2_SHIFT 4
4045	sll	r8, pmctr_ctl_v_frq2_SHIFT, r8	// build mask for frequency select field
4046
4047	and	r8, r17, r17
4048	bic	r14, r8, r14		// clear out old frequency select bits
4049
4050	or	r17, r14, r14		// or in new frequency select info
4051//orig	store_reg1	pmctr_ctl, r14, r12, ipr=1	// update pmctr_ctl register
4052	SAVE_SHADOW(r14,CNS_Q_PM_CTL,r12); // r12 still has the adjusted impure ptr
4053
4054	br	r31, perfmon_success
4055
4056// read counters
// Returns the raw pmctr IPR in v0 with bit 0 forced to 1 as the success status.
4057perfmon_rd:
4058	mfpr	r0, ev5__pmctr
4059	or	r0, 1, r0	// or in return status
4060	hw_rei			// back to user
4061
4062// write counters
// Replaces all three counter-value fields of pmctr with the corresponding
// fields of a1 (r17), leaving the ctl/sel/mode bits untouched.
4063perfmon_wr:
4064	mfpr	r14, ev5__pmctr
4065	lda	r8, 0x3FFF(r31)		// ctr2<13:0> mask
4066	sll	r8, pmctr_v_ctr2, r8
4067
4068	LDLI(r9, (0xFFFFFFFF))		// ctr0<15:0>,ctr1<15:0> mask (once shifted below)
4069	sll	r9, pmctr_v_ctr1, r9
4070	or	r8, r9, r8		// or ctr2, ctr1, ctr0 mask
4071	bic	r14, r8, r14		// clear ctr fields
4072	and	r17, r8, r25		// clear all but ctr fields
4073	or	r25, r14, r14		// write ctr fields
4074	mtpr	r14, ev5__pmctr		// update pmctr ipr
4075
4076	mfpr	r31, pt0		// pad pmctr write (needed only to keep PVC happy)
4077
// Common perfmon exits: v0=1 for success, v0=0 for an unrecognized request.
4078perfmon_success:
4079	or	r31, 1, r0	// set success
4080	hw_rei			// back to user
4081
4082perfmon_unknown:
4083	or	r31, r31, r0	// set fail
4084	hw_rei			// back to user
4085
4086
4087//////////////////////////////////////////////////////////
4088// Copy code
4089//////////////////////////////////////////////////////////
// copypal_impl: PALcode memory copy, reached via CALL_PAL 0xBF.
//   a0 (r16) = destination, a1 (r17) = source, a2 (r18) = byte count;
//   v0 (r0) returns the original destination.
// Uses ldq_u/stq_u with extql/extqh/mskql/mskqh/insql/insqh merging so any
// source/destination alignment and any length (including partial first and
// last quadwords) is handled without stray byte writes.
4090
4091copypal_impl:
4092	mov r16, r0
4093#ifdef CACHE_COPY
4094#ifndef CACHE_COPY_UNALIGNED
4095	and r16, 63, r8
4096	and r17, 63, r9
4097	bis r8, r9, r8
4098	bne r8, cache_copy_done
4099#endif
4100	bic r18, 63, r8
4101	and r18, 63, r18
4102	beq r8, cache_copy_done
// NOTE(review): this CACHE_COPY loop (dead unless CACHE_COPY is defined)
// looks broken: it loads from 0(r16) -- the DESTINATION -- and stores back
// to 0(r16), yet advances both r16 and r17 by 64 and consumes the bulk of
// the length, so the main copy below only handles the sub-64-byte tail.
// It also moves only a 4-byte F-float per 64-byte step.  Presumably it
// should load from 0(r17); verify before ever enabling CACHE_COPY.
4103cache_loop:
4104	ldf f17, 0(r16)
4105	stf f17, 0(r16)
4106	addq r17, 64, r17
4107	addq r16, 64, r16
4108	subq r8, 64, r8
4109	bne r8, cache_loop
4110cache_copy_done:
4111#endif
4112	ble r18, finished	// if len <=0 we are finished
4113	ldq_u r8, 0(r17)
4114	xor r17, r16, r9	// low 3 bits differ => src/dst relative misalignment
4115	and r9, 7, r9
4116	and r16, 7, r10
4117	bne r9, unaligned
	// src and dst share alignment: merge a partial first quadword if needed
4118	beq r10, aligned
4119	ldq_u r9, 0(r16)
4120	addq r18, r10, r18
4121	mskqh r8, r17, r8
4122	mskql r9, r17, r9
4123	bis r8, r9, r8
4124aligned:
4125	subq r18, 1, r10
4126	bic r10, 7, r10		// r10 = whole quadwords to move
4127	and r18, 7, r18		// r18 = leftover bytes in the last quadword
4128	beq r10, aligned_done
4129loop:
4130	stq_u r8, 0(r16)
4131	ldq_u r8, 8(r17)
4132	subq r10, 8, r10
4133	lda r16,8(r16)
4134	lda r17,8(r17)
4135	bne r10, loop
4136aligned_done:
4137	bne r18, few_left	// partial final quadword remains
4138	stq_u r8, 0(r16)
4139	br r31, finished
4140 few_left:
	// write only the low r18 bytes, preserving the rest of the destination
4141	mskql r8, r18, r10
4142	ldq_u r9, 0(r16)
4143	mskqh r9, r18, r9
4144	bis r10, r9, r10
4145	stq_u r10, 0(r16)
4146	br r31, finished
4147unaligned:
	// r25 = one past the last source byte
4148	addq r17, r18, r25
4149	cmpule r18, 8, r9
4150	bne r9, unaligned_few_left
4151	beq r10, unaligned_dest_aligned
	// bring the destination up to quadword alignment first
4152	and r16, 7, r10
4153	subq r31, r10, r10
4154	addq r10, 8, r10	// r10 = bytes to reach dst alignment
4155	ldq_u r9, 7(r17)
4156	extql r8, r17, r8
4157	extqh r9, r17, r9
4158	bis r8, r9, r12
4159	insql r12, r16, r12
4160	ldq_u r13, 0(r16)
4161	mskql r13, r16, r13
4162	bis r12, r13, r12
4163	stq_u r12, 0(r16)
4164	addq r16, r10, r16
4165	addq r17, r10, r17
4166	subq r18, r10, r18
4167	ldq_u r8, 0(r17)
4168unaligned_dest_aligned:
4169	subq r18, 1, r10
4170	bic r10, 7, r10		// r10 = whole quadwords to move
4171	and r18, 7, r18		// r18 = leftover bytes
4172	beq r10, unaligned_partial_left
	// two-quadword software pipeline: r8/r9 alternate as the carried word
4173unaligned_loop:
4174	ldq_u r9, 7(r17)
4175	lda r17, 8(r17)
4176	extql r8, r17, r12
4177	extqh r9, r17, r13
4178	subq r10, 8, r10
4179	bis r12, r13, r13
4180	stq r13, 0(r16)
4181	lda r16, 8(r16)
4182	beq r10, unaligned_second_partial_left
4183	ldq_u r8, 7(r17)
4184	lda r17, 8(r17)
4185	extql r9, r17, r12
4186	extqh r8, r17, r13
4187	bis r12, r13, r13
4188	subq r10, 8, r10
4189	stq r13, 0(r16)
4190	lda r16, 8(r16)
4191	bne r10, unaligned_loop
4192unaligned_partial_left:
4193	mov r8, r9
4194unaligned_second_partial_left:
	// assemble the final (possibly partial) quadword from the source tail
4195	ldq_u r8, -1(r25)
4196	extql r9, r17, r9
4197	extqh r8, r17, r8
4198	bis r8, r9, r8
4199	bne r18, few_left
4200	stq_u r8, 0(r16)
4201	br r31, finished
4202unaligned_few_left:
	// <= 8 bytes total: build byte masks and merge into the two (or one)
	// destination quadwords the range touches
4203	ldq_u r9, -1(r25)
4204	extql r8, r17, r8
4205	extqh r9, r17, r9
4206	bis r8, r9, r8
4207	insqh r8, r16, r9
4208	insql r8, r16, r8
4209	lda r12, -1(r31)
4210	mskql r12, r18, r13
4211	cmovne r13, r13, r12
4212	insqh r12, r16, r13
4213	insql r12, r16, r12
4214	addq r16, r18, r10
4215	ldq_u r14, 0(r16)
4216	ldq_u r25, -1(r10)
4217	bic r14, r12, r14
4218	bic r25, r13, r25
4219	and r8, r12, r8
4220	and r9, r13, r9
4221	bis r8, r14, r8
4222	bis r9, r25, r9
4223	stq_u r9, -1(r10)
4224	stq_u r8, 0(r16)
4225finished:
4226	hw_rei
33// modified to use the Hudson style "impure.h" instead of ev5_impure.sdl
34// since we don't have a mechanism to expand the data structures.... pb Nov/95
35#include "ev5_defs.h"
36#include "ev5_impure.h"
37#include "ev5_alpha_defs.h"
38#include "ev5_paldef.h"
39#include "ev5_osfalpha_defs.h"
40#include "fromHudsonMacros.h"
41#include "fromHudsonOsf.h"
42#include "dc21164FromGasSources.h"
43
// DEBUGSTORE is compiled out in this build: each debug-character write
// becomes a nop.
44#define DEBUGSTORE(c) nop
45
// Dump exc_addr via put_exc_addr, then emit 13/10 (presumably CR/LF) through
// DEBUGSTORE -- the latter two are no-ops while DEBUGSTORE is nop above.
46#define DEBUG_EXC_ADDR()\
47	bsr	r25, put_exc_addr; \
48	DEBUGSTORE(13)		; \
49	DEBUGSTORE(10)
50
51// This is the fix for the user-mode super page references causing the
52// machine to crash.
// hw_rei_spe (super-page-enabled return) is aliased to a plain hw_rei as a
// workaround for the crash described above.
53#define hw_rei_spe	hw_rei
54
// PALcode version identification; assembled into the low version longword
// stored at reset+8: <pal_type@16> ! <vmaj@8> ! <vmin@0>.
55#define vmaj 1
56#define vmin 18
57#define vms_pal 1
58#define osf_pal 2
59#define pal_type osf_pal
60#define osfpal_version_l ((pal_type<<16) | (vmaj<<8) | (vmin<<0))
61
62
63///////////////////////////
64// PALtemp register usage
65///////////////////////////
66
67// The EV5 Ibox holds 24 PALtemp registers. This maps the OSF PAL usage
68// for these PALtemps:
69//
70// pt0 local scratch
71// pt1 local scratch
72// pt2 entUna pt_entUna
73// pt3 CPU specific impure area pointer pt_impure
74// pt4 memory management temp
75// pt5 memory management temp
76// pt6 memory management temp
77// pt7 entIF pt_entIF
78// pt8 intmask pt_intmask
79// pt9 entSys pt_entSys
80// pt10
81// pt11 entInt pt_entInt
82// pt12 entArith pt_entArith
83// pt13 reserved for system specific PAL
84// pt14 reserved for system specific PAL
85// pt15 reserved for system specific PAL
86// pt16 MISC: scratch ! WHAMI<7:0> ! 0 0 0 MCES<4:0> pt_misc, pt_whami,
87// pt_mces
88// pt17 sysval pt_sysval
89// pt18 usp pt_usp
90// pt19 ksp pt_ksp
91// pt20 PTBR pt_ptbr
92// pt21 entMM pt_entMM
93// pt22 kgp pt_kgp
94// pt23 PCBB pt_pcbb
95//
96//
97
98
99/////////////////////////////
100// PALshadow register usage
101/////////////////////////////
102
103//
104// EV5 shadows R8-R14 and R25 when in PALmode and ICSR<shadow_enable> = 1.
105// This maps the OSF PAL usage of R8 - R14 and R25:
106//
107// r8 ITBmiss/DTBmiss scratch
108// r9 ITBmiss/DTBmiss scratch
109// r10 ITBmiss/DTBmiss scratch
110// r11 PS
111// r12 local scratch
112// r13 local scratch
113// r14 local scratch
114// r25 local scratch
115//
116
117
118
119// .sbttl "PALcode configuration options"
120
121// There are a number of options that may be assembled into this version of
122// PALcode. They should be adjusted in a prefix assembly file (i.e. do not edit
123// the following). The options that can be adjusted cause the resultant PALcode
124// to reflect the desired target system.
125
126// multiprocessor support can be enabled for a max of n processors by
127// setting the following to the number of processors on the system.
128// Note that this is really the max cpuid.
129
// Max cpuid supported (the comment above notes this is the max id, not a count).
130#define max_cpuid 1
// NOTE(review): this #ifndef fallback is dead code -- max_cpuid is
// unconditionally defined to 1 just above, so the guard can never fire.
131#ifndef max_cpuid
132#define max_cpuid 8
133#endif
134
135#define osf_svmin 1
// High version longword stored at reset+12: <max_cpuid@16> ! <osf_svmin@0>.
136#define osfpal_version_h ((max_cpuid<<16) | (osf_svmin<<0))
137
138//
139// RESET - Reset Trap Entry Point
140//
141// RESET - offset 0000
142// Entry:
143//	Vectored into via hardware trap on reset, or branched to
144//	on swppal.
145//
146//	r0 = whami
147//	r1 = pal_base
148//	r2 = base of scratch area
149//	r3 = halt code
150//
151//
152// Function:
153//
// The PAL base doubles as _start.  The reset vector just branches to the
// system-specific sys_reset; the version longwords live at fixed offsets
// (reset + 8) so software can identify the PALcode.
154//
155
156	.text	0
157	. = 0x0000
158	.globl _start
159	.globl Pal_Base
160_start:
161Pal_Base:
162	HDW_VECTOR(PAL_RESET_ENTRY)
163Trap_Reset:
164	nop
165	/*
166	 * br writes its link address into r1 for sys_reset ("store into r1")
167	 */
168	br	r1,sys_reset
169
170	// Specify PAL version info as a constant
171	// at a known location (reset + 8).
172
173	.long osfpal_version_l		// <pal_type@16> ! <vmaj@8> ! <vmin@0>
174	.long osfpal_version_h		// <max_cpuid@16> ! <osf_svmin@0>
175	.long 0				// reserved
176	.long 0				// reserved
177pal_impure_start:
178	.quad 0				// presumably patched with the impure area base -- verify against console setup
179pal_debug_ptr:
180	.quad 0				// reserved for debug pointer ; 20
181
182
183//
184// IACCVIO - Istream Access Violation Trap Entry Point
185//
186// IACCVIO - offset 0080
187// Entry:
188//	Vectored into via hardware trap on Istream access violation or sign check error on PC.
189//
190// Function:
191//	Build stack frame
192//	a0 <- Faulting VA
193//	a1 <- MMCSR (1 for ACV)
194//	a2 <- -1 (for ifetch fault)
195//	vector via entMM
196//
// Register convention here (unlike Call_Pal_Bpt): r11 keeps the OLD PS
// until it is stored into the frame; r12 holds the PS to install afterward
// (old PS when staying in kernel, 0 after a user->kernel stack swap).
197
198	HDW_VECTOR(PAL_IACCVIO_ENTRY)
199Trap_Iaccvio:
200	DEBUGSTORE(0x42)
201	sll	r11, 63-osfps_v_mode, r25 // Shift mode up to MS bit
202	mtpr	r31, ev5__ps		// Set Ibox current mode to kernel
203
204	bis	r11, r31, r12		// Save PS (r12 = PS to install after the frame push)
205	bge	r25, TRAP_IACCVIO_10_	// no stack swap needed if cm=kern
206
207
208	mtpr	r31, ev5__dtb_cm	// Set Mbox current mode to kernel -
209					//     no virt ref for next 2 cycles
210	mtpr	r30, pt_usp		// save user stack
211
212	bis	r31, r31, r12		// Set new PS (0 = kernel) for the user->kern path
213	mfpr	r30, pt_ksp
214
215TRAP_IACCVIO_10_:
216	lda	sp, 0-osfsf_c_size(sp)// allocate stack space
217	mfpr	r14, exc_addr		// get pc
218
219	stq	r16, osfsf_a0(sp)	// save regs
220	bic	r14, 3, r16		// pass pc/va as a0
221
222	stq	r17, osfsf_a1(sp)	// a1
223	or	r31, mmcsr_c_acv, r17	// pass mm_csr as a1
224
225	stq	r18, osfsf_a2(sp) 	// a2
226	mfpr	r13, pt_entmm		// get entry point
227
228	stq	r11, osfsf_ps(sp)	// save old ps
229	bis	r12, r31, r11		// update ps
230
231	stq	r16, osfsf_pc(sp)	// save pc
232	stq	r29, osfsf_gp(sp) 	// save gp
233
234	mtpr	r13, exc_addr		// load exc_addr with entMM
235					// 1 cycle to hw_rei
236	mfpr	r29, pt_kgp		// get the kgp
237
238	subq	r31, 1, r18		// pass flag of istream, as a2
239	hw_rei_spe
240
241
242//
243// INTERRUPT - Interrupt Trap Entry Point
244//
245// INTERRUPT - offset 0100
246// Entry:
247//	Vectored into via trap on hardware interrupt
248//
249// Function:
250//	check for halt interrupt
251//	check for passive release (current ipl geq requestor)
252//	if necessary, switch to kernel mode push stack frame,
253//	update ps (including current mode and ipl copies), sp, and gp
254//	pass the interrupt info to the system module
255//
256//
257	HDW_VECTOR(PAL_INTERRUPT_ENTRY)
258Trap_Interrupt:
259	mfpr	r13, ev5__intid		// Fetch level of interruptor
260	mfpr	r25, ev5__isr		// Fetch interrupt summary register
261
262	srl	r25, isr_v_hlt, r9	// Get HLT bit
263	mfpr	r14, ev5__ipl
264
265	mtpr	r31, ev5__dtb_cm	// Set Mbox current mode to kern
266	blbs	r9, sys_halt_interrupt	// halt_interrupt if HLT bit set
267
268	cmple	r13, r14, r8		// R8 = 1 if intid .less than or eql. ipl
269	bne	r8, sys_passive_release	// Passive release if current rupt is lt or eq ipl
270
271	and	r11, osfps_m_mode, r10 // get mode bit
272	beq	r10, TRAP_INTERRUPT_10_	// Skip stack swap in kernel
273
274	mtpr	r30, pt_usp		// save user stack
275	mfpr	r30, pt_ksp		// get kern stack
276
277TRAP_INTERRUPT_10_:
278	lda	sp, (0-osfsf_c_size)(sp)// allocate stack space
279	mfpr	r14, exc_addr		// get pc
280
281	stq	r11, osfsf_ps(sp) 	// save ps
282	stq	r14, osfsf_pc(sp) 	// save pc
283
284	stq     r29, osfsf_gp(sp)	// push gp
285	stq	r16, osfsf_a0(sp)	// a0
286
287//	pvc_violate 354			// ps is cleared anyway,  if store to stack faults.
288	mtpr	r31, ev5__ps		// Set Ibox current mode to kernel
289	stq	r17, osfsf_a1(sp)	// a1
290
291	stq	r18, osfsf_a2(sp) 	// a2
292	subq	r13, 0x11, r12		// Start to translate from EV5IPL->OSFIPL
293
294	srl	r12, 1, r8		// 1d, 1e: ipl 6.  1f: ipl 7.
295	subq	r13, 0x1d, r9		// Check for 1d, 1e, 1f
296
297	cmovge	r9, r8, r12		// if .ge. 1d, then take shifted value
298	bis	r12, r31, r11		// set new ps
299
300	mfpr	r12, pt_intmask
301	and	r11, osfps_m_ipl, r14	// Isolate just new ipl (not really needed, since all non-ipl bits zeroed already)
302
303	/*
304	 * Lance had space problems.  We don't.
305	 */
306	extbl	r12, r14, r14		// Translate new OSFIPL->EV5IPL
307	mfpr	r29, pt_kgp		// update gp
308	mtpr	r14, ev5__ipl		// load the new IPL into Ibox
309	br	r31, sys_interrupt	// Go handle interrupt
310
311
312
313//
314// ITBMISS - Istream TBmiss Trap Entry Point
315//
316// ITBMISS - offset 0180
317// Entry:
318//	Vectored into via hardware trap on Istream translation buffer miss.
319//
320// Function:
321//	Do a virtual fetch of the PTE, and fill the ITB if the PTE is valid.
322//	Can trap into DTBMISS_DOUBLE.
323//	This routine can use the PALshadow registers r8, r9, and r10
324//
325//
326
327	HDW_VECTOR(PAL_ITB_MISS_ENTRY)
328Trap_Itbmiss:
329	// Real MM mapping
330	nop
331	mfpr	r8, ev5__ifault_va_form	// Get virtual address of PTE.
332
333	nop
334	mfpr	r10, exc_addr		// Get PC of faulting instruction in case of DTBmiss.
335
	// The label is load-faultable: dfault_in_pal recognizes this PC to
	// distinguish ITB-miss page-table faults from DTB-miss ones.
336pal_itb_ldq:
337	ld_vpte	r8, 0(r8)		// Get PTE, traps to DTBMISS_DOUBLE in case of TBmiss
338	mtpr	r10, exc_addr		// Restore exc_address if there was a trap.
339
340	mfpr	r31, ev5__va		// Unlock VA in case there was a double miss
341	nop
342
343	and	r8, osfpte_m_foe, r25	// Look for FOE set.
344	blbc	r8, invalid_ipte_handler // PTE not valid.
345
346	nop
347	bne	r25, foe_ipte_handler	// FOE is set
348
349	nop
350	mtpr	r8, ev5__itb_pte	// Ibox remembers the VA, load the PTE into the ITB.
351
352	hw_rei_stall			//
353
353
354
355//
356// DTBMISS_SINGLE - Dstream Single TBmiss Trap Entry Point
357//
358// DTBMISS_SINGLE - offset 0200
359// Entry:
360//	Vectored into via hardware trap on Dstream single translation
361//	buffer miss.
362//
363// Function:
364//	Do a virtual fetch of the PTE, and fill the DTB if the PTE is valid.
365//	Can trap into DTBMISS_DOUBLE.
366//	This routine can use the PALshadow registers r8, r9, and r10
367//
368

369	HDW_VECTOR(PAL_DTB_MISS_ENTRY)
370Trap_Dtbmiss_Single:
371	mfpr	r8, ev5__va_form	// Get virtual address of PTE - 1 cycle delay.  E0.
372	mfpr	r10, exc_addr		// Get PC of faulting instruction in case of error.  E1.

373// DEBUGSTORE(0x45)
374// DEBUG_EXC_ADDR()
375	// Real MM mapping
376	mfpr	r9, ev5__mm_stat	// Get read/write bit.  E0.
	// exc_addr is stashed in pt6 (not a GPR) so the DOUBLE flow and
	// invalid_dpte_handler can recover the original PC.
377	mtpr	r10, pt6		// Stash exc_addr away

	// Load-faultable label recognized by dfault_in_pal (see pal_itb_ldq).
378pal_dtb_ldq:
379	ld_vpte	r8, 0(r8)		// Get PTE, traps to DTBMISS_DOUBLE in case of TBmiss
380	nop				// Pad MF VA

381	mfpr	r10, ev5__va		// Get original faulting VA for TB load.  E0.
382	nop

383	mtpr	r8, ev5__dtb_pte	// Write DTB PTE part.  E0.
384	blbc	r8, invalid_dpte_handler // Handle invalid PTE

385	mtpr	r10, ev5__dtb_tag	// Write DTB TAG part, completes DTB load.  No virt ref for 3 cycles.
386	mfpr	r10, pt6

387	// Following 2 instructions take 2 cycles
388	mtpr	r10, exc_addr		// Return linkage in case we trapped.  E1.
389	mfpr	r31, pt0		// Pad the write to dtb_tag

390	hw_rei				// Done, return
398
399
400//
401// DTBMISS_DOUBLE - Dstream Double TBmiss Trap Entry Point
402//
403//
404// DTBMISS_DOUBLE - offset 0280
405// Entry:
406//	Vectored into via hardware trap on Double TBmiss from single
407//	miss flows.
408//
409//	r8   - faulting VA
410//	r9   - original MMstat
411//	r10  - original exc_addr (both itb,dtb miss)
412//	pt6  - original exc_addr (dtb miss flow only)
413//	VA IPR - locked with original faulting VA
414//
415// Function:
416//	Get PTE, if valid load TB and return.
417//	If not valid then take TNV/ACV exception.
418//
419//	pt4 and pt5 are reserved for this flow.
420//
421//
422//

423	HDW_VECTOR(PAL_DOUBLE_MISS_ENTRY)
424Trap_Dtbmiss_double:
425	mtpr	r8, pt4			// save r8 to do exc_addr check
426	mfpr	r8, exc_addr
	// exc_addr<0> set means the faulting reference came from PALmode; if
	// clear we arrived here spuriously and re-run the single-miss flow.
427	blbc	r8, Trap_Dtbmiss_Single	//if not in palmode, should be in the single routine, dummy!
428	mfpr	r8, pt4			// restore r8
429	nop
430	mtpr	r22, pt5		// Get some scratch space. E1.
431	// Due to virtual scheme, we can skip the first lookup and go
432	// right to fetch of level 2 PTE
	// Manual 3-level page-table walk (physical loads) for the level-1 VPTE
	// address in r8; segment extracts use shift pairs instead of extbl.
433	sll	r8, (64-((2*page_seg_size_bits)+page_offset_size_bits)), r22	// Clean off upper bits of VA
434	mtpr	r21, pt4		// Get some scratch space. E1.

435	srl	r22, 61-page_seg_size_bits, r22	// Get Va<seg1>*8
436	mfpr	r21, pt_ptbr		// Get physical address of the page table.

437	nop
438	addq	r21, r22, r21		// Index into page table for level 2 PTE.

439	sll	r8, (64-((1*page_seg_size_bits)+page_offset_size_bits)), r22	// Clean off upper bits of VA
440	ldq_p	r21, 0(r21)		// Get level 2 PTE (addr<2:0> ignored)

441	srl	r22, 61-page_seg_size_bits, r22	// Get Va<seg1>*8
442	blbc	r21, double_pte_inv	// Check for Invalid PTE.

443	srl	r21, 32, r21		// extract PFN from PTE
444	sll	r21, page_offset_size_bits, r21	// get PFN * 2^13 for add to <seg3>*8

445	addq	r21, r22, r21		// Index into page table for level 3 PTE.
446	nop

447	ldq_p	r21, 0(r21)		// Get level 3 PTE (addr<2:0> ignored)
448	blbc	r21, double_pte_inv	// Check for invalid PTE.

449	mtpr	r21, ev5__dtb_pte	// Write the PTE.  E0.
450	mfpr	r22, pt5		// Restore scratch register

451	mtpr	r8, ev5__dtb_tag	// Write the TAG. E0.  No virtual references in subsequent 3 cycles.
452	mfpr	r21, pt4		// Restore scratch register

453	nop				// Pad write to tag.
454	nop

455	nop				// Pad write to tag.
456	nop

457	hw_rei
471
472
473
474//
475// UNALIGN -- Dstream unalign trap
//	Dismisses unaligned loads that target r31/f31; otherwise builds an
//	OSF stack frame and dispatches via unalign_trap_cont to entUna.
476//
477// UNALIGN - offset 0300
478// Entry:
479//	Vectored into via hardware trap on unaligned Dstream reference.
480//
481// Function:
482//	Build stack frame
483//	a0 <- Faulting VA
484//	a1 <- Opcode
485//	a2 <- src/dst register number
486//	vector via entUna
487//

488	HDW_VECTOR(PAL_UNALIGN_ENTRY)
489Trap_Unalign:
490/* DEBUGSTORE(0x47)*/
491	sll	r11, 63-osfps_v_mode, r25 // Shift mode up to MS bit
492	mtpr	r31, ev5__ps		// Set Ibox current mode to kernel

493	mfpr	r8, ev5__mm_stat	// Get mmstat--ok to use r8, no tbmiss
494	mfpr	r14, exc_addr		// get pc

495	srl	r8, mm_stat_v_ra, r13	// Shift Ra field to ls bits
496	blbs	r14, pal_pal_bug_check	// Bugcheck if unaligned in PAL

497	blbs	r8, UNALIGN_NO_DISMISS	// lsb only set on store or fetch_m
					// not set, must be a load
498	and	r13, 0x1F, r8		// isolate ra

499	cmpeq	r8, 0x1F, r8		// check for r31/F31
500	bne	r8, dfault_fetch_ldr31_err // if its a load to r31 or f31 -- dismiss the fault

501UNALIGN_NO_DISMISS:
502	bis	r11, r31, r12		// Save PS
503	bge	r25, UNALIGN_NO_DISMISS_10_ // no stack swap needed if cm=kern


504	mtpr	r31, ev5__dtb_cm	// Set Mbox current mode to kernel -
					//     no virt ref for next 2 cycles
505	mtpr	r30, pt_usp		// save user stack

506	bis	r31, r31, r12		// Set new PS
507	mfpr	r30, pt_ksp

508UNALIGN_NO_DISMISS_10_:
509	mfpr	r25, ev5__va		// Unlock VA
510	lda	sp, 0-osfsf_c_size(sp)	// allocate stack space

511	mtpr	r25, pt0		// Stash VA
512	stq	r18, osfsf_a2(sp)	// a2

513	stq	r11, osfsf_ps(sp)	// save old ps
514	srl	r13, mm_stat_v_opcode-mm_stat_v_ra, r25 // Isolate opcode

515	stq	r29, osfsf_gp(sp)	// save gp
516	addq	r14, 4, r14		// inc PC past the ld/st

517	stq	r17, osfsf_a1(sp)	// a1
518	and	r25, mm_stat_m_opcode, r17 // Clean opcode for a1

519	stq	r16, osfsf_a0(sp)	// save regs
520	mfpr	r16, pt0		// a0 <- va/unlock

521	stq	r14, osfsf_pc(sp)	// save pc
522	mfpr	r25, pt_entuna		// get entry point


523	bis	r12, r31, r11		// update ps
524	br	r31, unalign_trap_cont
545
546
547//
548// DFAULT - Dstream Fault Trap Entry Point
549//
550// DFAULT - offset 0380
551// Entry:
552//	Vectored into via hardware trap on dstream fault or sign check
553//	error on DVA.
554//
555// Function:
556//	Ignore faults on FETCH/FETCH_M
557//	Check for DFAULT in PAL
558//	Build stack frame
559//	a0 <- Faulting VA
560//	a1 <- MMCSR (1 for ACV, 2 for FOR, 4 for FOW)
561//	a2 <- R/W
562//	vector via entMM
563//
564//
565	HDW_VECTOR(PAL_D_FAULT_ENTRY)
566Trap_Dfault:
567// DEBUGSTORE(0x48)
568	sll	r11, 63-osfps_v_mode, r25 // Shift mode up to MS bit
569	mtpr	r31, ev5__ps		// Set Ibox current mode to kernel

570	mfpr	r13, ev5__mm_stat	// Get mmstat
	// r8 (not r14) holds the PC here so r14 survives into dfault_in_pal,
	// which expects the original exc_addr there.
571	mfpr	r8, exc_addr		// get pc, preserve r14

572	srl	r13, mm_stat_v_opcode, r9 // Shift opcode field to ls bits
573	blbs	r8, dfault_in_pal

574	bis	r8, r31, r14		// move exc_addr to correct place
575	bis	r11, r31, r12		// Save PS

576	mtpr	r31, ev5__dtb_cm	// Set Mbox current mode to kernel -
					//     no virt ref for next 2 cycles
577	and	r9, mm_stat_m_opcode, r9 // Clean all but opcode

578	cmpeq	r9, evx_opc_sync, r9	// Is the opcode fetch/fetchm?
579	bne	r9, dfault_fetch_ldr31_err // Yes, dismiss the fault

	//dismiss exception if load to r31/f31
580	blbs	r13, dfault_no_dismiss	// mm_stat<0> set on store or fetchm

	// not a store or fetch, must be a load
581	srl	r13, mm_stat_v_ra, r9	// Shift rnum to low bits

582	and	r9, 0x1F, r9		// isolate rnum
583	nop

584	cmpeq	r9, 0x1F, r9		// Is the rnum r31 or f31?
585	bne	r9, dfault_fetch_ldr31_err // Yes, dismiss the fault

586dfault_no_dismiss:
587	and	r13, 0xf, r13		// Clean extra bits in mm_stat
588	bge	r25, dfault_trap_cont	// no stack swap needed if cm=kern


589	mtpr	r30, pt_usp		// save user stack
590	bis	r31, r31, r12		// Set new PS

591	mfpr	r30, pt_ksp
592	br	r31, dfault_trap_cont
609
610
611//
612// MCHK - Machine Check Trap Entry Point
613//
614// MCHK - offset 0400
615// Entry:
616//	Vectored into via hardware trap on machine check.
617//
618// Function:
619//	Flush the Icache, then branch to the platform-specific
620//	machine-check handler (sys_machine_check).
621

622	HDW_VECTOR(PAL_MCHK_ENTRY)
623Trap_Mchk:
624	DEBUGSTORE(0x49)
625	mtpr	r31, ic_flush_ctl	// Flush the Icache
626	br	r31, sys_machine_check
627
628
629//
630// OPCDEC - Illegal Opcode Trap Entry Point
631//
632// OPCDEC - offset 0480
633// Entry:
634//	Vectored into via hardware trap on illegal opcode.
635//
636//	Build stack frame
637//	a0 <- code
638//	a1 <- unpred
639//	a2 <- unpred
640//	vector via entIF
641//
642//

643	HDW_VECTOR(PAL_OPCDEC_ENTRY)
644Trap_Opcdec:
645	DEBUGSTORE(0x4a)
646//simos	DEBUG_EXC_ADDR()
647	sll	r11, 63-osfps_v_mode, r25 // Shift mode up to MS bit
648	mtpr	r31, ev5__ps		// Set Ibox current mode to kernel

649	mfpr	r14, exc_addr		// get pc
650	blbs	r14, pal_pal_bug_check	// check opcdec in palmode

651	bis	r11, r31, r12		// Save PS
652	bge	r25, TRAP_OPCDEC_10_	// no stack swap needed if cm=kern


653	mtpr	r31, ev5__dtb_cm	// Set Mbox current mode to kernel -
					//     no virt ref for next 2 cycles
654	mtpr	r30, pt_usp		// save user stack

655	bis	r31, r31, r12		// Set new PS
656	mfpr	r30, pt_ksp

657TRAP_OPCDEC_10_:
658	lda	sp, 0-osfsf_c_size(sp)	// allocate stack space
	// Saved PC is the instruction after the faulting opcode (OSF
	// convention for OPCDEC: a restart re-executes the next instruction).
659	addq	r14, 4, r14		// inc pc

660	stq	r16, osfsf_a0(sp)	// save regs
661	bis	r31, osf_a0_opdec, r16	// set a0

662	stq	r11, osfsf_ps(sp)	// save old ps
663	mfpr	r13, pt_entif		// get entry point

664	stq	r18, osfsf_a2(sp)	// a2
665	stq	r17, osfsf_a1(sp)	// a1

666	stq	r29, osfsf_gp(sp)	// save gp
667	stq	r14, osfsf_pc(sp)	// save pc

668	bis	r12, r31, r11		// update ps
669	mtpr	r13, exc_addr		// load exc_addr with entIF
					// 1 cycle to hw_rei, E1

670	mfpr	r29, pt_kgp		// get the kgp, E1

671	hw_rei_spe			// done, E1
688
689
690//
691// ARITH - Arithmetic Exception Trap Entry Point
692//
693// ARITH - offset 0500
694// Entry:
695//	Vectored into via hardware trap on arithmetic exception.
696//
697// Function:
698//	Build stack frame
699//	a0 <- exc_sum
700//	a1 <- exc_mask
701//	a2 <- unpred
702//	vector via entArith
703//
704//
705	HDW_VECTOR(PAL_ARITH_ENTRY)
706Trap_Arith:
707	DEBUGSTORE(0x4b)
708	and	r11, osfps_m_mode, r12	// get mode bit
709	mfpr	r31, ev5__va		// unlock mbox

710	bis	r11, r31, r25		// save ps
711	mfpr	r14, exc_addr		// get pc

712	nop
713	blbs	r14, pal_pal_bug_check	// arith trap from PAL

714	mtpr	r31, ev5__dtb_cm	// Set Mbox current mode to kernel -
					//     no virt ref for next 2 cycles
715	beq	r12, TRAP_ARITH_10_	// if zero we are in kern now

716	bis	r31, r31, r25		// set the new ps
717	mtpr	r30, pt_usp		// save user stack

718	nop
719	mfpr	r30, pt_ksp		// get kern stack

720TRAP_ARITH_10_:	lda	sp, 0-osfsf_c_size(sp) // allocate stack space
721	mtpr	r31, ev5__ps		// Set Ibox current mode to kernel

722	nop				// Pad current mode write and stq
723	mfpr	r13, ev5__exc_sum	// get the exc_sum

724	mfpr	r12, pt_entarith
725	stq	r14, osfsf_pc(sp)	// save pc

726	stq	r17, osfsf_a1(sp)
727	mfpr	r17, ev5__exc_mask	// Get exception register mask IPR - no mtpr exc_sum in next cycle

728	stq	r11, osfsf_ps(sp)	// save ps
729	bis	r25, r31, r11		// set new ps

730	stq	r16, osfsf_a0(sp)	// save regs
	// a0 is exc_sum right-shifted so the SWC bit lands at bit 0, per the
	// OSF entArith convention.
731	srl	r13, exc_sum_v_swc, r16	// shift data to correct position

732	stq	r18, osfsf_a2(sp)
733// pvc_violate 354 // ok, but make sure reads of exc_mask/sum are not in same trap shadow
734	mtpr	r31, ev5__exc_sum	// Unlock exc_sum and exc_mask

735	stq	r29, osfsf_gp(sp)
736	mtpr	r12, exc_addr		// Set new PC - 1 bubble to hw_rei - E1

737	mfpr	r29, pt_kgp		// get the kern gp - E1
738	hw_rei_spe			// done - E1
754
755
756//
757// FEN - Illegal Floating Point Operation Trap Entry Point
758//
759// FEN - offset 0580
760// Entry:
761//	Vectored into via hardware trap on illegal FP op.
762//
763// Function:
764//	Build stack frame
765//	a0 <- code
766//	a1 <- unpred
767//	a2 <- unpred
768//	vector via entIF
769//
770//

771	HDW_VECTOR(PAL_FEN_ENTRY)
772Trap_Fen:
773	sll	r11, 63-osfps_v_mode, r25 // Shift mode up to MS bit
774	mtpr	r31, ev5__ps		// Set Ibox current mode to kernel

775	mfpr	r14, exc_addr		// get pc
776	blbs	r14, pal_pal_bug_check	// check opcdec in palmode

	// icsr<FPE> decides later whether this is a true FEN fault or an
	// OPCDEC that merely trapped through the FEN vector.
777	mfpr	r13, ev5__icsr
778	nop

779	bis	r11, r31, r12		// Save PS
780	bge	r25, TRAP_FEN_10_	// no stack swap needed if cm=kern

781	mtpr	r31, ev5__dtb_cm	// Set Mbox current mode to kernel -
					//     no virt ref for next 2 cycles
782	mtpr	r30, pt_usp		// save user stack

783	bis	r31, r31, r12		// Set new PS
784	mfpr	r30, pt_ksp

785TRAP_FEN_10_:
786	lda	sp, 0-osfsf_c_size(sp)	// allocate stack space
787	srl	r13, icsr_v_fpe, r25	// Shift FP enable to bit 0


788	stq	r16, osfsf_a0(sp)	// save regs
789	mfpr	r13, pt_entif		// get entry point

790	stq	r18, osfsf_a2(sp)	// a2
791	stq	r11, osfsf_ps(sp)	// save old ps

792	stq	r29, osfsf_gp(sp)	// save gp
793	bis	r12, r31, r11		// set new ps

794	stq	r17, osfsf_a1(sp)	// a1
795	blbs	r25,fen_to_opcdec	// If FP is enabled, this is really OPCDEC.

796	bis	r31, osf_a0_fen, r16	// set a0
797	stq	r14, osfsf_pc(sp)	// save pc

798	mtpr	r13, exc_addr		// load exc_addr with entIF
					// 1 cycle to hw_rei -E1

799	mfpr	r29, pt_kgp		// get the kgp -E1

800	hw_rei_spe			// done -E1

// FEN trap was taken, but the fault is really opcdec.
	ALIGN_BRANCH
fen_to_opcdec:
	// OPCDEC convention saves PC+4 (see Trap_Opcdec); a0 is switched
	// from osf_a0_fen to osf_a0_opdec.  Frame already built above.
	addq	r14, 4, r14		// save PC+4
	bis	r31, osf_a0_opdec, r16	// set a0

	stq	r14, osfsf_pc(sp)	// save pc
	mtpr	r13, exc_addr		// load exc_addr with entIF
					// 1 cycle to hw_rei

	mfpr	r29, pt_kgp		// get the kgp
	hw_rei_spe			// done
832
833
834
835//////////////////////////////////////////////////////////////////////////////
836// Misc handlers - Start area for misc code.
837//////////////////////////////////////////////////////////////////////////////

839//
840// dfault_trap_cont
841//	A dfault trap has been taken.  The sp has been updated if necessary.
842//	Push a stack frame and vector via entMM.
843//
844// Current state:
845//	r12 - new PS
846//	r13 - MMstat
847//	VA - locked
848//
849//
	ALIGN_BLOCK
dfault_trap_cont:
	lda	sp, 0-osfsf_c_size(sp)	// allocate stack space
	mfpr	r25, ev5__va		// Fetch VA/unlock

	stq	r18, osfsf_a2(sp)	// a2
	and	r13, 1, r18		// Clean r/w bit for a2

	stq	r16, osfsf_a0(sp)	// save regs
	bis	r25, r31, r16		// a0 <- va

	stq	r17, osfsf_a1(sp)	// a1
	srl	r13, 1, r17		// shift fault bits to right position

	stq	r11, osfsf_ps(sp)	// save old ps
	bis	r12, r31, r11		// update ps

	stq	r14, osfsf_pc(sp)	// save pc
	mfpr	r25, pt_entmm		// get entry point

	stq	r29, osfsf_gp(sp)	// save gp
	// If the ACV bit was set it wins over FOR/FOW: force a1 = 1 (ACV).
	cmovlbs	r17, 1, r17		// a2. acv overrides fox.

	mtpr	r25, exc_addr		// load exc_addr with entMM
					// 1 cycle to hw_rei
	mfpr	r29, pt_kgp		// get the kgp

	hw_rei_spe			// done
878
879//
880//unalign_trap_cont
881//	An unalign trap has been taken.  Just need to finish up a few things.
//	Frame and a0/a1 were built by Trap_Unalign; here we set the vector,
//	finish a2 (register number), and rei to entUna.
882//
883// Current state:
884//	r25 - entUna
885//	r13 - shifted MMstat
886//
887//
	ALIGN_BLOCK
unalign_trap_cont:
	mtpr	r25, exc_addr		// load exc_addr with entUna
					// 1 cycle to hw_rei


	mfpr	r29, pt_kgp		// get the kgp
	and	r13, mm_stat_m_ra, r18	// Clean Ra for a2

	hw_rei_spe			// done
898
899
900
901//
902// dfault_in_pal
903//	Dfault trap was taken, exc_addr points to a PAL PC.
904//	r9 - mmstat<opcode> right justified
905//	r8 - exception address
906//
907// These are the cases:
908//	opcode was STQ -- from a stack builder, KSP not valid halt
909//	  r14 - original exc_addr
910//	  r11 - original PS
911//	opcode was STL_C  -- rti or retsys clear lock_flag by stack write,
912//	  KSP not valid halt
913//	  r11 - original PS
914//	  r14 - original exc_addr
915//	opcode was LDQ -- retsys or rti stack read, KSP not valid halt
916//	  r11 - original PS
917//	  r14 - original exc_addr
918//	opcode was HW_LD -- itbmiss or dtbmiss, bugcheck due to fault on page tables
919//	  r10 - original exc_addr
920//	  r11 - original PS
921//
922//
923//
	ALIGN_BLOCK
dfault_in_pal:
	DEBUGSTORE(0x50)
	bic	r8, 3, r8		// Clean PC
	mfpr	r9, pal_base

	mfpr	r31, va			// unlock VA

	// if not real_mm, should never get here from miss flows

	// Compare the faulting PAL PC against the two load-faultable miss-flow
	// labels; a match means the fault hit the page tables -> bugcheck.
	// Any other PAL PC means a stack builder faulted -> KSP invalid halt
	// (fall through to ksp_inval_halt).
	subq	r9, r8, r8		// pal_base - offset

	lda	r9, pal_itb_ldq-pal_base(r8)
	nop

	beq	r9, dfault_do_bugcheck
	lda	r9, pal_dtb_ldq-pal_base(r8)

	beq	r9, dfault_do_bugcheck
943
944//
945// KSP invalid halt case --
//	Force kernel mode, save state to the PCB, and enter the console with
//	halt code hlt_c_ksp_inval.  Reached by fall-through from
//	dfault_in_pal and by branch from tnv_in_pal.
946ksp_inval_halt:
947	DEBUGSTORE(76)
948	bic	r11, osfps_m_mode, r11	// set ps to kernel mode
949	mtpr	r0, pt0

950	mtpr	r31, dtb_cm		// Make sure that the CM IPRs are all kernel mode
951	mtpr	r31, ips

952	mtpr	r14, exc_addr		// Set PC to instruction that caused trouble
953	bsr	r0, pal_update_pcb	// update the pcb

954	lda	r0, hlt_c_ksp_inval(r31) // set halt code to KSP invalid
955	br	r31, sys_enter_console	// enter the console
959
960	ALIGN_BRANCH
	// Page-table fault in a miss flow: hand the ORIGINAL faulting PC
	// (stashed in r10 by the miss flows) to the bugcheck handler.
961dfault_do_bugcheck:
962	bis	r10, r31, r14		// bugcheck expects exc_addr in r14
963	br	r31, pal_pal_bug_check
964
965
966//
967// dfault_fetch_ldr31_err - ignore faults on fetch(m) and loads to r31/f31
//	Restores the caller's PS/CM (they may have been forced to kernel on
//	the way in), bumps the PC past the instruction, and dismisses.
968// On entry -
969//	r14 - exc_addr
970//	VA is locked
971//
972//
	ALIGN_BLOCK
dfault_fetch_ldr31_err:
	mtpr	r11, ev5__dtb_cm
	mtpr	r11, ev5__ps		// Make sure ps hasn't changed

	mfpr	r31, va			// unlock the mbox
	addq	r14, 4, r14		// inc the pc to skip the fetch

	mtpr	r14, exc_addr		// give ibox new PC
	mfpr	r31, pt0		// pad exc_addr write

	hw_rei
985
986
987
988	ALIGN_BLOCK
989//
990// sys_from_kern
991//	callsys from kernel mode - OS bugcheck machine check
//	Rewind PC to the call_pal itself and report an OS bugcheck mchk.
992//
993//
994sys_from_kern:
995	mfpr	r14, exc_addr		// PC points to call_pal
996	subq	r14, 4, r14

997	lda	r25, mchk_c_os_bugcheck(r31) // fetch mchk code
998	br	r31, pal_pal_mchk
1000
1001
1002// Continuation of long call_pal flows
1003//
1004// wrent_tbl
1005//	Table to write *int in paltemps.
1006//	4 instructions/entry
1007//	r16 has new value
//	NOTE(review): the wrent dispatcher appears to jump into this table by
//	entry index, so each entry must stay exactly 4 instructions (16 bytes)
//	-- do not add or remove instructions within an entry.
1008//
1009//
	ALIGN_BLOCK
wrent_tbl:
//orig	pvc_jsr	wrent, dest=1
	nop				// entry 0: entInt
	mtpr	r16, pt_entint

	mfpr	r31, pt0		// Pad for mt->mf paltemp rule
	hw_rei


//orig	pvc_jsr	wrent, dest=1
	nop				// entry 1: entArith
	mtpr	r16, pt_entarith

	mfpr	r31, pt0		// Pad for mt->mf paltemp rule
	hw_rei


//orig	pvc_jsr	wrent, dest=1
	nop				// entry 2: entMM
	mtpr	r16, pt_entmm

	mfpr	r31, pt0		// Pad for mt->mf paltemp rule
	hw_rei


//orig	pvc_jsr	wrent, dest=1
	nop				// entry 3: entIF
	mtpr	r16, pt_entif

	mfpr	r31, pt0		// Pad for mt->mf paltemp rule
	hw_rei


//orig	pvc_jsr	wrent, dest=1
	nop				// entry 4: entUna
	mtpr	r16, pt_entuna

	mfpr	r31, pt0		// Pad for mt->mf paltemp rule
	hw_rei


//orig	pvc_jsr	wrent, dest=1
	nop				// entry 5: entSys
	mtpr	r16, pt_entsys

	mfpr	r31, pt0		// Pad for mt->mf paltemp rule
	hw_rei
1058
1059	ALIGN_BLOCK
1060//
1061// tbi_tbl
1062//	Table to do tbi instructions
1063//	4 instructions per entry
//	Indexed by the TBI operand type (-2=tbia, -1=tbiap, 1=tbisi,
//	2=tbisd, 3=tbis); entries must stay exactly 4 instructions each.
1064//
tbi_tbl:
	// -2 tbia
//orig	pvc_jsr	tbi, dest=1
	mtpr	r31, ev5__dtb_ia	// Flush DTB
	mtpr	r31, ev5__itb_ia	// Flush ITB

	hw_rei_stall

	nop				// Pad table

	// -1 tbiap
//orig	pvc_jsr	tbi, dest=1
	mtpr	r31, ev5__dtb_iap	// Flush DTB
	mtpr	r31, ev5__itb_iap	// Flush ITB

	hw_rei_stall

	nop				// Pad table


	// 0 unused
//orig	pvc_jsr	tbi, dest=1
	hw_rei				// Pad table
	nop
	nop
	nop


	// 1 tbisi
//orig	pvc_jsr	tbi, dest=1

	nop
	nop
	mtpr	r17, ev5__itb_is	// Flush ITB
	hw_rei_stall

	// 2 tbisd
//orig	pvc_jsr	tbi, dest=1
	mtpr	r17, ev5__dtb_is	// Flush DTB.
	nop

	nop
	hw_rei_stall


	// 3 tbis
//orig	pvc_jsr	tbi, dest=1
	mtpr	r17, ev5__dtb_is	// Flush DTB
	br	r31, tbi_finish		// ITB flush overflows the 4-insn slot
	ALIGN_BRANCH
tbi_finish:
	mtpr	r17, ev5__itb_is	// Flush ITB
	hw_rei_stall
1118
1119
1120
1121	ALIGN_BLOCK
1122//
1123// bpt_bchk_common:
1124//	Finish up the bpt/bchk instructions
//	Expects the stack swap and a0/a1 setup done by the caller; r12 holds
//	the PS to save, r14 the PC to save.  Vectors via entIF.
1125//
bpt_bchk_common:
	stq	r18, osfsf_a2(sp)	// a2
	mfpr	r13, pt_entif		// get entry point

	stq	r12, osfsf_ps(sp)	// save old ps
	stq	r14, osfsf_pc(sp)	// save pc

	stq	r29, osfsf_gp(sp)	// save gp
	mtpr	r13, exc_addr		// load exc_addr with entIF
					// 1 cycle to hw_rei

	mfpr	r29, pt_kgp		// get the kgp


	hw_rei_spe			// done
1141
1142
1143	ALIGN_BLOCK
1144//
1145// rti_to_user
1146//	Finish up the rti instruction
//	r11 = new PS (user), r25 = kernel SP to stash.  Switch to the saved
//	user stack and rei.
1147//
rti_to_user:
	mtpr	r11, ev5__dtb_cm	// set Mbox current mode - no virt ref for 2 cycles
	mtpr	r11, ev5__ps		// set Ibox current mode - 2 bubble to hw_rei

	mtpr	r31, ev5__ipl		// set the ipl. No hw_rei for 2 cycles
	mtpr	r25, pt_ksp		// save off in case RTI to user

	mfpr	r30, pt_usp
	hw_rei_spe			// and back
1157
1158
1159	ALIGN_BLOCK
1160//
1161// rti_to_kern
1162//	Finish up the rti instruction
//	r12 = popped PS, r25 = new SP.  Stay in kernel mode; reload the EV5
//	IPL from the pt_intmask translation table for the restored OSF IPL.
1163//
rti_to_kern:
	and	r12, osfps_m_ipl, r11	// clean ps
	mfpr	r12, pt_intmask		// get int mask

	extbl	r12, r11, r12		// get mask for this ipl
	mtpr	r25, pt_ksp		// save off in case RTI to user

	mtpr	r12, ev5__ipl		// set the new ipl.
	or	r25, r31, sp		// sp

// pvc_violate 217 // possible hidden mt->mf ipl not a problem in callpals
	hw_rei
1176
1177	ALIGN_BLOCK
1178//
1179// swpctx_cont
1180//	Finish up the swpctx instruction
//	r16 = new PCB (physical), r23 = new ASN/cc, r24 = icsr<FPE,PMP> mask,
//	r25 = current icsr, r13 = current cc -- NOTE(review): register roles
//	inferred from the code below; confirm against the swpctx entry flow.
1181//

swpctx_cont:

	bic	r25, r24, r25		// clean icsr<FPE,PMP>
	sll	r12, icsr_v_fpe, r12	// shift new fen to pos

	ldq_p	r14, osfpcb_q_mmptr(r16)// get new mmptr
	srl	r22, osfpcb_v_pme, r22	// get pme down to bit 0

	or	r25, r12, r25		// icsr with new fen
	srl	r23, 32, r24		// move asn to low asn pos

	and	r22, 1, r22
	sll	r24, itb_asn_v_asn, r12

	sll	r22, icsr_v_pmp, r22
	nop

	or	r25, r22, r25		// icsr with new pme

	sll	r24, dtb_asn_v_asn, r24

	// New cc offset = new_cc_low32 - current_cc, kept in pt-relative form
	// and written <<32 into the CC IPR's offset field via insll below.
	subl	r23, r13, r13		// gen new cc offset
	mtpr	r12, itb_asn		// no hw_rei_stall in 0,1,2,3,4

	mtpr	r24, dtb_asn		// Load up new ASN
	mtpr	r25, icsr		// write the icsr

	sll	r14, page_offset_size_bits, r14 // Move PTBR into internal position.
	ldq_p	r25, osfpcb_q_usp(r16)	// get new usp

	insll	r13, 4, r13		// >> 32
// pvc_violate 379 // ldq_p can't trap except replay.  only problem if mf same ipr in same shadow
	mtpr	r14, pt_ptbr		// load the new ptbr

	mtpr	r13, cc			// set new offset
	ldq_p	r30, osfpcb_q_ksp(r16)	// get new ksp

// pvc_violate 379 // ldq_p can't trap except replay. only problem if mf same ipr in same shadow
	mtpr	r25, pt_usp		// save usp

no_pm_change_10_:	hw_rei_stall	// back we go
1224
1225	ALIGN_BLOCK
1226//
1227// swppal_cont - finish up the swppal call_pal
//	Marks pt_misc<switch>, restores cached bcache CTL/CONFIG values from
//	the impure area into r1/r2, sets r0=0 (success), and jumps to the new
//	PALcode base held in r3 (copied to r4 before r3 is repurposed).
1228//

swppal_cont:
	mfpr	r2, pt_misc		// get misc bits
	sll	r0, pt_misc_v_switch, r0 // get the "I've switched" bit
	or	r2, r0, r2		// set the bit
	mtpr	r31, ev5__alt_mode	// ensure alt_mode set to 0 (kernel)
	mtpr	r2, pt_misc		// update the chip

	or	r3, r31, r4
	mfpr	r3, pt_impure		// pass pointer to the impure area in r3
//orig	fix_impure_ipr	r3		// adjust impure pointer for ipr read
//orig	restore_reg1	bc_ctl, r1, r3, ipr=1	// pass cns_bc_ctl in r1
//orig	restore_reg1	bc_config, r2, r3, ipr=1 // pass cns_bc_config in r2
//orig	unfix_impure_ipr r3		// restore impure pointer
	lda	r3, CNS_Q_IPR(r3)
	RESTORE_SHADOW(r1,CNS_Q_BC_CTL,r3);
	RESTORE_SHADOW(r1,CNS_Q_BC_CFG,r3);
	lda	r3, -CNS_Q_IPR(r3)

	or	r31, r31, r0		// set status to success
//	pvc_violate	1007
	jmp	r31, (r4)		// and call our friend, it's her problem now


swppal_fail:
	// Unknown PAL variant or image not loaded: bump r0 to a nonzero
	// failure status and return to the caller.
	addq	r0, 1, r0		// set unknown pal or not loaded
	hw_rei				// and return
1256
1257
1258//	.sbttl	"Memory management"

	ALIGN_BLOCK
1261//
1262//foe_ipte_handler
1263// IFOE detected on level 3 pte, sort out FOE vs ACV
1264//
1265// on entry:
1266//	with
1267//	R8	 = pte
1268//	R10	 = pc
1269//
1270// Function
1271//	Determine TNV vs ACV vs FOE. Build stack and dispatch
1272//	Will not be here if TNV.
1273//

foe_ipte_handler:
	sll	r11, 63-osfps_v_mode, r25 // Shift mode up to MS bit
	mtpr	r31, ev5__ps		// Set Ibox current mode to kernel

	bis	r11, r31, r12		// Save PS for stack write
	bge	r25, foe_ipte_handler_10_ // no stack swap needed if cm=kern


	mtpr	r31, ev5__dtb_cm	// Set Mbox current mode to kernel -
					//     no virt ref for next 2 cycles
	mtpr	r30, pt_usp		// save user stack

	bis	r31, r31, r11		// Set new PS
	mfpr	r30, pt_ksp

	// User-mode fault: check URE instead of KRE by sliding the user
	// protection bits down into the kernel-bit positions.
	srl	r8, osfpte_v_ure-osfpte_v_kre, r8 // move pte user bits to kern
	nop

foe_ipte_handler_10_:	srl	r8, osfpte_v_kre, r25 // get kre to <0>
	lda	sp, 0-osfsf_c_size(sp)	// allocate stack space

	or	r10, r31, r14		// Save pc/va in case TBmiss or fault on stack
	mfpr	r13, pt_entmm		// get entry point

	stq	r16, osfsf_a0(sp)	// a0
	or	r14, r31, r16		// pass pc/va as a0

	stq	r17, osfsf_a1(sp)	// a1
	nop

	stq	r18, osfsf_a2(sp)	// a2
	lda	r17, mmcsr_c_acv(r31)	// assume ACV

	stq	r16, osfsf_pc(sp)	// save pc
	cmovlbs r25, mmcsr_c_foe, r17	// otherwise FOE

	stq	r12, osfsf_ps(sp)	// save ps
	subq	r31, 1, r18		// pass flag of istream as a2

	stq	r29, osfsf_gp(sp)
	mtpr	r13, exc_addr		// set vector address

	mfpr	r29, pt_kgp		// load kgp
	hw_rei_spe			// out to exec
1319
1320	ALIGN_BLOCK
1321//
1322//invalid_ipte_handler
1323// TNV detected on level 3 pte, sort out TNV vs ACV
1324//
1325// on entry:
1326//	with
1327//	R8	 = pte
1328//	R10	 = pc
1329//
1330// Function
1331//	Determine TNV vs ACV. Build stack and dispatch.
1332//

invalid_ipte_handler:
	sll	r11, 63-osfps_v_mode, r25 // Shift mode up to MS bit
	mtpr	r31, ev5__ps		// Set Ibox current mode to kernel

	bis	r11, r31, r12		// Save PS for stack write
	bge	r25, invalid_ipte_handler_10_ // no stack swap needed if cm=kern


	mtpr	r31, ev5__dtb_cm	// Set Mbox current mode to kernel -
					//     no virt ref for next 2 cycles
	mtpr	r30, pt_usp		// save user stack

	bis	r31, r31, r11		// Set new PS
	mfpr	r30, pt_ksp

	// User-mode fault: test URE rather than KRE (see foe_ipte_handler).
	srl	r8, osfpte_v_ure-osfpte_v_kre, r8 // move pte user bits to kern
	nop

invalid_ipte_handler_10_:	srl	r8, osfpte_v_kre, r25 // get kre to <0>
	lda	sp, 0-osfsf_c_size(sp)	// allocate stack space

	or	r10, r31, r14		// Save pc/va in case TBmiss on stack
	mfpr	r13, pt_entmm		// get entry point

	stq	r16, osfsf_a0(sp)	// a0
	or	r14, r31, r16		// pass pc/va as a0

	stq	r17, osfsf_a1(sp)	// a1
	nop

	stq	r18, osfsf_a2(sp)	// a2
	and	r25, 1, r17		// Isolate kre

	stq	r16, osfsf_pc(sp)	// save pc
	// read-enable set -> TNV (0... via xor), clear -> ACV: a1 = !kre.
	xor	r17, 1, r17		// map to acv/tnv as a1

	stq	r12, osfsf_ps(sp)	// save ps
	subq	r31, 1, r18		// pass flag of istream as a2

	stq	r29, osfsf_gp(sp)
	mtpr	r13, exc_addr		// set vector address

	mfpr	r29, pt_kgp		// load kgp
	hw_rei_spe			// out to exec
1378
1379
1380
1381
1382	ALIGN_BLOCK
1383//
1384//invalid_dpte_handler
1385// INVALID detected on level 3 pte, sort out TNV vs ACV
1386//
1387// on entry:
1388//	with
1389//	R10	 = va
1390//	R8	 = pte
1391//	R9	 = mm_stat
1392//	PT6	 = pc
1393//
1394// Function
1395//	Determine TNV vs ACV. Build stack and dispatch
1396//


invalid_dpte_handler:
	mfpr	r12, pt6
	blbs	r12, tnv_in_pal		// Special handler if original faulting reference was in PALmode

	bis	r12, r31, r14		// save PC in case of tbmiss or fault
	srl	r9, mm_stat_v_opcode, r25 // shift opc to <0>

	mtpr	r11, pt0		// Save PS for stack write
	and	r25, mm_stat_m_opcode, r25 // isolate opcode

	cmpeq	r25, evx_opc_sync, r25	// is it FETCH/FETCH_M?
	blbs	r25, nmiss_fetch_ldr31_err // yes

	//dismiss exception if load to r31/f31
	blbs	r9, invalid_dpte_no_dismiss // mm_stat<0> set on store or fetchm

	// not a store or fetch, must be a load
	srl	r9, mm_stat_v_ra, r25	// Shift rnum to low bits

	and	r25, 0x1F, r25		// isolate rnum
	nop

	cmpeq	r25, 0x1F, r25		// Is the rnum r31 or f31?
	bne	r25, nmiss_fetch_ldr31_err // Yes, dismiss the fault

invalid_dpte_no_dismiss:
	sll	r11, 63-osfps_v_mode, r25 // Shift mode up to MS bit
	mtpr	r31, ev5__ps		// Set Ibox current mode to kernel

	mtpr	r31, ev5__dtb_cm	// Set Mbox current mode to kernel -
					//     no virt ref for next 2 cycles
	bge	r25, invalid_dpte_no_dismiss_10_ // no stack swap needed if cm=kern

	// User-mode fault: test URE/UWE rather than KRE/KWE.
	srl	r8, osfpte_v_ure-osfpte_v_kre, r8 // move pte user bits to kern
	mtpr	r30, pt_usp		// save user stack

	bis	r31, r31, r11		// Set new PS
	mfpr	r30, pt_ksp

invalid_dpte_no_dismiss_10_:	srl	r8, osfpte_v_kre, r12 // get kre to <0>
	lda	sp, 0-osfsf_c_size(sp)	// allocate stack space

	or	r10, r31, r25		// Save va in case TBmiss on stack
	and	r9, 1, r13		// save r/w flag

	stq	r16, osfsf_a0(sp)	// a0
	or	r25, r31, r16		// pass va as a0

	stq	r17, osfsf_a1(sp)	// a1
	or	r31, mmcsr_c_acv, r17	// assume acv

	srl	r12, osfpte_v_kwe-osfpte_v_kre, r25 // get write enable to <0>
	stq	r29, osfsf_gp(sp)

	stq	r18, osfsf_a2(sp)	// a2
	// For a write fault, test the write-enable bit instead of read-enable.
	cmovlbs r13, r25, r12		// if write access move acv based on write enable

	or	r13, r31, r18		// pass flag of dstream access and read vs write
	mfpr	r25, pt0		// get ps

	stq	r14, osfsf_pc(sp)	// save pc
	mfpr	r13, pt_entmm		// get entry point

	stq	r25, osfsf_ps(sp)	// save ps
	mtpr	r13, exc_addr		// set vector address

	mfpr	r29, pt_kgp		// load kgp
	cmovlbs r12, mmcsr_c_tnv, r17	// make p2 be tnv if access ok else acv

	hw_rei_spe			// out to exec
1469
1470//
1471//
1472// We come here if we are erring on a dtb_miss, and the instr is a
1473// fetch, fetch_m, of load to r31/f31.
1474// The PC is incremented, and we return to the program.
1475// essentially ignoring the instruction and error.
1476//
1477//
	ALIGN_BLOCK
nmiss_fetch_ldr31_err:
	// pt6 holds the original faulting PC (stashed by the DTB-miss flow).
	mfpr	r12, pt6
	addq	r12, 4, r12		// bump pc to pc+4

	mtpr	r12, exc_addr		// and set entry point
	mfpr	r31, pt0		// pad exc_addr write

	hw_rei				//
1487
	ALIGN_BLOCK
//
// double_pte_inv
//	We had a single tbmiss which turned into a double tbmiss which found
//	an invalid PTE.  Return to the single-miss flow with a fake PTE, and
//	the invalid single-miss flow will report the error.
//
// on entry:
//	r21  	PTE (the invalid level-2 PTE)
//	r22 	available (scratch)
//	VA IPR	locked with original fault VA
//      pt4  	saved r21
//	pt5  	saved r22
//	pt6  	original exc_addr
//
// on return to tbmiss flow:
//	r8   	fake PTE (all xRE/xWE set, or all zero to force ACV)
//
//
//
double_pte_inv:
	srl	r21, osfpte_v_kre, r21	// get the kre bit to <0>
	mfpr	r22, exc_addr		// get the pc

	lda	r22, 4(r22)		// inc the pc
	lda	r8, osfpte_m_prot(r31)	// make a fake pte with xre and xwe set

	cmovlbc r21, r31, r8		// set to all 0 for acv if pte<kre> is 0
	mtpr	r22, exc_addr		// set for rei

	mfpr	r21, pt4		// restore regs
	mfpr	r22, pt5		// restore regs

	hw_rei				// back to tb miss
1522
	ALIGN_BLOCK
//
// tnv_in_pal
//	The only places in pal that ld or store are the
//	stack builders, rti or retsys.  Any of these mean we
//	need to take a ksp-not-valid halt: a PAL-mode load/store
//	took a translation-not-valid fault, so the kernel stack
//	pointer itself must be bad.
//
tnv_in_pal:


	br	r31, ksp_inval_halt
1535
1536
// .sbttl	"Icache flush routines"

	ALIGN_BLOCK
//
// Common Icache flush routine.
//	Flushes the Icache, then pads with NOPs long enough for the
//	flush to complete before the hw_rei_stall at the end; falls
//	through into one_cycle_and_hw_rei.
//
//
pal_ic_flush:
	nop
	mtpr	r31, ev5__ic_flush_ctl		// Icache flush - E1
	nop
	nop

// Now, do 44 NOPs.  3RFB prefetches (24) + IC buffer,IB,slot,issue (20)
// (42 here plus the 2 in one_cycle_and_hw_rei below = 44)
	nop
	nop
	nop
	nop

	nop
	nop
	nop
	nop

	nop
	nop		// 10

	nop
	nop
	nop
	nop

	nop
	nop
	nop
	nop

	nop
	nop		// 20

	nop
	nop
	nop
	nop

	nop
	nop
	nop
	nop

	nop
	nop		// 30
	nop
	nop
	nop
	nop

	nop
	nop
	nop
	nop

	nop
	nop		// 40

	nop
	nop

// Fall-through / shared tail: two more cycles of padding, then a
// stalling return from PALmode.
one_cycle_and_hw_rei:
	nop
	nop

	hw_rei_stall
1611
	ALIGN_BLOCK
//
// osfpal_calpal_opcdec
//	Here for all opcdec (reserved/unimplemented) CALL_PALs.
//
//	Build stack frame
//	a0 <- code (osf_a0_opdec)
//	a1 <- unpred
//	a2 <- unpred
//	vector via entIF
//
// On entry: r11 = PS; exc_addr points past the offending CALL_PAL.
//

osfpal_calpal_opcdec:
	sll	r11, 63-osfps_v_mode, r25 // Shift mode up to MS bit
	mtpr	r31, ev5__ps		// Set Ibox current mode to kernel

	mfpr	r14, exc_addr		// get pc
	nop

	bis	r11, r31, r12		// Save PS for stack write
	bge	r25, osfpal_calpal_opcdec_10_		// no stack swap needed if cm=kern


	mtpr	r31, ev5__dtb_cm	// Set Mbox current mode to kernel -
					// no virt ref for next 2 cycles
	mtpr	r30, pt_usp		// save user stack

	bis	r31, r31, r11		// Set new PS (kernel mode)
	mfpr	r30, pt_ksp

osfpal_calpal_opcdec_10_:
	lda	sp, 0-osfsf_c_size(sp)// allocate stack space
	nop

	stq	r16, osfsf_a0(sp)	// save regs
	bis	r31, osf_a0_opdec, r16	// set a0

	stq	r18, osfsf_a2(sp) 	// a2
	mfpr	r13, pt_entif		// get entry point

	stq	r12, osfsf_ps(sp)	// save old ps
	stq	r17, osfsf_a1(sp)	// a1

	stq	r14, osfsf_pc(sp)	// save pc
	nop

	stq	r29, osfsf_gp(sp) 	// save gp
	mtpr	r13, exc_addr		// load exc_addr with entIF
					// 1 cycle to hw_rei

	mfpr	r29, pt_kgp		// get the kgp


	hw_rei_spe			// done
1667
1668
1669
1670
1671
//
// pal_update_pcb
//	Update the PCB with the current SP, AST, and CC info.
//	In user mode the USP is saved (to pt_usp and the PCB);
//	in kernel mode the KSP is saved.  The 64-bit cycle counter
//	is folded to 32 bits (count + offset) and stored in the PCB.
//
//	r0 - return linkage
//	Clobbers: r12, r13, r14, r25.
//
	ALIGN_BLOCK

pal_update_pcb:
	mfpr	r12, pt_pcbb		// get pcbb
	and	r11, osfps_m_mode, r25	// get mode
	beq	r25, pal_update_pcb_10_	// in kern? no need to update user sp
	mtpr	r30, pt_usp		// save user stack
	stq_p	r30, osfpcb_q_usp(r12)	// store usp
	br	r31, pal_update_pcb_20_	// join common
pal_update_pcb_10_:	stq_p	r30, osfpcb_q_ksp(r12)	// store ksp
pal_update_pcb_20_:	rpcc	r13		// get cyccounter
	srl	r13, 32, r14		// move offset
	addl	r13, r14, r14		// merge for new time
	stl_p	r14, osfpcb_l_cc(r12)	// save time

//orig	pvc_jsr	updpcb, bsr=1, dest=1
	ret	r31, (r0)
1695
1696
//
// pal_save_state
//
// Function
//	All chip state saved, all PT's, SR's, FR's, IPR's, to the
//	per-CPU "impure" scratch area.  Used on entry to console /
//	machine-check halt paths.  The valid flag in the dump area is
//	cleared on entry and set (with an mb) only after everything has
//	been stored, so a partial dump is never marked valid.
//
//
// Regs' on entry...
//
//	R0 = halt code
//	pt0 = r0
//	R1 = pointer to impure
//	pt4 = r1
//	R3 = return addr
//	pt5 = r3
//
// register usage:
//	r0 = halt_code
//	r1 = addr of impure area
//	r3 = return_address
//	r4 = scratch
//
//

	ALIGN_BLOCK
	.globl pal_save_state
pal_save_state:
//
//
// start of implementation independent save routine
//
// 	the impure area is larger than the addressibility of hw_ld and hw_st
//	therefore, we need to play some games:  The impure area
//	is informally divided into the "machine independent" part and the
//	"machine dependent" part.  The state that will be saved in the
//	"machine independent" part are gpr's, fpr's, hlt, flag, mchkflag (use (un)fix_impure_gpr macros).
//	All others will be in the "machine dependent" part (use (un)fix_impure_ipr macros).
//	The impure pointer will need to be adjusted by a different offset for each.  The store/restore_reg
//	macros will automagically adjust the offset correctly.
//

// The distributed code is commented out and followed by corresponding SRC code.
// Beware: SAVE_IPR and RESTORE_IPR blow away r0(v0)

//orig	fix_impure_gpr	r1		// adjust impure area pointer for stores to "gpr" part of impure area
	lda	r1, 0x200(r1)		// Point to center of CPU segment
//orig	store_reg1 flag, r31, r1, ipr=1	// clear dump area flag
	SAVE_GPR(r31,CNS_Q_FLAG,r1)	// Clear the valid flag
//orig	store_reg1 hlt, r0, r1, ipr=1
	SAVE_GPR(r0,CNS_Q_HALT,r1)	// Save the halt code

	mfpr	r0, pt0			// get r0 back //orig
//orig	store_reg1 0, r0, r1		// save r0
	SAVE_GPR(r0,CNS_Q_GPR+0x00,r1)	// Save r0

	mfpr	r0, pt4			// get r1 back //orig
//orig	store_reg1 1, r0, r1		// save r1
	SAVE_GPR(r0,CNS_Q_GPR+0x08,r1)	// Save r1

//orig	store_reg 2			// save r2
	SAVE_GPR(r2,CNS_Q_GPR+0x10,r1)	// Save r2

	mfpr	r0, pt5			// get r3 back //orig
//orig	store_reg1 3, r0, r1		// save r3
	SAVE_GPR(r0,CNS_Q_GPR+0x18,r1)	// Save r3

	// reason code has been saved
	// r0 has been saved
	// r1 has been saved
	// r2 has been saved
	// r3 has been saved
	// pt0, pt4, pt5 have been lost

	//
	// Get out of shadow mode so r8-r14/r25 name the real GPRs,
	// not the PAL shadow set
	//

	mfpr	r2, icsr		// Get icsr
	ldah	r0, (1<<(icsr_v_sde-16))(r31)
	bic	r2, r0, r0		// ICSR with SDE clear
	mtpr	r0, icsr		// Turn off SDE

	mfpr	r31, pt0		// SDE bubble cycle 1
	mfpr	r31, pt0		// SDE bubble cycle 2
	mfpr	r31, pt0		// SDE bubble cycle 3
	nop


	// save integer regs R4-r31
	SAVE_GPR(r4,CNS_Q_GPR+0x20,r1)
	SAVE_GPR(r5,CNS_Q_GPR+0x28,r1)
	SAVE_GPR(r6,CNS_Q_GPR+0x30,r1)
	SAVE_GPR(r7,CNS_Q_GPR+0x38,r1)
	SAVE_GPR(r8,CNS_Q_GPR+0x40,r1)
	SAVE_GPR(r9,CNS_Q_GPR+0x48,r1)
	SAVE_GPR(r10,CNS_Q_GPR+0x50,r1)
	SAVE_GPR(r11,CNS_Q_GPR+0x58,r1)
	SAVE_GPR(r12,CNS_Q_GPR+0x60,r1)
	SAVE_GPR(r13,CNS_Q_GPR+0x68,r1)
	SAVE_GPR(r14,CNS_Q_GPR+0x70,r1)
	SAVE_GPR(r15,CNS_Q_GPR+0x78,r1)
	SAVE_GPR(r16,CNS_Q_GPR+0x80,r1)
	SAVE_GPR(r17,CNS_Q_GPR+0x88,r1)
	SAVE_GPR(r18,CNS_Q_GPR+0x90,r1)
	SAVE_GPR(r19,CNS_Q_GPR+0x98,r1)
	SAVE_GPR(r20,CNS_Q_GPR+0xA0,r1)
	SAVE_GPR(r21,CNS_Q_GPR+0xA8,r1)
	SAVE_GPR(r22,CNS_Q_GPR+0xB0,r1)
	SAVE_GPR(r23,CNS_Q_GPR+0xB8,r1)
	SAVE_GPR(r24,CNS_Q_GPR+0xC0,r1)
	SAVE_GPR(r25,CNS_Q_GPR+0xC8,r1)
	SAVE_GPR(r26,CNS_Q_GPR+0xD0,r1)
	SAVE_GPR(r27,CNS_Q_GPR+0xD8,r1)
	SAVE_GPR(r28,CNS_Q_GPR+0xE0,r1)
	SAVE_GPR(r29,CNS_Q_GPR+0xE8,r1)
	SAVE_GPR(r30,CNS_Q_GPR+0xF0,r1)
	SAVE_GPR(r31,CNS_Q_GPR+0xF8,r1)

	// save all paltemp regs except pt0

//orig	unfix_impure_gpr	r1	// adjust impure area pointer for gpr stores
//orig	fix_impure_ipr	r1		// adjust impure area pointer for pt stores

	lda	r1, -0x200(r1)		// Restore the impure base address.
	lda	r1, CNS_Q_IPR(r1)	// Point to the base of IPR area.
	SAVE_IPR(pt0,CNS_Q_PT+0x00,r1)		// the osf code didn't save/restore palTemp 0 ?? pboyle
	SAVE_IPR(pt1,CNS_Q_PT+0x08,r1)
	SAVE_IPR(pt2,CNS_Q_PT+0x10,r1)
	SAVE_IPR(pt3,CNS_Q_PT+0x18,r1)
	SAVE_IPR(pt4,CNS_Q_PT+0x20,r1)
	SAVE_IPR(pt5,CNS_Q_PT+0x28,r1)
	SAVE_IPR(pt6,CNS_Q_PT+0x30,r1)
	SAVE_IPR(pt7,CNS_Q_PT+0x38,r1)
	SAVE_IPR(pt8,CNS_Q_PT+0x40,r1)
	SAVE_IPR(pt9,CNS_Q_PT+0x48,r1)
	SAVE_IPR(pt10,CNS_Q_PT+0x50,r1)
	SAVE_IPR(pt11,CNS_Q_PT+0x58,r1)
	SAVE_IPR(pt12,CNS_Q_PT+0x60,r1)
	SAVE_IPR(pt13,CNS_Q_PT+0x68,r1)
	SAVE_IPR(pt14,CNS_Q_PT+0x70,r1)
	SAVE_IPR(pt15,CNS_Q_PT+0x78,r1)
	SAVE_IPR(pt16,CNS_Q_PT+0x80,r1)
	SAVE_IPR(pt17,CNS_Q_PT+0x88,r1)
	SAVE_IPR(pt18,CNS_Q_PT+0x90,r1)
	SAVE_IPR(pt19,CNS_Q_PT+0x98,r1)
	SAVE_IPR(pt20,CNS_Q_PT+0xA0,r1)
	SAVE_IPR(pt21,CNS_Q_PT+0xA8,r1)
	SAVE_IPR(pt22,CNS_Q_PT+0xB0,r1)
	SAVE_IPR(pt23,CNS_Q_PT+0xB8,r1)

	// Restore shadow mode (r2 still holds the original ICSR)
	mfpr	r31, pt0		// pad write to icsr out of shadow of store (trap does not abort write)
	mfpr	r31, pt0
	mtpr	r2, icsr		// Restore original ICSR

	mfpr	r31, pt0		// SDE bubble cycle 1
	mfpr	r31, pt0		// SDE bubble cycle 2
	mfpr	r31, pt0		// SDE bubble cycle 3
	nop

	// save all integer shadow regs
	SAVE_SHADOW( r8,CNS_Q_SHADOW+0x00,r1)	// also called p0...p7 in the Hudson code
	SAVE_SHADOW( r9,CNS_Q_SHADOW+0x08,r1)
	SAVE_SHADOW(r10,CNS_Q_SHADOW+0x10,r1)
	SAVE_SHADOW(r11,CNS_Q_SHADOW+0x18,r1)
	SAVE_SHADOW(r12,CNS_Q_SHADOW+0x20,r1)
	SAVE_SHADOW(r13,CNS_Q_SHADOW+0x28,r1)
	SAVE_SHADOW(r14,CNS_Q_SHADOW+0x30,r1)
	SAVE_SHADOW(r25,CNS_Q_SHADOW+0x38,r1)

	// save the remaining restorable IPRs
	SAVE_IPR(excAddr,CNS_Q_EXC_ADDR,r1)
	SAVE_IPR(palBase,CNS_Q_PAL_BASE,r1)
	SAVE_IPR(mmStat,CNS_Q_MM_STAT,r1)
	SAVE_IPR(va,CNS_Q_VA,r1)
	SAVE_IPR(icsr,CNS_Q_ICSR,r1)
	SAVE_IPR(ipl,CNS_Q_IPL,r1)
	SAVE_IPR(ips,CNS_Q_IPS,r1)
	SAVE_IPR(itbAsn,CNS_Q_ITB_ASN,r1)
	SAVE_IPR(aster,CNS_Q_ASTER,r1)
	SAVE_IPR(astrr,CNS_Q_ASTRR,r1)
	SAVE_IPR(sirr,CNS_Q_SIRR,r1)
	SAVE_IPR(isr,CNS_Q_ISR,r1)
	SAVE_IPR(iVptBr,CNS_Q_IVPTBR,r1)
	SAVE_IPR(mcsr,CNS_Q_MCSR,r1)
	SAVE_IPR(dcMode,CNS_Q_DC_MODE,r1)

//orig	pvc_violate 379			// mf maf_mode after a store ok (pvc doesn't distinguish ld from st)
//orig	store_reg maf_mode, ipr=1	// save ipr -- no mbox instructions for
//orig					// PVC violation applies only to
pvc$osf35$379:				// loads. HW_ST ok here, so ignore
	SAVE_IPR(mafMode,CNS_Q_MAF_MODE,r1)	// MBOX INST->MF MAF_MODE IN 0,1,2


	//the following iprs are informational only -- will not be restored

	SAVE_IPR(icPerr,CNS_Q_ICPERR_STAT,r1)
	SAVE_IPR(PmCtr,CNS_Q_PM_CTR,r1)
	SAVE_IPR(intId,CNS_Q_INT_ID,r1)
	SAVE_IPR(excSum,CNS_Q_EXC_SUM,r1)
	SAVE_IPR(excMask,CNS_Q_EXC_MASK,r1)
	ldah	r14, 0xFFF0(zero)
	zap	r14, 0xE0, r14		// Get base address of CBOX IPRs
	NOP				// Pad mfpr dcPerr out of shadow of
	NOP				// last store
	NOP
	SAVE_IPR(dcPerr,CNS_Q_DCPERR_STAT,r1)

	// read cbox ipr state

	mb
	ldq_p	r2, scCtl(r14)
	ldq_p	r13, ldLock(r14)
	ldq_p	r4, scAddr(r14)
	ldq_p	r5, eiAddr(r14)
	ldq_p	r6, bcTagAddr(r14)
	ldq_p	r7, fillSyn(r14)
	bis	r5, r4, zero		// Make sure all loads complete before
	bis	r7, r6, zero		// reading registers that unlock them.
	ldq_p	r8, scStat(r14)		// Unlocks scAddr.
	ldq_p	r9, eiStat(r14)		// Unlocks eiAddr, bcTagAddr, fillSyn.
	ldq_p	zero, eiStat(r14)	// Make sure it is really unlocked.
	mb

	// save cbox ipr state
	SAVE_SHADOW(r2,CNS_Q_SC_CTL,r1);
	SAVE_SHADOW(r13,CNS_Q_LD_LOCK,r1);
	SAVE_SHADOW(r4,CNS_Q_SC_ADDR,r1);
	SAVE_SHADOW(r5,CNS_Q_EI_ADDR,r1);
	SAVE_SHADOW(r6,CNS_Q_BC_TAG_ADDR,r1);
	SAVE_SHADOW(r7,CNS_Q_FILL_SYN,r1);
	SAVE_SHADOW(r8,CNS_Q_SC_STAT,r1);
	SAVE_SHADOW(r9,CNS_Q_EI_STAT,r1);
	//bc_config? sl_rcv?

// restore impure base
//orig	unfix_impure_ipr r1
	lda	r1, -CNS_Q_IPR(r1)

// save all floating regs -- enable FEN in ICSR first
	mfpr	r0, icsr		// get icsr
	or	r31, 1, r2		// get a one
	sll	r2, icsr_v_fpe, r2	// Shift it into ICSR<FPE> position
	or	r2, r0, r0		// set FEN on
	mtpr	r0, icsr		// write to icsr, enabling FEN

// map the save area virtually
// (FP stores need a virtual mapping; wire a direct-mapped DTB entry)
	mtpr	r31, dtbIa		// Clear all DTB entries
	srl	r1, va_s_off, r0	// Clean off byte-within-page offset
	sll	r0, pte_v_pfn, r0	// Shift to form PFN
	lda	r0, pte_m_prot(r0)	// Set all read/write enable bits
	mtpr	r0, dtbPte		// Load the PTE and set valid
	mtpr	r1, dtbTag		// Write the PTE and tag into the DTB


// map the next page too - in case the impure area crosses a page boundary
	lda	r4, (1<<va_s_off)(r1)	// Generate address for next page
	srl	r4, va_s_off, r0	// Clean off byte-within-page offset
	sll	r0, pte_v_pfn, r0	// Shift to form PFN
	lda	r0, pte_m_prot(r0)	// Set all read/write enable bits
	mtpr	r0, dtbPte		// Load the PTE and set valid
	mtpr	r4, dtbTag		// Write the PTE and tag into the DTB

	sll	r31, 0, r31		// stall cycle 1
	sll	r31, 0, r31		// stall cycle 2
	sll	r31, 0, r31		// stall cycle 3
	nop

// add offset for saving fpr regs
//orig	fix_impure_gpr r1
	lda	r1, 0x200(r1)		// Point to center of CPU segment

// now save the regs - F0-F31
	mf_fpcr  f0			// original (FPCR captured into f0, stored below)

	SAVE_FPR(f0,CNS_Q_FPR+0x00,r1)
	SAVE_FPR(f1,CNS_Q_FPR+0x08,r1)
	SAVE_FPR(f2,CNS_Q_FPR+0x10,r1)
	SAVE_FPR(f3,CNS_Q_FPR+0x18,r1)
	SAVE_FPR(f4,CNS_Q_FPR+0x20,r1)
	SAVE_FPR(f5,CNS_Q_FPR+0x28,r1)
	SAVE_FPR(f6,CNS_Q_FPR+0x30,r1)
	SAVE_FPR(f7,CNS_Q_FPR+0x38,r1)
	SAVE_FPR(f8,CNS_Q_FPR+0x40,r1)
	SAVE_FPR(f9,CNS_Q_FPR+0x48,r1)
	SAVE_FPR(f10,CNS_Q_FPR+0x50,r1)
	SAVE_FPR(f11,CNS_Q_FPR+0x58,r1)
	SAVE_FPR(f12,CNS_Q_FPR+0x60,r1)
	SAVE_FPR(f13,CNS_Q_FPR+0x68,r1)
	SAVE_FPR(f14,CNS_Q_FPR+0x70,r1)
	SAVE_FPR(f15,CNS_Q_FPR+0x78,r1)
	SAVE_FPR(f16,CNS_Q_FPR+0x80,r1)
	SAVE_FPR(f17,CNS_Q_FPR+0x88,r1)
	SAVE_FPR(f18,CNS_Q_FPR+0x90,r1)
	SAVE_FPR(f19,CNS_Q_FPR+0x98,r1)
	SAVE_FPR(f20,CNS_Q_FPR+0xA0,r1)
	SAVE_FPR(f21,CNS_Q_FPR+0xA8,r1)
	SAVE_FPR(f22,CNS_Q_FPR+0xB0,r1)
	SAVE_FPR(f23,CNS_Q_FPR+0xB8,r1)
	SAVE_FPR(f24,CNS_Q_FPR+0xC0,r1)
	SAVE_FPR(f25,CNS_Q_FPR+0xC8,r1)
	SAVE_FPR(f26,CNS_Q_FPR+0xD0,r1)
	SAVE_FPR(f27,CNS_Q_FPR+0xD8,r1)
	SAVE_FPR(f28,CNS_Q_FPR+0xE0,r1)
	SAVE_FPR(f29,CNS_Q_FPR+0xE8,r1)
	SAVE_FPR(f30,CNS_Q_FPR+0xF0,r1)
	SAVE_FPR(f31,CNS_Q_FPR+0xF8,r1)

//switch impure offset from gpr to ipr---
//orig	unfix_impure_gpr	r1
//orig	fix_impure_ipr	r1
//orig	store_reg1 fpcsr, f0, r1, fpcsr=1

	SAVE_FPR(f0,CNS_Q_FPCSR,r1)	// fpcsr loaded above into f0 -- can it reach
	lda	r1, -0x200(r1)		// Restore the impure base address

// and back to gpr ---
//orig	unfix_impure_ipr	r1
//orig	fix_impure_gpr	r1

//orig	lda	r0, cns_mchksize(r31)	// get size of mchk area
//orig	store_reg1 mchkflag, r0, r1, ipr=1
//orig	mb

	lda	r1, CNS_Q_IPR(r1)	// Point to base of IPR area again
	// save this using the IPR base (it is closer) not the GRP base as they used...pb
	lda	r0, MACHINE_CHECK_SIZE(r31)	// get size of mchk area
	SAVE_SHADOW(r0,CNS_Q_MCHK,r1);
	mb

//orig	or	r31, 1, r0		// get a one
//orig	store_reg1 flag, r0, r1, ipr=1	// set dump area flag
//orig	mb

	lda	r1, -CNS_Q_IPR(r1)	// back to the base
	lda	r1, 0x200(r1)		// Point to center of CPU segment
	or	r31, 1, r0		// get a one
	SAVE_GPR(r0,CNS_Q_FLAG,r1)	// // set dump area valid flag
	mb

	// restore impure area base
//orig	unfix_impure_gpr r1
	lda	r1, -0x200(r1)		// Point to center of CPU segment

	mtpr	r31, dtb_ia		// clear the dtb (drop the temporary mapping)
	mtpr	r31, itb_ia		// clear the itb

//orig	pvc_jsr	savsta, bsr=1, dest=1
	ret	r31, (r3)		// and back we go
2045
2046
2047
// .sbttl	"PAL_RESTORE_STATE"
//
//
// Pal_restore_state
//	Inverse of pal_save_state: reloads FRs, PTs, IPRs, shadow regs
//	and GPRs from the impure area, then clears the dump-area valid
//	flag.  The temporary DTB mapping used for the FP loads is torn
//	down before returning.
//
// register usage:
//	r1 = addr of impure area
//	r3 = return_address
//	all other regs are scratchable, as they are about to
//	be reloaded from ram.
//
// Function:
//	All chip state restored, all SRs, FRs, PTs, IPRs
//					*** except R1, R3, PT0, PT4, PT5 ***
//
//
	ALIGN_BLOCK
pal_restore_state:

//need to restore sc_ctl,bc_ctl,bc_config??? if so, need to figure out a safe way to do so.

// map the console io area virtually
	mtpr	r31, dtbIa		// Clear all DTB entries
	srl	r1, va_s_off, r0	// Clean off byte-within-page offset
	sll	r0, pte_v_pfn, r0	// Shift to form PFN
	lda	r0, pte_m_prot(r0)	// Set all read/write enable bits
	mtpr	r0, dtbPte		// Load the PTE and set valid
	mtpr	r1, dtbTag		// Write the PTE and tag into the DTB


// map the next page too, in case impure area crosses page boundary
	lda	r4, (1<<VA_S_OFF)(r1)	// Generate address for next page
	srl	r4, va_s_off, r0	// Clean off byte-within-page offset
	sll	r0, pte_v_pfn, r0	// Shift to form PFN
	lda	r0, pte_m_prot(r0)	// Set all read/write enable bits
	mtpr	r0, dtbPte		// Load the PTE and set valid
	mtpr	r4, dtbTag		// Write the PTE and tag into the DTB

// enable FP and shadow mode so FRs and shadow regs can be written
	mfpr	r0, icsr		// Get current ICSR
	bis	zero, 1, r2		// Get a '1'
	or	r2, (1<<(icsr_v_sde-icsr_v_fpe)), r2
	sll	r2, icsr_v_fpe, r2	// Shift bits into position
	bis	r2, r2, r0		// Set ICSR<SDE> and ICSR<FPE>
					// NOTE(review): bis r2,r2,r0 just copies r2 to r0,
					// discarding the ICSR value read above; intent per the
					// comment looks like "bis r2, r0, r0" -- confirm
	mtpr	r0, icsr		// Update the chip

	mfpr	r31, pt0		// FPE bubble cycle 1 //orig
	mfpr	r31, pt0		// FPE bubble cycle 2 //orig
	mfpr	r31, pt0		// FPE bubble cycle 3 //orig

//orig	fix_impure_ipr	r1
//orig	restore_reg1 fpcsr, f0, r1, fpcsr=1
//orig	mt_fpcr	f0
//orig
//orig	unfix_impure_ipr	r1
//orig	fix_impure_gpr	r1		// adjust impure pointer offset for gpr access
	lda	r1, 200(r1)		// Point to base of IPR area again
					// NOTE(review): decimal 200 (0xC8); the save path used
					// 0x200 when storing CNS_Q_FPCSR -- suspected typo for
					// 0x200 (or CNS_Q_IPR per //orig) -- confirm offsets
	RESTORE_FPR(f0,CNS_Q_FPCSR,r1)		// can it reach?? pb
	mt_fpcr	f0			// original

	lda	r1, 0x200(r1)		// point to center of CPU segment

// restore all floating regs
	RESTORE_FPR(f0,CNS_Q_FPR+0x00,r1)
	RESTORE_FPR(f1,CNS_Q_FPR+0x08,r1)
	RESTORE_FPR(f2,CNS_Q_FPR+0x10,r1)
	RESTORE_FPR(f3,CNS_Q_FPR+0x18,r1)
	RESTORE_FPR(f4,CNS_Q_FPR+0x20,r1)
	RESTORE_FPR(f5,CNS_Q_FPR+0x28,r1)
	RESTORE_FPR(f6,CNS_Q_FPR+0x30,r1)
	RESTORE_FPR(f7,CNS_Q_FPR+0x38,r1)
	RESTORE_FPR(f8,CNS_Q_FPR+0x40,r1)
	RESTORE_FPR(f9,CNS_Q_FPR+0x48,r1)
	RESTORE_FPR(f10,CNS_Q_FPR+0x50,r1)
	RESTORE_FPR(f11,CNS_Q_FPR+0x58,r1)
	RESTORE_FPR(f12,CNS_Q_FPR+0x60,r1)
	RESTORE_FPR(f13,CNS_Q_FPR+0x68,r1)
	RESTORE_FPR(f14,CNS_Q_FPR+0x70,r1)
	RESTORE_FPR(f15,CNS_Q_FPR+0x78,r1)
	RESTORE_FPR(f16,CNS_Q_FPR+0x80,r1)
	RESTORE_FPR(f17,CNS_Q_FPR+0x88,r1)
	RESTORE_FPR(f18,CNS_Q_FPR+0x90,r1)
	RESTORE_FPR(f19,CNS_Q_FPR+0x98,r1)
	RESTORE_FPR(f20,CNS_Q_FPR+0xA0,r1)
	RESTORE_FPR(f21,CNS_Q_FPR+0xA8,r1)
	RESTORE_FPR(f22,CNS_Q_FPR+0xB0,r1)
	RESTORE_FPR(f23,CNS_Q_FPR+0xB8,r1)
	RESTORE_FPR(f24,CNS_Q_FPR+0xC0,r1)
	RESTORE_FPR(f25,CNS_Q_FPR+0xC8,r1)
	RESTORE_FPR(f26,CNS_Q_FPR+0xD0,r1)
	RESTORE_FPR(f27,CNS_Q_FPR+0xD8,r1)
	RESTORE_FPR(f28,CNS_Q_FPR+0xE0,r1)
	RESTORE_FPR(f29,CNS_Q_FPR+0xE8,r1)
	RESTORE_FPR(f30,CNS_Q_FPR+0xF0,r1)
	RESTORE_FPR(f31,CNS_Q_FPR+0xF8,r1)

// switch impure pointer from gpr to ipr area --
//orig	unfix_impure_gpr	r1
//orig	fix_impure_ipr	r1
	lda	r1, -0x200(r1)		// Restore base address of impure area.
	lda	r1, CNS_Q_IPR(r1)	// Point to base of IPR area.

// restore all pal regs
	RESTORE_IPR(pt0,CNS_Q_PT+0x00,r1)		// the osf code didn't save/restore palTemp 0 ?? pboyle
	RESTORE_IPR(pt1,CNS_Q_PT+0x08,r1)
	RESTORE_IPR(pt2,CNS_Q_PT+0x10,r1)
	RESTORE_IPR(pt3,CNS_Q_PT+0x18,r1)
	RESTORE_IPR(pt4,CNS_Q_PT+0x20,r1)
	RESTORE_IPR(pt5,CNS_Q_PT+0x28,r1)
	RESTORE_IPR(pt6,CNS_Q_PT+0x30,r1)
	RESTORE_IPR(pt7,CNS_Q_PT+0x38,r1)
	RESTORE_IPR(pt8,CNS_Q_PT+0x40,r1)
	RESTORE_IPR(pt9,CNS_Q_PT+0x48,r1)
	RESTORE_IPR(pt10,CNS_Q_PT+0x50,r1)
	RESTORE_IPR(pt11,CNS_Q_PT+0x58,r1)
	RESTORE_IPR(pt12,CNS_Q_PT+0x60,r1)
	RESTORE_IPR(pt13,CNS_Q_PT+0x68,r1)
	RESTORE_IPR(pt14,CNS_Q_PT+0x70,r1)
	RESTORE_IPR(pt15,CNS_Q_PT+0x78,r1)
	RESTORE_IPR(pt16,CNS_Q_PT+0x80,r1)
	RESTORE_IPR(pt17,CNS_Q_PT+0x88,r1)
	RESTORE_IPR(pt18,CNS_Q_PT+0x90,r1)
	RESTORE_IPR(pt19,CNS_Q_PT+0x98,r1)
	RESTORE_IPR(pt20,CNS_Q_PT+0xA0,r1)
	RESTORE_IPR(pt21,CNS_Q_PT+0xA8,r1)
	RESTORE_IPR(pt22,CNS_Q_PT+0xB0,r1)
	RESTORE_IPR(pt23,CNS_Q_PT+0xB8,r1)


//orig	restore_reg exc_addr, ipr=1	// restore ipr
//orig	restore_reg pal_base, ipr=1	// restore ipr
//orig	restore_reg ipl, ipr=1		// restore ipr
//orig	restore_reg ps, ipr=1		// restore ipr
//orig	mtpr	r0, dtb_cm		// set current mode in mbox too
//orig	restore_reg itb_asn, ipr=1
//orig	srl	r0, itb_asn_v_asn, r0
//orig	sll	r0, dtb_asn_v_asn, r0
//orig	mtpr	r0, dtb_asn		// set ASN in Mbox too
//orig	restore_reg ivptbr, ipr=1
//orig	mtpr	r0, mvptbr		// use ivptbr value to restore mvptbr
//orig	restore_reg mcsr, ipr=1
//orig	restore_reg aster, ipr=1
//orig	restore_reg astrr, ipr=1
//orig	restore_reg sirr, ipr=1
//orig	restore_reg maf_mode, ipr=1	// no mbox instruction for 3 cycles
//orig	mfpr	r31, pt0		// (may issue with mt maf_mode)
//orig	mfpr	r31, pt0		// bubble cycle 1
//orig	mfpr	r31, pt0		// bubble cycle 2
//orig	mfpr	r31, pt0		// bubble cycle 3
//orig	mfpr	r31, pt0		// (may issue with following ld)

	// r0 gets the value of RESTORE_IPR in the macro and this code uses this side effect (gag)
	RESTORE_IPR(excAddr,CNS_Q_EXC_ADDR,r1)
	RESTORE_IPR(palBase,CNS_Q_PAL_BASE,r1)
	RESTORE_IPR(ipl,CNS_Q_IPL,r1)
	RESTORE_IPR(ips,CNS_Q_IPS,r1)
	mtpr	r0, dtbCm		// Set Mbox current mode too.
	RESTORE_IPR(itbAsn,CNS_Q_ITB_ASN,r1)
	srl	r0, 4, r0		// 4 = ITB_ASN<asn> shift (itb_asn_v_asn per //orig)
	sll	r0, 57, r0		// 57 = DTB_ASN<asn> position (dtb_asn_v_asn per //orig)
	mtpr	r0, dtbAsn		// Set Mbox ASN too
	RESTORE_IPR(iVptBr,CNS_Q_IVPTBR,r1)
	mtpr	r0, mVptBr		// Set Mbox VptBr too
	RESTORE_IPR(mcsr,CNS_Q_MCSR,r1)
	RESTORE_IPR(aster,CNS_Q_ASTER,r1)
	RESTORE_IPR(astrr,CNS_Q_ASTRR,r1)
	RESTORE_IPR(sirr,CNS_Q_SIRR,r1)
	RESTORE_IPR(mafMode,CNS_Q_MAF_MODE,r1)
	STALL
	STALL
	STALL
	STALL
	STALL


	// restore all integer shadow regs
	RESTORE_SHADOW( r8,CNS_Q_SHADOW+0x00,r1)	// also called p0...p7 in the Hudson code
	RESTORE_SHADOW( r9,CNS_Q_SHADOW+0x08,r1)
	RESTORE_SHADOW(r10,CNS_Q_SHADOW+0x10,r1)
	RESTORE_SHADOW(r11,CNS_Q_SHADOW+0x18,r1)
	RESTORE_SHADOW(r12,CNS_Q_SHADOW+0x20,r1)
	RESTORE_SHADOW(r13,CNS_Q_SHADOW+0x28,r1)
	RESTORE_SHADOW(r14,CNS_Q_SHADOW+0x30,r1)
	RESTORE_SHADOW(r25,CNS_Q_SHADOW+0x38,r1)
	RESTORE_IPR(dcMode,CNS_Q_DC_MODE,r1)

	//
	// Get out of shadow mode
	//

	mfpr	r31, pt0		// pad last load to icsr write (in case of replay, icsr will be written anyway)
	mfpr	r31, pt0		// ""
	mfpr	r0, icsr		// Get icsr
	ldah	r2, (1<<(ICSR_V_SDE-16))(r31)	// Get a one in SHADOW_ENABLE bit location
	bic	r0, r2, r2		// ICSR with SDE clear
	mtpr	r2, icsr		// Turn off SDE - no palshadow rd/wr for 3 bubble cycles

	mfpr	r31, pt0		// SDE bubble cycle 1
	mfpr	r31, pt0		// SDE bubble cycle 2
	mfpr	r31, pt0		// SDE bubble cycle 3
	nop

// switch impure pointer from ipr to gpr area --
//orig	unfix_impure_ipr	r1
//orig	fix_impure_gpr	r1

// Restore GPRs (r0, r2 are restored later, r1 and r3 are trashed) ...

	lda	r1, -CNS_Q_IPR(r1)	// Restore base address of impure area
	lda	r1, 0x200(r1)		// Point to center of CPU segment

	// restore all integer regs
	RESTORE_GPR(r4,CNS_Q_GPR+0x20,r1)
	RESTORE_GPR(r5,CNS_Q_GPR+0x28,r1)
	RESTORE_GPR(r6,CNS_Q_GPR+0x30,r1)
	RESTORE_GPR(r7,CNS_Q_GPR+0x38,r1)
	RESTORE_GPR(r8,CNS_Q_GPR+0x40,r1)
	RESTORE_GPR(r9,CNS_Q_GPR+0x48,r1)
	RESTORE_GPR(r10,CNS_Q_GPR+0x50,r1)
	RESTORE_GPR(r11,CNS_Q_GPR+0x58,r1)
	RESTORE_GPR(r12,CNS_Q_GPR+0x60,r1)
	RESTORE_GPR(r13,CNS_Q_GPR+0x68,r1)
	RESTORE_GPR(r14,CNS_Q_GPR+0x70,r1)
	RESTORE_GPR(r15,CNS_Q_GPR+0x78,r1)
	RESTORE_GPR(r16,CNS_Q_GPR+0x80,r1)
	RESTORE_GPR(r17,CNS_Q_GPR+0x88,r1)
	RESTORE_GPR(r18,CNS_Q_GPR+0x90,r1)
	RESTORE_GPR(r19,CNS_Q_GPR+0x98,r1)
	RESTORE_GPR(r20,CNS_Q_GPR+0xA0,r1)
	RESTORE_GPR(r21,CNS_Q_GPR+0xA8,r1)
	RESTORE_GPR(r22,CNS_Q_GPR+0xB0,r1)
	RESTORE_GPR(r23,CNS_Q_GPR+0xB8,r1)
	RESTORE_GPR(r24,CNS_Q_GPR+0xC0,r1)
	RESTORE_GPR(r25,CNS_Q_GPR+0xC8,r1)
	RESTORE_GPR(r26,CNS_Q_GPR+0xD0,r1)
	RESTORE_GPR(r27,CNS_Q_GPR+0xD8,r1)
	RESTORE_GPR(r28,CNS_Q_GPR+0xE0,r1)
	RESTORE_GPR(r29,CNS_Q_GPR+0xE8,r1)
	RESTORE_GPR(r30,CNS_Q_GPR+0xF0,r1)
	RESTORE_GPR(r31,CNS_Q_GPR+0xF8,r1)

//orig	// switch impure pointer from gpr to ipr area --
//orig	unfix_impure_gpr	r1
//orig	fix_impure_ipr	r1
//orig	restore_reg icsr, ipr=1		// restore original icsr- 4 bubbles to hw_rei

	// NB: t0 is the Alpha software name for r1 -- same register as above
	lda	t0, -0x200(t0)		// Restore base address of impure area.
	lda	t0, CNS_Q_IPR(t0)	// Point to base of IPR area again.
	RESTORE_IPR(icsr,CNS_Q_ICSR,r1)

//orig	// and back again --
//orig	unfix_impure_ipr	r1
//orig	fix_impure_gpr	r1
//orig	store_reg1 flag, r31, r1, ipr=1	// clear dump area valid flag
//orig	mb

	lda	t0, -CNS_Q_IPR(t0)	// Back to base of impure area again,
	lda	t0, 0x200(t0)		// and back to center of CPU segment
	SAVE_GPR(r31,CNS_Q_FLAG,r1)	// Clear the dump area valid flag
	mb

//orig	// and back we go
//orig//	restore_reg 3
//orig	restore_reg 2
//orig//	restore_reg 1
//orig	restore_reg 0
//orig	// restore impure area base
//orig	unfix_impure_gpr r1

	RESTORE_GPR(r2,CNS_Q_GPR+0x10,r1)
	RESTORE_GPR(r0,CNS_Q_GPR+0x00,r1)
	lda	r1, -0x200(r1)		// Restore impure base address

	mfpr	r31, pt0		// stall for ldq_p above	//orig

	mtpr	r31, dtb_ia		// clear the tb	//orig
	mtpr	r31, itb_ia		// clear the itb	//orig

//orig	pvc_jsr	rststa, bsr=1, dest=1
	ret	r31, (r3)		// back we go	//orig
2329
2330
//
// pal_pal_bug_check -- code has found a bugcheck situation.
//	Set things up and join common machine check flow
//	(sys_mchk_collect_iprs), or halt via sys_double_machine_check
//	if MCES<mchk> was already set (low bit of pt_misc).
//
// Input:
//	r14 	- exc_addr
//
// On exit:
//	pt0	- saved r0
//	pt1	- saved	r1
//	pt4	- saved r4
//	pt5	- saved r5
//	pt6	- saved r6
//	pt10	- saved exc_addr
//	pt_misc<47:32> - mchk code
//	pt_misc<31:16> - scb vector
//	r14	- base of Cbox IPRs in IO space
//	MCES<mchk> is set
//

	ALIGN_BLOCK
	.globl pal_pal_bug_check_from_int
// Alternate entry used from interrupt context: the stack frame is
// already pushed, signalled by the low bit added to the mchk code.
pal_pal_bug_check_from_int:
	DEBUGSTORE(0x79)
//simos	DEBUG_EXC_ADDR()
	DEBUGSTORE(0x20)
//simos	bsr	r25, put_hex
	lda	r25, mchk_c_bugcheck(r31)
	addq	r25, 1, r25		// set flag indicating we came from interrupt and stack is already pushed
	br	r31, pal_pal_mchk
	nop

pal_pal_bug_check:
	lda     r25, mchk_c_bugcheck(r31)

// Common tail: pack mchk code and SCB vector into pt_misc, stash
// scratch registers, and join the machine-check collection flow.
pal_pal_mchk:
	sll	r25, 32, r25		// Move mchk code to position

	mtpr	r14, pt10		// Stash exc_addr
	mtpr	r14, exc_addr

	mfpr	r12, pt_misc		// Get MCES and scratch
	zap	r12, 0x3c, r12		// clear <47:16> (old mchk code and scb vector)

	or	r12, r25, r12		// Combine mchk code
	lda	r25, scb_v_procmchk(r31) // Get SCB vector

	sll	r25, 16, r25		// Move SCBv to position
	or	r12, r25, r25		// Combine SCBv

	mtpr	r0, pt0			// Stash for scratch
	bis	r25, mces_m_mchk, r25	// Set MCES<MCHK> bit

	mtpr	r25, pt_misc		// Save mchk code!scbv!whami!mces
	ldah	r14, 0xfff0(r31)

	mtpr	r1, pt1			// Stash for scratch
	zap	r14, 0xE0, r14		// Get Cbox IPR base

	mtpr	r4, pt4
	mtpr	r5, pt5

	mtpr	r6, pt6
	blbs	r12, sys_double_machine_check	// MCHK halt if double machine check

	br	r31, sys_mchk_collect_iprs	// Join common machine check flow
2397
2398
2399
// align_to_call_pal_section
// Align to address of first call_pal entry point - 2000

//
// HALT	- PALcode for HALT instruction
//
// Entry:
//	Vectored into via hardware PALcode instruction dispatch.
//
// Function:
//	GO to console code: back exc_addr up to the HALT itself,
//	checkpoint the PCB (SP/CC), set the halt code and enter the
//	console.
//
//

	.text	1
//	. = 0x2000
	CALL_PAL_PRIV(PAL_HALT_ENTRY)
call_pal_halt:
	mfpr	r31, pt0		// Pad exc_addr read
	mfpr	r31, pt0

	mfpr	r12, exc_addr		// get PC
	subq	r12, 4, r12		// Point to the HALT

	mtpr	r12, exc_addr
	mtpr	r0, pt0			// preserve r0 across pal_update_pcb (which uses r0 for linkage)

//orig	pvc_jsr	updpcb, bsr=1
	bsr    r0, pal_update_pcb      	// update the pcb
	lda    r0, hlt_c_sw_halt(r31)  	// set halt code to sw halt
	br     r31, sys_enter_console  	// enter the console
2431
//
// CFLUSH - PALcode for CFLUSH instruction
//
// Entry:
//	Vectored into via hardware PALcode instruction dispatch.
//
//	R16 - contains the PFN of the page to be flushed
//
// Function:
//	Flush all Dstream caches of 1 entire page
//	The CFLUSH routine is in the system specific module.
//
//

	CALL_PAL_PRIV(PAL_CFLUSH_ENTRY)
Call_Pal_Cflush:
	br	r31, sys_cflush
2449
//
// DRAINA - PALcode for DRAINA instruction
//
// Entry:
//	Vectored into via hardware PALcode instruction dispatch.
//	Implicit TRAPB performed by hardware.
//
// Function:
//	Stall instruction issue until all prior instructions are guaranteed to
//	complete without incurring aborts.  For the EV5 implementation, this
//	means waiting until all pending DREADS are returned.  A bounded
//	counter guards against a wedged Mbox: if it expires, fall into the
//	HALT flow rather than spin forever.
//
//

	CALL_PAL_PRIV(PAL_DRAINA_ENTRY)
Call_Pal_Draina:
	ldah	r14, 0x100(r31)		// Init counter.  Value?
	nop

DRAINA_LOOP:
	subq	r14, 1, r14		// Decrement counter
	mfpr	r13, ev5__maf_mode	// Fetch status bit

	srl	r13, maf_mode_v_dread_pending, r13
	ble	r14, DRAINA_LOOP_TOO_LONG

	nop
	blbs	r13, DRAINA_LOOP	// Wait until all DREADS clear

	hw_rei

DRAINA_LOOP_TOO_LONG:
	br	r31, call_pal_halt	// give up: take the sw-halt path
2483
// CALL_PAL OPCDECs
// Reserved privileged CALL_PAL function codes 0x03-0x08: each entry
// simply vectors to the common opcdec exception builder.

	CALL_PAL_PRIV(0x0003)
CallPal_OpcDec03:
	br	r31, osfpal_calpal_opcdec

	CALL_PAL_PRIV(0x0004)
CallPal_OpcDec04:
	br	r31, osfpal_calpal_opcdec

	CALL_PAL_PRIV(0x0005)
CallPal_OpcDec05:
	br	r31, osfpal_calpal_opcdec

	CALL_PAL_PRIV(0x0006)
CallPal_OpcDec06:
	br	r31, osfpal_calpal_opcdec

	CALL_PAL_PRIV(0x0007)
CallPal_OpcDec07:
	br	r31, osfpal_calpal_opcdec

	CALL_PAL_PRIV(0x0008)
CallPal_OpcDec08:
	br	r31, osfpal_calpal_opcdec
2509
2510//
2511// CSERVE - PALcode for CSERVE instruction
2512//
2513// Entry:
2514// Vectored into via hardware PALcode instruction dispatch.
2515//
2516// Function:
2517// Various functions for private use of console software
2518//
2519// option selector in r0
2520// arguments in r16....
2521// The CSERVE routine is in the system specific module.
2522//
2523//
2524
2525 CALL_PAL_PRIV(PAL_CSERVE_ENTRY)
2526Call_Pal_Cserve:
2527 br r31, sys_cserve
2528
2529//
2530// swppal - PALcode for swppal instruction
2531//
2532// Entry:
2533// Vectored into via hardware PALcode instruction dispatch.
2534// Vectored into via hardware PALcode instruction dispatch.
2535// R16 contains the new PAL identifier
2536// R17:R21 contain implementation-specific entry parameters
2537//
2538// R0 receives status:
2539// 0 success (PAL was switched)
2540// 1 unknown PAL variant
2541// 2 known PAL variant, but PAL not loaded
2542//
2543//
2544// Function:
2545// Swap control to another PAL.
2546//
2547
2548 CALL_PAL_PRIV(PAL_SWPPAL_ENTRY)
2549Call_Pal_Swppal:
2550 cmpule r16, 255, r0 // see if a kibble was passed
2551 cmoveq r16, r16, r0 // if r16=0 then a valid address (ECO 59)
2552
2553 or r16, r31, r3 // set r3 incase this is a address
2554 blbc r0, swppal_cont // nope, try it as an address
2555
2556 cmpeq r16, 2, r0 // is it our friend OSF?
2557 blbc r0, swppal_fail // nope, don't know this fellow
2558
2559 br r2, CALL_PAL_SWPPAL_10_ // tis our buddy OSF
2560
2561// .global osfpal_hw_entry_reset
2562// .weak osfpal_hw_entry_reset
2563// .long <osfpal_hw_entry_reset-pal_start>
2564//orig halt // don't know how to get the address here - kludge ok, load pal at 0
2565 .long 0 // ?? hack upon hack...pb
2566
2567CALL_PAL_SWPPAL_10_: ldl_p r3, 0(r2) // fetch target addr
2568// ble r3, swppal_fail ; if OSF not linked in say not loaded.
2569 mfpr r2, pal_base // fetch pal base
2570
2571 addq r2, r3, r3 // add pal base
2572 lda r2, 0x3FFF(r31) // get pal base checker mask
2573
2574 and r3, r2, r2 // any funky bits set?
2575 cmpeq r2, 0, r0 //
2576
2577 blbc r0, swppal_fail // return unknown if bad bit set.
2578 br r31, swppal_cont
2579
// .sbttl "CALL_PAL OPCDECs"
//
// Unimplemented privileged function codes 0x0B..0x0C: vector to the common
// OPCDEC handler.

        CALL_PAL_PRIV(0x000B)
CallPal_OpcDec0B:
        br      r31, osfpal_calpal_opcdec

        CALL_PAL_PRIV(0x000C)
CallPal_OpcDec0C:
        br      r31, osfpal_calpal_opcdec
2589
2590//
2591// wripir - PALcode for wripir instruction
2592//
2593// Entry:
2594// Vectored into via hardware PALcode instruction dispatch.
2595// r16 = processor number to interrupt
2596//
2597// Function:
2598// IPIR <- R16
2599// Handled in system-specific code
2600//
2601// Exit:
2602// interprocessor interrupt is recorded on the target processor
2603// and is initiated when the proper enabling conditions are present.
2604//
2605
2606 CALL_PAL_PRIV(PAL_WRIPIR_ENTRY)
2607Call_Pal_Wrpir:
2608 br r31, sys_wripir
2609
// .sbttl "CALL_PAL OPCDECs"
//
// Unimplemented privileged function codes 0x0E..0x0F: vector to the common
// OPCDEC handler.

        CALL_PAL_PRIV(0x000E)
CallPal_OpcDec0E:
        br      r31, osfpal_calpal_opcdec

        CALL_PAL_PRIV(0x000F)
CallPal_OpcDec0F:
        br      r31, osfpal_calpal_opcdec
2619
2620//
2621// rdmces - PALcode for rdmces instruction
2622//
2623// Entry:
2624// Vectored into via hardware PALcode instruction dispatch.
2625//
2626// Function:
2627// R0 <- ZEXT(MCES)
2628//
2629
2630 CALL_PAL_PRIV(PAL_RDMCES_ENTRY)
2631Call_Pal_Rdmces:
2632 mfpr r0, pt_mces // Read from PALtemp
2633 and r0, mces_m_all, r0 // Clear other bits
2634
2635 hw_rei
2636
2637//
2638// wrmces - PALcode for wrmces instruction
2639//
2640// Entry:
2641// Vectored into via hardware PALcode instruction dispatch.
2642//
2643// Function:
2644// If {R16<0> EQ 1} then MCES<0> <- 0 (MCHK)
2645// If {R16<1> EQ 1} then MCES<1> <- 0 (SCE)
2646// If {R16<2> EQ 1} then MCES<2> <- 0 (PCE)
2647// MCES<3> <- R16<3> (DPC)
2648// MCES<4> <- R16<4> (DSC)
2649//
2650//
2651
2652 CALL_PAL_PRIV(PAL_WRMCES_ENTRY)
2653Call_Pal_Wrmces:
2654 and r16, ((1<<mces_v_mchk) | (1<<mces_v_sce) | (1<<mces_v_pce)), r13 // Isolate MCHK, SCE, PCE
2655 mfpr r14, pt_mces // Get current value
2656
2657 ornot r31, r13, r13 // Flip all the bits
2658 and r16, ((1<<mces_v_dpc) | (1<<mces_v_dsc)), r17
2659
2660 and r14, r13, r1 // Update MCHK, SCE, PCE
2661 bic r1, ((1<<mces_v_dpc) | (1<<mces_v_dsc)), r1 // Clear old DPC, DSC
2662
2663 or r1, r17, r1 // Update DPC and DSC
2664 mtpr r1, pt_mces // Write MCES back
2665
2666 nop // Pad to fix PT write->read restriction
2667
2668 nop
2669 hw_rei
2670
2671
2672
// CALL_PAL OPCDECs
//
// Unimplemented privileged function codes 0x12..0x2A: all vector to the
// common OPCDEC (illegal CALL_PAL) handler.

        CALL_PAL_PRIV(0x0012)
CallPal_OpcDec12:
        br      r31, osfpal_calpal_opcdec

        CALL_PAL_PRIV(0x0013)
CallPal_OpcDec13:
        br      r31, osfpal_calpal_opcdec

        CALL_PAL_PRIV(0x0014)
CallPal_OpcDec14:
        br      r31, osfpal_calpal_opcdec

        CALL_PAL_PRIV(0x0015)
CallPal_OpcDec15:
        br      r31, osfpal_calpal_opcdec

        CALL_PAL_PRIV(0x0016)
CallPal_OpcDec16:
        br      r31, osfpal_calpal_opcdec

        CALL_PAL_PRIV(0x0017)
CallPal_OpcDec17:
        br      r31, osfpal_calpal_opcdec

        CALL_PAL_PRIV(0x0018)
CallPal_OpcDec18:
        br      r31, osfpal_calpal_opcdec

        CALL_PAL_PRIV(0x0019)
CallPal_OpcDec19:
        br      r31, osfpal_calpal_opcdec

        CALL_PAL_PRIV(0x001A)
CallPal_OpcDec1A:
        br      r31, osfpal_calpal_opcdec

        CALL_PAL_PRIV(0x001B)
CallPal_OpcDec1B:
        br      r31, osfpal_calpal_opcdec

        CALL_PAL_PRIV(0x001C)
CallPal_OpcDec1C:
        br      r31, osfpal_calpal_opcdec

        CALL_PAL_PRIV(0x001D)
CallPal_OpcDec1D:
        br      r31, osfpal_calpal_opcdec

        CALL_PAL_PRIV(0x001E)
CallPal_OpcDec1E:
        br      r31, osfpal_calpal_opcdec

        CALL_PAL_PRIV(0x001F)
CallPal_OpcDec1F:
        br      r31, osfpal_calpal_opcdec

        CALL_PAL_PRIV(0x0020)
CallPal_OpcDec20:
        br      r31, osfpal_calpal_opcdec

        CALL_PAL_PRIV(0x0021)
CallPal_OpcDec21:
        br      r31, osfpal_calpal_opcdec

        CALL_PAL_PRIV(0x0022)
CallPal_OpcDec22:
        br      r31, osfpal_calpal_opcdec

        CALL_PAL_PRIV(0x0023)
CallPal_OpcDec23:
        br      r31, osfpal_calpal_opcdec

        CALL_PAL_PRIV(0x0024)
CallPal_OpcDec24:
        br      r31, osfpal_calpal_opcdec

        CALL_PAL_PRIV(0x0025)
CallPal_OpcDec25:
        br      r31, osfpal_calpal_opcdec

        CALL_PAL_PRIV(0x0026)
CallPal_OpcDec26:
        br      r31, osfpal_calpal_opcdec

        CALL_PAL_PRIV(0x0027)
CallPal_OpcDec27:
        br      r31, osfpal_calpal_opcdec

        CALL_PAL_PRIV(0x0028)
CallPal_OpcDec28:
        br      r31, osfpal_calpal_opcdec

        CALL_PAL_PRIV(0x0029)
CallPal_OpcDec29:
        br      r31, osfpal_calpal_opcdec

        CALL_PAL_PRIV(0x002A)
CallPal_OpcDec2A:
        br      r31, osfpal_calpal_opcdec
2774
2775//
2776// wrfen - PALcode for wrfen instruction
2777//
2778// Entry:
2779// Vectored into via hardware PALcode instruction dispatch.
2780//
2781// Function:
2782// a0<0> -> ICSR<FPE>
2783// Store new FEN in PCB
2784// Final value of t0 (r1), t8..t10 (r22..r24) and a0 (r16)
2785// are UNPREDICTABLE
2786//
2787// Issue: What about pending FP loads when FEN goes from on->off????
2788//
2789
2790 CALL_PAL_PRIV(PAL_WRFEN_ENTRY)
2791Call_Pal_Wrfen:
2792 or r31, 1, r13 // Get a one
2793 mfpr r1, ev5__icsr // Get current FPE
2794
2795 sll r13, icsr_v_fpe, r13 // shift 1 to icsr<fpe> spot, e0
2796 and r16, 1, r16 // clean new fen
2797
2798 sll r16, icsr_v_fpe, r12 // shift new fen to correct bit position
2799 bic r1, r13, r1 // zero icsr<fpe>
2800
2801 or r1, r12, r1 // Or new FEN into ICSR
2802 mfpr r12, pt_pcbb // Get PCBB - E1
2803
2804 mtpr r1, ev5__icsr // write new ICSR. 3 Bubble cycles to HW_REI
2805 stl_p r16, osfpcb_q_fen(r12) // Store FEN in PCB.
2806
2807 mfpr r31, pt0 // Pad ICSR<FPE> write.
2808 mfpr r31, pt0
2809
2810 mfpr r31, pt0
2811// pvc_violate 225 // cuz PVC can't distinguish which bits changed
2812 hw_rei
2813
2814
        CALL_PAL_PRIV(0x002C)
CallPal_OpcDec2C:                       // unimplemented function code 0x2C
        br      r31, osfpal_calpal_opcdec
2818
2819//
2820// wrvptpr - PALcode for wrvptpr instruction
2821//
2822// Entry:
2823// Vectored into via hardware PALcode instruction dispatch.
2824//
2825// Function:
2826// vptptr <- a0 (r16)
2827//
2828
2829 CALL_PAL_PRIV(PAL_WRVPTPTR_ENTRY)
2830Call_Pal_Wrvptptr:
2831 mtpr r16, ev5__mvptbr // Load Mbox copy
2832 mtpr r16, ev5__ivptbr // Load Ibox copy
2833 nop // Pad IPR write
2834 nop
2835 hw_rei
2836
        CALL_PAL_PRIV(0x002E)
CallPal_OpcDec2E:                       // unimplemented function code 0x2E
        br      r31, osfpal_calpal_opcdec

        CALL_PAL_PRIV(0x002F)
CallPal_OpcDec2F:                       // unimplemented function code 0x2F
        br      r31, osfpal_calpal_opcdec
2844
2845
2846//
2847// swpctx - PALcode for swpctx instruction
2848//
2849// Entry:
2850// hardware dispatch via callPal instruction
2851// R16 -> new pcb
2852//
2853// Function:
2854// dynamic state moved to old pcb
2855// new state loaded from new pcb
2856// pcbb pointer set
2857// old pcbb returned in R0
2858//
2859// Note: need to add perf monitor stuff
2860//
2861
2862 CALL_PAL_PRIV(PAL_SWPCTX_ENTRY)
2863Call_Pal_Swpctx:
2864 rpcc r13 // get cyccounter
2865 mfpr r0, pt_pcbb // get pcbb
2866
2867 ldq_p r22, osfpcb_q_fen(r16) // get new fen/pme
2868 ldq_p r23, osfpcb_l_cc(r16) // get new asn
2869
2870 srl r13, 32, r25 // move offset
2871 mfpr r24, pt_usp // get usp
2872
2873 stq_p r30, osfpcb_q_ksp(r0) // store old ksp
2874// pvc_violate 379 // stq_p can't trap except replay. only problem if mf same ipr in same shadow.
2875 mtpr r16, pt_pcbb // set new pcbb
2876
2877 stq_p r24, osfpcb_q_usp(r0) // store usp
2878 addl r13, r25, r25 // merge for new time
2879
2880 stl_p r25, osfpcb_l_cc(r0) // save time
2881 ldah r24, (1<<(icsr_v_fpe-16))(r31)
2882
2883 and r22, 1, r12 // isolate fen
2884 mfpr r25, icsr // get current icsr
2885
2886 lda r24, (1<<icsr_v_pmp)(r24)
2887 br r31, swpctx_cont
2888
2889//
2890// wrval - PALcode for wrval instruction
2891//
2892// Entry:
2893// Vectored into via hardware PALcode instruction dispatch.
2894//
2895// Function:
2896// sysvalue <- a0 (r16)
2897//
2898
2899 CALL_PAL_PRIV(PAL_WRVAL_ENTRY)
2900Call_Pal_Wrval:
2901 nop
2902 mtpr r16, pt_sysval // Pad paltemp write
2903 nop
2904 nop
2905 hw_rei
2906
2907//
2908// rdval - PALcode for rdval instruction
2909//
2910// Entry:
2911// Vectored into via hardware PALcode instruction dispatch.
2912//
2913// Function:
2914// v0 (r0) <- sysvalue
2915//
2916
2917 CALL_PAL_PRIV(PAL_RDVAL_ENTRY)
2918Call_Pal_Rdval:
2919 nop
2920 mfpr r0, pt_sysval
2921 nop
2922 hw_rei
2923
2924//
2925// tbi - PALcode for tbi instruction
2926//
2927// Entry:
2928// Vectored into via hardware PALcode instruction dispatch.
2929//
2930// Function:
2931// TB invalidate
2932// r16/a0 = TBI type
2933// r17/a1 = Va for TBISx instructions
2934//
2935
2936 CALL_PAL_PRIV(PAL_TBI_ENTRY)
2937Call_Pal_Tbi:
2938 addq r16, 2, r16 // change range to 0-2
2939 br r23, CALL_PAL_tbi_10_ // get our address
2940
2941CALL_PAL_tbi_10_: cmpult r16, 6, r22 // see if in range
2942 lda r23, tbi_tbl-CALL_PAL_tbi_10_(r23) // set base to start of table
2943 sll r16, 4, r16 // * 16
2944 blbc r22, CALL_PAL_tbi_30_ // go rei, if not
2945
2946 addq r23, r16, r23 // addr of our code
2947//orig pvc_jsr tbi
2948 jmp r31, (r23) // and go do it
2949
2950CALL_PAL_tbi_30_:
2951 hw_rei
2952 nop
2953
2954//
2955// wrent - PALcode for wrent instruction
2956//
2957// Entry:
2958// Vectored into via hardware PALcode instruction dispatch.
2959//
2960// Function:
2961// Update ent* in paltemps
2962// r16/a0 = Address of entry routine
2963// r17/a1 = Entry Number 0..5
2964//
2965// r22, r23 trashed
2966//
2967
2968 CALL_PAL_PRIV(PAL_WRENT_ENTRY)
2969Call_Pal_Wrent:
2970 cmpult r17, 6, r22 // see if in range
2971 br r23, CALL_PAL_wrent_10_ // get our address
2972
2973CALL_PAL_wrent_10_: bic r16, 3, r16 // clean pc
2974 blbc r22, CALL_PAL_wrent_30_ // go rei, if not in range
2975
2976 lda r23, wrent_tbl-CALL_PAL_wrent_10_(r23) // set base to start of table
2977 sll r17, 4, r17 // *16
2978
2979 addq r17, r23, r23 // Get address in table
2980//orig pvc_jsr wrent
2981 jmp r31, (r23) // and go do it
2982
2983CALL_PAL_wrent_30_:
2984 hw_rei // out of range, just return
2985
2986//
2987// swpipl - PALcode for swpipl instruction
2988//
2989// Entry:
2990// Vectored into via hardware PALcode instruction dispatch.
2991//
2992// Function:
2993// v0 (r0) <- PS<IPL>
2994// PS<IPL> <- a0<2:0> (r16)
2995//
2996// t8 (r22) is scratch
2997//
2998
2999 CALL_PAL_PRIV(PAL_SWPIPL_ENTRY)
3000Call_Pal_Swpipl:
3001 and r16, osfps_m_ipl, r16 // clean New ipl
3002 mfpr r22, pt_intmask // get int mask
3003
3004 extbl r22, r16, r22 // get mask for this ipl
3005 bis r11, r31, r0 // return old ipl
3006
3007 bis r16, r31, r11 // set new ps
3008 mtpr r22, ev5__ipl // set new mask
3009
3010 mfpr r31, pt0 // pad ipl write
3011 mfpr r31, pt0 // pad ipl write
3012
3013 hw_rei // back
3014
3015//
3016// rdps - PALcode for rdps instruction
3017//
3018// Entry:
3019// Vectored into via hardware PALcode instruction dispatch.
3020//
3021// Function:
3022// v0 (r0) <- ps
3023//
3024
3025 CALL_PAL_PRIV(PAL_RDPS_ENTRY)
3026Call_Pal_Rdps:
3027 bis r11, r31, r0 // Fetch PALshadow PS
3028 nop // Must be 2 cycles long
3029 hw_rei
3030
3031//
3032// wrkgp - PALcode for wrkgp instruction
3033//
3034// Entry:
3035// Vectored into via hardware PALcode instruction dispatch.
3036//
3037// Function:
3038// kgp <- a0 (r16)
3039//
3040
3041 CALL_PAL_PRIV(PAL_WRKGP_ENTRY)
3042Call_Pal_Wrkgp:
3043 nop
3044 mtpr r16, pt_kgp
3045 nop // Pad for pt write->read restriction
3046 nop
3047 hw_rei
3048
3049//
3050// wrusp - PALcode for wrusp instruction
3051//
3052// Entry:
3053// Vectored into via hardware PALcode instruction dispatch.
3054//
3055// Function:
3056// usp <- a0 (r16)
3057//
3058
3059 CALL_PAL_PRIV(PAL_WRUSP_ENTRY)
3060Call_Pal_Wrusp:
3061 nop
3062 mtpr r16, pt_usp
3063 nop // Pad possible pt write->read restriction
3064 nop
3065 hw_rei
3066
3067//
3068// wrperfmon - PALcode for wrperfmon instruction
3069//
3070// Entry:
3071// Vectored into via hardware PALcode instruction dispatch.
3072//
3073//
3074// Function:
3075// Various control functions for the onchip performance counters
3076//
3077// option selector in r16
3078// option argument in r17
3079// returned status in r0
3080//
3081//
3082// r16 = 0 Disable performance monitoring for one or more cpu's
3083// r17 = 0 disable no counters
3084// r17 = bitmask disable counters specified in bit mask (1=disable)
3085//
3086// r16 = 1 Enable performance monitoring for one or more cpu's
3087// r17 = 0 enable no counters
3088// r17 = bitmask enable counters specified in bit mask (1=enable)
3089//
3090// r16 = 2 Mux select for one or more cpu's
3091// r17 = Mux selection (cpu specific)
3092// <24:19> bc_ctl<pm_mux_sel> field (see spec)
3093// <31>,<7:4>,<3:0> pmctr <sel0>,<sel1>,<sel2> fields (see spec)
3094//
3095// r16 = 3 Options
3096// r17 = (cpu specific)
3097// <0> = 0 log all processes
3098// <0> = 1 log only selected processes
3099// <30,9,8> mode select - ku,kp,kk
3100//
3101// r16 = 4 Interrupt frequency select
3102// r17 = (cpu specific) indicates interrupt frequencies desired for each
3103// counter, with "zero interrupts" being an option
3104// frequency info in r17 bits as defined by PMCTR_CTL<FRQx> below
3105//
3106// r16 = 5 Read Counters
3107// r17 = na
3108// r0 = value (same format as ev5 pmctr)
3109// <0> = 0 Read failed
3110// <0> = 1 Read succeeded
3111//
3112// r16 = 6 Write Counters
3113// r17 = value (same format as ev5 pmctr; all counters written simultaneously)
3114//
3115// r16 = 7 Enable performance monitoring for one or more cpu's and reset counter to 0
3116// r17 = 0 enable no counters
3117// r17 = bitmask enable & clear counters specified in bit mask (1=enable & clear)
3118//
3119//=============================================================================
3120//Assumptions:
3121//PMCTR_CTL:
3122//
3123// <15:14> CTL0 -- encoded frequency select and enable - CTR0
3124// <13:12> CTL1 -- " - CTR1
3125// <11:10> CTL2 -- " - CTR2
3126//
3127// <9:8> FRQ0 -- frequency select for CTR0 (no enable info)
3128// <7:6> FRQ1 -- frequency select for CTR1
3129// <5:4> FRQ2 -- frequency select for CTR2
3130//
3131// <0> all vs. select processes (0=all,1=select)
3132//
3133// where
3134// FRQx<1:0>
3135// 0 1 disable interrupt
3136// 1 0 frequency = 65536 (16384 for ctr2)
3137// 1 1 frequency = 256
3138// note: FRQx<1:0> = 00 will keep counters from ever being enabled.
3139//
3140//=============================================================================
3141//
3142 CALL_PAL_PRIV(0x0039)
3143// unsupported in Hudson code .. pboyle Nov/95
3144CALL_PAL_Wrperfmon:
3145 // "real" performance monitoring code
3146 cmpeq r16, 1, r0 // check for enable
3147 bne r0, perfmon_en // br if requested to enable
3148
3149 cmpeq r16, 2, r0 // check for mux ctl
3150 bne r0, perfmon_muxctl // br if request to set mux controls
3151
3152 cmpeq r16, 3, r0 // check for options
3153 bne r0, perfmon_ctl // br if request to set options
3154
3155 cmpeq r16, 4, r0 // check for interrupt frequency select
3156 bne r0, perfmon_freq // br if request to change frequency select
3157
3158 cmpeq r16, 5, r0 // check for counter read request
3159 bne r0, perfmon_rd // br if request to read counters
3160
3161 cmpeq r16, 6, r0 // check for counter write request
3162 bne r0, perfmon_wr // br if request to write counters
3163
3164 cmpeq r16, 7, r0 // check for counter clear/enable request
3165 bne r0, perfmon_enclr // br if request to clear/enable counters
3166
3167 beq r16, perfmon_dis // br if requested to disable (r16=0)
3168 br r31, perfmon_unknown // br if unknown request
3169
3170//
3171// rdusp - PALcode for rdusp instruction
3172//
3173// Entry:
3174// Vectored into via hardware PALcode instruction dispatch.
3175//
3176// Function:
3177// v0 (r0) <- usp
3178//
3179
3180 CALL_PAL_PRIV(PAL_RDUSP_ENTRY)
3181Call_Pal_Rdusp:
3182 nop
3183 mfpr r0, pt_usp
3184 hw_rei
3185
3186
        CALL_PAL_PRIV(0x003B)
CallPal_OpcDec3B:                       // unimplemented function code 0x3B
        br      r31, osfpal_calpal_opcdec
3190
3191//
3192// whami - PALcode for whami instruction
3193//
3194// Entry:
3195// Vectored into via hardware PALcode instruction dispatch.
3196//
3197// Function:
3198// v0 (r0) <- whami
3199//
3200 CALL_PAL_PRIV(PAL_WHAMI_ENTRY)
3201Call_Pal_Whami:
3202 nop
3203 mfpr r0, pt_whami // Get Whami
3204 extbl r0, 1, r0 // Isolate just whami bits
3205 hw_rei
3206
3207//
3208// retsys - PALcode for retsys instruction
3209//
3210// Entry:
3211// Vectored into via hardware PALcode instruction dispatch.
3212// 00(sp) contains return pc
3213// 08(sp) contains r29
3214//
3215// Function:
3216// Return from system call.
3217// mode switched from kern to user.
3218// stacks swapped, ugp, upc restored.
3219// r23, r25 junked
3220//
3221
3222 CALL_PAL_PRIV(PAL_RETSYS_ENTRY)
3223Call_Pal_Retsys:
3224 lda r25, osfsf_c_size(sp) // pop stack
3225 bis r25, r31, r14 // touch r25 & r14 to stall mf exc_addr
3226
3227 mfpr r14, exc_addr // save exc_addr in case of fault
3228 ldq r23, osfsf_pc(sp) // get pc
3229
3230 ldq r29, osfsf_gp(sp) // get gp
3231 stl_c r31, -4(sp) // clear lock_flag
3232
3233 lda r11, 1<<osfps_v_mode(r31)// new PS:mode=user
3234 mfpr r30, pt_usp // get users stack
3235
3236 bic r23, 3, r23 // clean return pc
3237 mtpr r31, ev5__ipl // zero ibox IPL - 2 bubbles to hw_rei
3238
3239 mtpr r11, ev5__dtb_cm // set Mbox current mode - no virt ref for 2 cycles
3240 mtpr r11, ev5__ps // set Ibox current mode - 2 bubble to hw_rei
3241
3242 mtpr r23, exc_addr // set return address - 1 bubble to hw_rei
3243 mtpr r25, pt_ksp // save kern stack
3244
3245 rc r31 // clear inter_flag
3246// pvc_violate 248 // possible hidden mt->mf pt violation ok in callpal
3247 hw_rei_spe // and back
3248
3249
        CALL_PAL_PRIV(0x003E)
CallPal_OpcDec3E:                       // unimplemented function code 0x3E
        br      r31, osfpal_calpal_opcdec
3253
3254//
3255// rti - PALcode for rti instruction
3256//
3257// Entry:
3258// Vectored into via hardware PALcode instruction dispatch.
3259//
3260// Function:
3261// 00(sp) -> ps
3262// 08(sp) -> pc
3263// 16(sp) -> r29 (gp)
3264// 24(sp) -> r16 (a0)
3265// 32(sp) -> r17 (a1)
3266// 40(sp) -> r18 (a3)
3267//
3268
3269 CALL_PAL_PRIV(PAL_RTI_ENTRY)
3270 /* called once by platform_tlaser */
3271 .globl Call_Pal_Rti
3272Call_Pal_Rti:
3273 lda r25, osfsf_c_size(sp) // get updated sp
3274 bis r25, r31, r14 // touch r14,r25 to stall mf exc_addr
3275
3276 mfpr r14, exc_addr // save PC in case of fault
3277 rc r31 // clear intr_flag
3278
3279 ldq r12, -6*8(r25) // get ps
3280 ldq r13, -5*8(r25) // pc
3281
3282 ldq r18, -1*8(r25) // a2
3283 ldq r17, -2*8(r25) // a1
3284
3285 ldq r16, -3*8(r25) // a0
3286 ldq r29, -4*8(r25) // gp
3287
3288 bic r13, 3, r13 // clean return pc
3289 stl_c r31, -4(r25) // clear lock_flag
3290
3291 and r12, osfps_m_mode, r11 // get mode
3292 mtpr r13, exc_addr // set return address
3293
3294 beq r11, rti_to_kern // br if rti to Kern
3295 br r31, rti_to_user // out of call_pal space
3296
3297
3298///////////////////////////////////////////////////
3299// Start the Unprivileged CALL_PAL Entry Points
3300///////////////////////////////////////////////////
3301
3302//
3303// bpt - PALcode for bpt instruction
3304//
3305// Entry:
3306// Vectored into via hardware PALcode instruction dispatch.
3307//
3308// Function:
3309// Build stack frame
3310// a0 <- code
3311// a1 <- unpred
3312// a2 <- unpred
3313// vector via entIF
3314//
3315//
3316//
3317 .text 1
3318// . = 0x3000
3319 CALL_PAL_UNPRIV(PAL_BPT_ENTRY)
3320Call_Pal_Bpt:
3321 sll r11, 63-osfps_v_mode, r25 // Shift mode up to MS bit
3322 mtpr r31, ev5__ps // Set Ibox current mode to kernel
3323
3324 bis r11, r31, r12 // Save PS for stack write
3325 bge r25, CALL_PAL_bpt_10_ // no stack swap needed if cm=kern
3326
3327 mtpr r31, ev5__dtb_cm // Set Mbox current mode to kernel -
3328 // no virt ref for next 2 cycles
3329 mtpr r30, pt_usp // save user stack
3330
3331 bis r31, r31, r11 // Set new PS
3332 mfpr r30, pt_ksp
3333
3334CALL_PAL_bpt_10_:
3335 lda sp, 0-osfsf_c_size(sp)// allocate stack space
3336 mfpr r14, exc_addr // get pc
3337
3338 stq r16, osfsf_a0(sp) // save regs
3339 bis r31, osf_a0_bpt, r16 // set a0
3340
3341 stq r17, osfsf_a1(sp) // a1
3342 br r31, bpt_bchk_common // out of call_pal space
3343
3344
3345//
3346// bugchk - PALcode for bugchk instruction
3347//
3348// Entry:
3349// Vectored into via hardware PALcode instruction dispatch.
3350//
3351// Function:
3352// Build stack frame
3353// a0 <- code
3354// a1 <- unpred
3355// a2 <- unpred
3356// vector via entIF
3357//
3358//
3359//
3360 CALL_PAL_UNPRIV(PAL_BUGCHK_ENTRY)
3361Call_Pal_Bugchk:
3362 sll r11, 63-osfps_v_mode, r25 // Shift mode up to MS bit
3363 mtpr r31, ev5__ps // Set Ibox current mode to kernel
3364
3365 bis r11, r31, r12 // Save PS for stack write
3366 bge r25, CALL_PAL_bugchk_10_ // no stack swap needed if cm=kern
3367
3368 mtpr r31, ev5__dtb_cm // Set Mbox current mode to kernel -
3369 // no virt ref for next 2 cycles
3370 mtpr r30, pt_usp // save user stack
3371
3372 bis r31, r31, r11 // Set new PS
3373 mfpr r30, pt_ksp
3374
3375CALL_PAL_bugchk_10_:
3376 lda sp, 0-osfsf_c_size(sp)// allocate stack space
3377 mfpr r14, exc_addr // get pc
3378
3379 stq r16, osfsf_a0(sp) // save regs
3380 bis r31, osf_a0_bugchk, r16 // set a0
3381
3382 stq r17, osfsf_a1(sp) // a1
3383 br r31, bpt_bchk_common // out of call_pal space
3384
3385
        CALL_PAL_UNPRIV(0x0082)
CallPal_OpcDec82:                       // unimplemented function code 0x82
        br      r31, osfpal_calpal_opcdec
3389
3390//
3391// callsys - PALcode for callsys instruction
3392//
3393// Entry:
3394// Vectored into via hardware PALcode instruction dispatch.
3395//
3396// Function:
3397// Switch mode to kernel and build a callsys stack frame.
3398// sp = ksp
3399// gp = kgp
3400// t8 - t10 (r22-r24) trashed
3401//
3402//
3403//
3404 CALL_PAL_UNPRIV(PAL_CALLSYS_ENTRY)
3405Call_Pal_Callsys:
3406
3407 and r11, osfps_m_mode, r24 // get mode
3408 mfpr r22, pt_ksp // get ksp
3409
3410 beq r24, sys_from_kern // sysCall from kern is not allowed
3411 mfpr r12, pt_entsys // get address of callSys routine
3412
3413//
3414// from here on we know we are in user going to Kern
3415//
3416 mtpr r31, ev5__dtb_cm // set Mbox current mode - no virt ref for 2 cycles
3417 mtpr r31, ev5__ps // set Ibox current mode - 2 bubble to hw_rei
3418
3419 bis r31, r31, r11 // PS=0 (mode=kern)
3420 mfpr r23, exc_addr // get pc
3421
3422 mtpr r30, pt_usp // save usp
3423 lda sp, 0-osfsf_c_size(r22)// set new sp
3424
3425 stq r29, osfsf_gp(sp) // save user gp/r29
3426 stq r24, osfsf_ps(sp) // save ps
3427
3428 stq r23, osfsf_pc(sp) // save pc
3429 mtpr r12, exc_addr // set address
3430 // 1 cycle to hw_rei
3431
3432 mfpr r29, pt_kgp // get the kern gp/r29
3433
3434 hw_rei_spe // and off we go!
3435
3436
        CALL_PAL_UNPRIV(0x0084)
CallPal_OpcDec84:                       // unimplemented function code 0x84
        br      r31, osfpal_calpal_opcdec

        CALL_PAL_UNPRIV(0x0085)
CallPal_OpcDec85:                       // unimplemented function code 0x85
        br      r31, osfpal_calpal_opcdec
3444
3445//
3446// imb - PALcode for imb instruction
3447//
3448// Entry:
3449// Vectored into via hardware PALcode instruction dispatch.
3450//
3451// Function:
3452// Flush the writebuffer and flush the Icache
3453//
3454//
3455//
3456 CALL_PAL_UNPRIV(PAL_IMB_ENTRY)
3457Call_Pal_Imb:
3458 mb // Clear the writebuffer
3459 mfpr r31, ev5__mcsr // Sync with clear
3460 nop
3461 nop
3462 br r31, pal_ic_flush // Flush Icache
3463
3464
// CALL_PAL OPCDECs
//
// Unimplemented unprivileged function codes 0x87..0x9D: all vector to the
// common OPCDEC (illegal CALL_PAL) handler.

        CALL_PAL_UNPRIV(0x0087)
CallPal_OpcDec87:
        br      r31, osfpal_calpal_opcdec

        CALL_PAL_UNPRIV(0x0088)
CallPal_OpcDec88:
        br      r31, osfpal_calpal_opcdec

        CALL_PAL_UNPRIV(0x0089)
CallPal_OpcDec89:
        br      r31, osfpal_calpal_opcdec

        CALL_PAL_UNPRIV(0x008A)
CallPal_OpcDec8A:
        br      r31, osfpal_calpal_opcdec

        CALL_PAL_UNPRIV(0x008B)
CallPal_OpcDec8B:
        br      r31, osfpal_calpal_opcdec

        CALL_PAL_UNPRIV(0x008C)
CallPal_OpcDec8C:
        br      r31, osfpal_calpal_opcdec

        CALL_PAL_UNPRIV(0x008D)
CallPal_OpcDec8D:
        br      r31, osfpal_calpal_opcdec

        CALL_PAL_UNPRIV(0x008E)
CallPal_OpcDec8E:
        br      r31, osfpal_calpal_opcdec

        CALL_PAL_UNPRIV(0x008F)
CallPal_OpcDec8F:
        br      r31, osfpal_calpal_opcdec

        CALL_PAL_UNPRIV(0x0090)
CallPal_OpcDec90:
        br      r31, osfpal_calpal_opcdec

        CALL_PAL_UNPRIV(0x0091)
CallPal_OpcDec91:
        br      r31, osfpal_calpal_opcdec

        CALL_PAL_UNPRIV(0x0092)
CallPal_OpcDec92:
        br      r31, osfpal_calpal_opcdec

        CALL_PAL_UNPRIV(0x0093)
CallPal_OpcDec93:
        br      r31, osfpal_calpal_opcdec

        CALL_PAL_UNPRIV(0x0094)
CallPal_OpcDec94:
        br      r31, osfpal_calpal_opcdec

        CALL_PAL_UNPRIV(0x0095)
CallPal_OpcDec95:
        br      r31, osfpal_calpal_opcdec

        CALL_PAL_UNPRIV(0x0096)
CallPal_OpcDec96:
        br      r31, osfpal_calpal_opcdec

        CALL_PAL_UNPRIV(0x0097)
CallPal_OpcDec97:
        br      r31, osfpal_calpal_opcdec

        CALL_PAL_UNPRIV(0x0098)
CallPal_OpcDec98:
        br      r31, osfpal_calpal_opcdec

        CALL_PAL_UNPRIV(0x0099)
CallPal_OpcDec99:
        br      r31, osfpal_calpal_opcdec

        CALL_PAL_UNPRIV(0x009A)
CallPal_OpcDec9A:
        br      r31, osfpal_calpal_opcdec

        CALL_PAL_UNPRIV(0x009B)
CallPal_OpcDec9B:
        br      r31, osfpal_calpal_opcdec

        CALL_PAL_UNPRIV(0x009C)
CallPal_OpcDec9C:
        br      r31, osfpal_calpal_opcdec

        CALL_PAL_UNPRIV(0x009D)
CallPal_OpcDec9D:
        br      r31, osfpal_calpal_opcdec
3558
3559//
3560// rdunique - PALcode for rdunique instruction
3561//
3562// Entry:
3563// Vectored into via hardware PALcode instruction dispatch.
3564//
3565// Function:
3566// v0 (r0) <- unique
3567//
3568//
3569//
3570 CALL_PAL_UNPRIV(PAL_RDUNIQUE_ENTRY)
3571CALL_PALrdunique_:
3572 mfpr r0, pt_pcbb // get pcb pointer
3573 ldq_p r0, osfpcb_q_unique(r0) // get new value
3574
3575 hw_rei
3576
3577//
3578// wrunique - PALcode for wrunique instruction
3579//
3580// Entry:
3581// Vectored into via hardware PALcode instruction dispatch.
3582//
3583// Function:
3584// unique <- a0 (r16)
3585//
3586//
3587//
3588CALL_PAL_UNPRIV(PAL_WRUNIQUE_ENTRY)
3589CALL_PAL_Wrunique:
3590 nop
3591 mfpr r12, pt_pcbb // get pcb pointer
3592 stq_p r16, osfpcb_q_unique(r12)// get new value
3593 nop // Pad palshadow write
3594 hw_rei // back
3595
// CALL_PAL OPCDECs
//
// Unimplemented unprivileged function codes 0xA0..0xA9: all vector to the
// common OPCDEC (illegal CALL_PAL) handler.

        CALL_PAL_UNPRIV(0x00A0)
CallPal_OpcDecA0:
        br      r31, osfpal_calpal_opcdec

        CALL_PAL_UNPRIV(0x00A1)
CallPal_OpcDecA1:
        br      r31, osfpal_calpal_opcdec

        CALL_PAL_UNPRIV(0x00A2)
CallPal_OpcDecA2:
        br      r31, osfpal_calpal_opcdec

        CALL_PAL_UNPRIV(0x00A3)
CallPal_OpcDecA3:
        br      r31, osfpal_calpal_opcdec

        CALL_PAL_UNPRIV(0x00A4)
CallPal_OpcDecA4:
        br      r31, osfpal_calpal_opcdec

        CALL_PAL_UNPRIV(0x00A5)
CallPal_OpcDecA5:
        br      r31, osfpal_calpal_opcdec

        CALL_PAL_UNPRIV(0x00A6)
CallPal_OpcDecA6:
        br      r31, osfpal_calpal_opcdec

        CALL_PAL_UNPRIV(0x00A7)
CallPal_OpcDecA7:
        br      r31, osfpal_calpal_opcdec

        CALL_PAL_UNPRIV(0x00A8)
CallPal_OpcDecA8:
        br      r31, osfpal_calpal_opcdec

        CALL_PAL_UNPRIV(0x00A9)
CallPal_OpcDecA9:
        br      r31, osfpal_calpal_opcdec
3637
3638
3639//
3640// gentrap - PALcode for gentrap instruction
3641//
3642// CALL_PAL_gentrap:
3643// Entry:
3644// Vectored into via hardware PALcode instruction dispatch.
3645//
3646// Function:
3647// Build stack frame
3648// a0 <- code
3649// a1 <- unpred
3650// a2 <- unpred
3651// vector via entIF
3652//
3653//
3654
3655 CALL_PAL_UNPRIV(0x00AA)
3656// unsupported in Hudson code .. pboyle Nov/95
3657CALL_PAL_gentrap:
3658 sll r11, 63-osfps_v_mode, r25 // Shift mode up to MS bit
3659 mtpr r31, ev5__ps // Set Ibox current mode to kernel
3660
3661 bis r11, r31, r12 // Save PS for stack write
3662 bge r25, CALL_PAL_gentrap_10_ // no stack swap needed if cm=kern
3663
3664 mtpr r31, ev5__dtb_cm // Set Mbox current mode to kernel -
3665 // no virt ref for next 2 cycles
3666 mtpr r30, pt_usp // save user stack
3667
3668 bis r31, r31, r11 // Set new PS
3669 mfpr r30, pt_ksp
3670
3671CALL_PAL_gentrap_10_:
3672 lda sp, 0-osfsf_c_size(sp)// allocate stack space
3673 mfpr r14, exc_addr // get pc
3674
3675 stq r16, osfsf_a0(sp) // save regs
3676 bis r31, osf_a0_gentrap, r16// set a0
3677
3678 stq r17, osfsf_a1(sp) // a1
3679 br r31, bpt_bchk_common // out of call_pal space
3680
3681
// CALL_PAL OPCDECs
//
// Unimplemented unprivileged function codes 0xAB..0xBE vector to the common
// OPCDEC handler.  Code 0xBF is repurposed to implement copypal (see below).

        CALL_PAL_UNPRIV(0x00AB)
CallPal_OpcDecAB:
        br      r31, osfpal_calpal_opcdec

        CALL_PAL_UNPRIV(0x00AC)
CallPal_OpcDecAC:
        br      r31, osfpal_calpal_opcdec

        CALL_PAL_UNPRIV(0x00AD)
CallPal_OpcDecAD:
        br      r31, osfpal_calpal_opcdec

        CALL_PAL_UNPRIV(0x00AE)
CallPal_OpcDecAE:
        br      r31, osfpal_calpal_opcdec

        CALL_PAL_UNPRIV(0x00AF)
CallPal_OpcDecAF:
        br      r31, osfpal_calpal_opcdec

        CALL_PAL_UNPRIV(0x00B0)
CallPal_OpcDecB0:
        br      r31, osfpal_calpal_opcdec

        CALL_PAL_UNPRIV(0x00B1)
CallPal_OpcDecB1:
        br      r31, osfpal_calpal_opcdec

        CALL_PAL_UNPRIV(0x00B2)
CallPal_OpcDecB2:
        br      r31, osfpal_calpal_opcdec

        CALL_PAL_UNPRIV(0x00B3)
CallPal_OpcDecB3:
        br      r31, osfpal_calpal_opcdec

        CALL_PAL_UNPRIV(0x00B4)
CallPal_OpcDecB4:
        br      r31, osfpal_calpal_opcdec

        CALL_PAL_UNPRIV(0x00B5)
CallPal_OpcDecB5:
        br      r31, osfpal_calpal_opcdec

        CALL_PAL_UNPRIV(0x00B6)
CallPal_OpcDecB6:
        br      r31, osfpal_calpal_opcdec

        CALL_PAL_UNPRIV(0x00B7)
CallPal_OpcDecB7:
        br      r31, osfpal_calpal_opcdec

        CALL_PAL_UNPRIV(0x00B8)
CallPal_OpcDecB8:
        br      r31, osfpal_calpal_opcdec

        CALL_PAL_UNPRIV(0x00B9)
CallPal_OpcDecB9:
        br      r31, osfpal_calpal_opcdec

        CALL_PAL_UNPRIV(0x00BA)
CallPal_OpcDecBA:
        br      r31, osfpal_calpal_opcdec

        CALL_PAL_UNPRIV(0x00BB)
CallPal_OpcDecBB:
        br      r31, osfpal_calpal_opcdec

        CALL_PAL_UNPRIV(0x00BC)
CallPal_OpcDecBC:
        br      r31, osfpal_calpal_opcdec

        CALL_PAL_UNPRIV(0x00BD)
CallPal_OpcDecBD:
        br      r31, osfpal_calpal_opcdec

        CALL_PAL_UNPRIV(0x00BE)
CallPal_OpcDecBE:
        br      r31, osfpal_calpal_opcdec

        CALL_PAL_UNPRIV(0x00BF)
CallPal_OpcDecBF:
        // MODIFIED BY EGH 2/25/04
        br      r31, copypal_impl       // 0xBF dispatches to copypal, not OPCDEC
3768
3769
3770/*======================================================================*/
3771/* OSF/1 CALL_PAL CONTINUATION AREA */
3772/*======================================================================*/
3773
3774 .text 2
3775
3776 . = 0x4000
3777
3778
// Continuation of MTPR_PERFMON
        ALIGN_BLOCK
        // "real" performance monitoring code
// mux ctl: set the pmctr and bc_ctl mux select fields from r17
perfmon_muxctl:
        lda     r8, 1(r31)              // get a 1
        sll     r8, pmctr_v_sel0, r8    // move to sel0 position
        or      r8, ((0xf<<pmctr_v_sel1) | (0xf<<pmctr_v_sel2)), r8 // build mux select mask
        and     r17, r8, r25            // isolate pmctr mux select bits
        mfpr    r0, ev5__pmctr
        bic     r0, r8, r0              // clear old mux select bits
        or      r0,r25, r25             // or in new mux select bits
        mtpr    r25, ev5__pmctr

        // ok, now tackle cbox mux selects
        ldah    r14, 0xfff0(r31)
        zap     r14, 0xE0, r14          // Get Cbox IPR base
//orig get_bc_ctl_shadow r16 // bc_ctl returned in lower longword
// adapted from ev5_pal_macros.mar
        mfpr    r16, pt_impure
        lda     r16, CNS_Q_IPR(r16)
        RESTORE_SHADOW(r16,CNS_Q_BC_CTL,r16);

        lda     r8, 0x3F(r31)           // build mux select mask
        sll     r8, bc_ctl_v_pm_mux_sel, r8

        and     r17, r8, r25            // isolate bc_ctl mux select bits
        bic     r16, r8, r16            // clear old mux select bits
        or      r16, r25, r25           // create new bc_ctl
        mb                              // clear out cbox for future ipr write
        stq_p   r25, ev5__bc_ctl(r14)   // store to cbox ipr
        mb                              // clear out cbox for future ipr write

//orig update_bc_ctl_shadow r25, r16 // r25=value, r16-overwritten with adjusted impure ptr
// adapted from ev5_pal_macros.mar
        mfpr    r16, pt_impure
        lda     r16, CNS_Q_IPR(r16)
        SAVE_SHADOW(r25,CNS_Q_BC_CTL,r16);

        br      r31, perfmon_success
3819
3820
// requested to disable perf monitoring
//
// perfmon_dis - disable selected performance counters.
//
// In:   r17 = counter-select mask: bit<0>=ctr0, bit<1>=ctr1, bit<2>=ctr2
//             (r17 is shifted right and consumed as the bits are tested)
// Out:  r0  = 1 (success status) via perfmon_success; hw_rei to caller
// Uses: r8, r14, r17, r25 as scratch
//
// Each counter is disabled by clearing its 2-bit CTL field in PMCTR
// (mask value 3 shifted to the field position).  The PM_CTL shadow in
// the impure area is then brought up to date.
//
perfmon_dis:
	mfpr	r14, ev5__pmctr		// read ibox pmctr ipr
perfmon_dis_ctr0:			// and begin with ctr0
	blbc	r17, perfmon_dis_ctr1	// do not disable ctr0
	lda	r8, 3(r31)		// 2-bit CTL field mask
	sll	r8, pmctr_v_ctl0, r8
	bic	r14, r8, r14		// disable ctr0
perfmon_dis_ctr1:
	srl	r17, 1, r17		// next select bit into lbs
	blbc	r17, perfmon_dis_ctr2	// do not disable ctr1
	lda	r8, 3(r31)
	sll	r8, pmctr_v_ctl1, r8
	bic	r14, r8, r14		// disable ctr1
perfmon_dis_ctr2:
	srl	r17, 1, r17
	blbc	r17, perfmon_dis_update	// do not disable ctr2
	lda	r8, 3(r31)
	sll	r8, pmctr_v_ctl2, r8
	bic	r14, r8, r14		// disable ctr2
perfmon_dis_update:
	mtpr	r14, ev5__pmctr		// update pmctr ipr
//;the following code is not needed for ev5 pass2 and later, but doesn't hurt anything to leave in
// adapted from ev5_pal_macros.mar
//orig	get_pmctr_ctl	r8, r25	// pmctr_ctl bit in r8.  adjusted impure pointer in r25
	mfpr	r25, pt_impure
	lda	r25, CNS_Q_IPR(r25)
	RESTORE_SHADOW(r8,CNS_Q_PM_CTL,r25);

	// merge the new CTL field values into the PM_CTL shadow.
	// 0x3F << pmctr_v_ctl2 covers all three contiguous 2-bit CTL fields.
	lda	r17, 0x3F(r31)		// build mask
	sll	r17, pmctr_v_ctl2, r17	// shift mask to correct position
	and	r14, r17, r14		// isolate ctl bits
	bic	r8, r17, r8		// clear out old ctl bits
	or	r14, r8, r14		// create shadow ctl bits
//orig	store_reg1	pmctr_ctl, r14, r25, ipr=1 // update pmctr_ctl register
//adjusted impure pointer still in r25
	SAVE_SHADOW(r14,CNS_Q_PM_CTL,r25);

	br	r31, perfmon_success
3860
3861
// requested to enable perf monitoring
//;the following code can be greatly simplified for pass2, but should work fine as is.
//
// perfmon_enclr / perfmon_en - enable selected counters, optionally
// clearing their count fields first.
//
// In:   r17 = counter-select mask: bit<0>=ctr0, bit<1>=ctr1, bit<2>=ctr2
//             (shifted right and consumed as the bits are tested)
// Out:  r0  = 1 (success status) via perfmon_success; hw_rei to caller
// Uses: r8, r9, r12, r13, r14, r16, r25 as scratch
//
// perfmon_enclr sets r9=1 (enable-and-clear); perfmon_en sets r9=0 and
// both fall into perfmon_en_cont.  Frequency selects for each counter
// come from the PM_CTL shadow in the impure area (r25), shifted into the
// PMCTR CTL field positions.  The PME bit from the process's PCB is
// propagated into ICSR<PMP> (the pass2 master enable).
//

perfmon_enclr:
	lda	r9, 1(r31)		// set enclr flag
	br	perfmon_en_cont

perfmon_en:
	bis	r31, r31, r9		// clear enclr flag

perfmon_en_cont:
	mfpr	r8, pt_pcbb		// get PCB base
//orig	get_pmctr_ctl r25, r25
	mfpr	r25, pt_impure
	lda	r25, CNS_Q_IPR(r25)
	RESTORE_SHADOW(r25,CNS_Q_PM_CTL,r25);	// r25 = PM_CTL shadow

	ldq_p	r16, osfpcb_q_fen(r8)	// read DAT/PME/FEN quadword
	mfpr	r14, ev5__pmctr		// read ibox pmctr ipr
	srl	r16, osfpcb_v_pme, r16	// get pme bit
	mfpr	r13, icsr
	and	r16, 1, r16		// isolate pme bit

	// this code only needed in pass2 and later:
	// copy the process's PME bit into ICSR<PMP>
	lda	r12, 1<<icsr_v_pmp(r31)	// pb
	bic	r13, r12, r13		// clear pmp bit
	sll	r16, icsr_v_pmp, r12	// move pme bit to icsr<pmp> position
	or	r12, r13, r13		// new icsr with icsr<pmp> bit set/clear
	mtpr	r13, icsr		// update icsr

	bis	r31, 1, r16		// set r16<0> on pass2 to update pmctr always (icsr provides real enable)

	sll	r25, 6, r25		// shift frequency bits into pmctr_v_ctl positions
	bis	r14, r31, r13		// copy pmctr (r13 = clears-only image,
					// r14 = clears+enables image)

perfmon_en_ctr0:			// and begin with ctr0
	blbc	r17, perfmon_en_ctr1	// do not enable ctr0

	blbc	r9, perfmon_en_noclr0	// skip clear unless enclr flag set
	lda	r8, 0xffff(r31)
	zapnot	r8, 3, r8		// ctr0<15:0> mask
	sll	r8, pmctr_v_ctr0, r8
	bic	r14, r8, r14		// clear ctr bits
	bic	r13, r8, r13		// clear ctr bits

perfmon_en_noclr0:
//orig	get_addr r8, 3<<pmctr_v_ctl0, r31
	LDLI(r8, (3<<pmctr_v_ctl0))
	and	r25, r8, r12		//isolate frequency select bits for ctr0
	bic	r14, r8, r14		// clear ctl0 bits in preparation for enabling
	or	r14,r12,r14		// or in new ctl0 bits

perfmon_en_ctr1:			// enable ctr1
	srl	r17, 1, r17		// get ctr1 enable
	blbc	r17, perfmon_en_ctr2	// do not enable ctr1

	blbc	r9, perfmon_en_noclr1	// skip clear unless enclr flag set
	lda	r8, 0xffff(r31)
	zapnot	r8, 3, r8		// ctr1<15:0> mask
	sll	r8, pmctr_v_ctr1, r8
	bic	r14, r8, r14		// clear ctr bits
	bic	r13, r8, r13		// clear ctr bits

perfmon_en_noclr1:
//orig	get_addr r8, 3<<pmctr_v_ctl1, r31
	LDLI(r8, (3<<pmctr_v_ctl1))
	and	r25, r8, r12		//isolate frequency select bits for ctr1
	bic	r14, r8, r14		// clear ctl1 bits in preparation for enabling
	or	r14,r12,r14		// or in new ctl1 bits

perfmon_en_ctr2:			// enable ctr2
	srl	r17, 1, r17		// get ctr2 enable
	blbc	r17, perfmon_en_return	// do not enable ctr2 - return

	blbc	r9, perfmon_en_noclr2	// skip clear unless enclr flag set
	lda	r8, 0x3FFF(r31)		// ctr2<13:0> mask (ctr2 is a 14-bit counter)
	sll	r8, pmctr_v_ctr2, r8
	bic	r14, r8, r14		// clear ctr bits
	bic	r13, r8, r13		// clear ctr bits

perfmon_en_noclr2:
//orig	get_addr r8, 3<<pmctr_v_ctl2, r31
	LDLI(r8, (3<<pmctr_v_ctl2))
	and	r25, r8, r12		//isolate frequency select bits for ctr2
	bic	r14, r8, r14		// clear ctl2 bits in preparation for enabling
	or	r14,r12,r14		// or in new ctl2 bits

perfmon_en_return:
	cmovlbs	r16, r14, r13		// if pme enabled, move enables into pmctr
					// else only do the counter clears
					// (r16<0> is forced to 1 above, so on
					// pass2 the enables are always taken)
	mtpr	r13, ev5__pmctr		// update pmctr ipr

//;this code not needed for pass2 and later, but does not hurt to leave it in
// keep the PM_CTL shadow's CTL fields in sync with what was written
	lda	r8, 0x3F(r31)
//orig	get_pmctr_ctl r25, r12	// read pmctr ctl; r12=adjusted impure pointer
	mfpr	r12, pt_impure
	lda	r12, CNS_Q_IPR(r12)
	RESTORE_SHADOW(r25,CNS_Q_PM_CTL,r12);

	sll	r8, pmctr_v_ctl2, r8	// build ctl mask (all three CTL fields)
	and	r8, r14, r14		// isolate new ctl bits
	bic	r25, r8, r25		// clear out old ctl value
	or	r25, r14, r14		// create new pmctr_ctl
//orig	store_reg1 pmctr_ctl, r14, r12, ipr=1
	SAVE_SHADOW(r14,CNS_Q_PM_CTL,r12); // r12 still has the adjusted impure ptr

	br	r31, perfmon_success
3970
3971
// options...
//
// perfmon_ctl - set performance-monitoring mode (which processor modes
// are counted, and all- vs. selected-process monitoring).
//
// In:   r17 = mode bits in PMCTR positions (killu/killp/killk), and
//             r17<0> = 1 for "selected processes" (clears ICSR<PMA>),
//             0 for "all processes" (sets ICSR<PMA>)
// Out:  r0  = 1 (success status) via perfmon_success; hw_rei to caller
// Uses: r0, r8, r12, r14, r25 as scratch
//
perfmon_ctl:

// set mode
//orig	get_pmctr_ctl r14, r12	// read shadow pmctr ctl; r12=adjusted impure pointer
	mfpr	r12, pt_impure
	lda	r12, CNS_Q_IPR(r12)
	RESTORE_SHADOW(r14,CNS_Q_PM_CTL,r12);

	// build mode mask for pmctr register and read-modify-write the
	// kill bits (which processor modes are excluded from counting)
	LDLI(r8, ((1<<pmctr_v_killu) | (1<<pmctr_v_killp) | (1<<pmctr_v_killk)))
	mfpr	r0, ev5__pmctr
	and	r17, r8, r25		// isolate pmctr mode bits
	bic	r0, r8, r0		// clear old mode bits
	or	r0, r25, r25		// or in new mode bits
	mtpr	r25, ev5__pmctr

	// the following code will only be used in pass2, but should
	// not hurt anything if run in pass1.
	mfpr	r8, icsr
	lda	r25, 1<<icsr_v_pma(r31)	// set icsr<pma> if r17<0>=0
	bic	r8, r25, r8		// clear old pma bit
	cmovlbs	r17, r31, r25		// and clear icsr<pma> if r17<0>=1
	or	r8, r25, r8
	mtpr	r8, icsr		// 4 bubbles to hw_rei
	mfpr	r31, pt0		// pad icsr write
	mfpr	r31, pt0		// pad icsr write

	// the following code not needed for pass2 and later, but
	// should work anyway: record the select-processes choice in the
	// PM_CTL shadow's low bit.
	bis	r14, 1, r14		// set for select processes
	blbs	r17, perfmon_sp		// branch if select processes
	bic	r14, 1, r14		// all processes
perfmon_sp:
//orig	store_reg1 pmctr_ctl, r14, r12, ipr=1 // update pmctr_ctl register
	SAVE_SHADOW(r14,CNS_Q_PM_CTL,r12); // r12 still has the adjusted impure ptr
	br	r31, perfmon_success
4009
// counter frequency select
//
// perfmon_freq - set the counter interrupt-frequency select bits in the
// PM_CTL shadow (two bits per counter, 6 bits total).
//
// In:   r17 = new frequency-select bits, already in the shadow's
//             frequency-field positions
// Out:  r0  = 1 (success status) via perfmon_success; hw_rei to caller
// Uses: r8, r12, r14, r17 as scratch
//
perfmon_freq:
//orig	get_pmctr_ctl r14, r12	// read shadow pmctr ctl; r12=adjusted impure pointer
	mfpr	r12, pt_impure
	lda	r12, CNS_Q_IPR(r12)
	RESTORE_SHADOW(r14,CNS_Q_PM_CTL,r12);

	lda	r8, 0x3F(r31)
//orig	sll	r8, pmctr_ctl_v_frq2, r8	// build mask for frequency select field
// I guess this should be a shift of 4 bits from the above control register structure
// NOTE(review): the shift amount 4 was inferred by the porter from the
// shadow-register layout, not taken from the original symbol -- confirm
// against the PM_CTL shadow definition.
#define pmctr_ctl_v_frq2_SHIFT 4
	sll	r8, pmctr_ctl_v_frq2_SHIFT, r8	// build mask for frequency select field

	and	r8, r17, r17		// isolate new frequency select bits
	bic	r14, r8, r14		// clear out old frequency select bits

	or	r17, r14, r14		// or in new frequency select info
//orig	store_reg1 pmctr_ctl, r14, r12, ipr=1	// update pmctr_ctl register
	SAVE_SHADOW(r14,CNS_Q_PM_CTL,r12); // r12 still has the adjusted impure ptr

	br	r31, perfmon_success
4031
// read counters
//
// perfmon_rd - return the raw PMCTR value to the caller.
// Out: r0 = PMCTR contents with bit<0> forced to 1 as the success
//      status (bit<0> of PMCTR is not a counter bit).
//
perfmon_rd:
	mfpr	r0, ev5__pmctr
	or	r0, 1, r0		// or in return status
	hw_rei				// back to user
4037
// write counters
//
// perfmon_wr - write the counter value fields of PMCTR from r17,
// leaving the control/mode fields untouched.
//
// In:   r17 = new counter values in their PMCTR field positions
// Out:  falls through to perfmon_success (r0 = 1, hw_rei)
// Uses: r8, r9, r14, r25 as scratch
//
perfmon_wr:
	mfpr	r14, ev5__pmctr
	lda	r8, 0x3FFF(r31)		// ctr2<13:0> mask
	sll	r8, pmctr_v_ctr2, r8

	LDLI(r9, (0xFFFFFFFF))		// ctr0<15:0>,ctr1<15:0> mask (the two
					// adjacent 16-bit fields above ctr1's
					// position; ctr2 was masked above)
	sll	r9, pmctr_v_ctr1, r9
	or	r8, r9, r8		// or ctr2, ctr1, ctr0 mask
	bic	r14, r8, r14		// clear ctr fields
	and	r17, r8, r25		// clear all but ctr fields
	or	r25, r14, r14		// write ctr fields
	mtpr	r14, ev5__pmctr		// update pmctr ipr

	mfpr	r31, pt0		// pad pmctr write (needed only to keep PVC happy)
4053
// common PERFMON exits: r0 = 1 on success, r0 = 0 on unknown function.
perfmon_success:
	or	r31, 1, r0		// set success
	hw_rei				// back to user

perfmon_unknown:
	or	r31, r31, r0		// set fail
	hw_rei				// back to user
4061
4062
//////////////////////////////////////////////////////////
// Copy code
//////////////////////////////////////////////////////////

//
// copypal_impl - PAL-resident memory copy (reached via CALL_PAL 0xBF).
//
// In:   r16 = destination address
//       r17 = source address
//       r18 = byte count
// Out:  r0  = original destination address
// Uses: r8-r14, r25 as scratch
//
// Classic Alpha unaligned-safe copy built on ldq_u/stq_u with the
// extql/extqh/insql/insqh/mskql/mskqh byte-manipulation instructions.
// Three cases:
//   - src and dst share the same offset within a quadword: merge a
//     leading partial quad, run a plain quadword loop, masked tail;
//   - differing offsets: align dst, then a 2x-unrolled shift-and-merge
//     loop, masked tail;
//   - short (<= 8 byte) copies with differing offsets: one masked
//     read-modify-write of the (up to two) destination quadwords.
//
copypal_impl:
	mov	r16, r0			// return value = original dst
#ifdef CACHE_COPY
#ifndef CACHE_COPY_UNALIGNED
	and	r16, 63, r8		// dst offset within a 64-byte block
	and	r17, 63, r9		// src offset within a 64-byte block
	bis	r8, r9, r8
	bne	r8, cache_copy_done	// only take this path if both aligned
#endif
	bic	r18, 63, r8		// r8  = whole-block byte count
	and	r18, 63, r18		// r18 = remainder for the normal path
	beq	r8, cache_copy_done
cache_loop:
	// NOTE(review): both the load and the store address r16 (the
	// destination); the source (r17) is only incremented, so the
	// 64-byte blocks consumed here are never actually copied.  This
	// looks wrong unless CACHE_COPY is intentionally left undefined
	// -- confirm before ever enabling this path.
	ldf	f17, 0(r16)
	stf	f17, 0(r16)
	addq	r17, 64, r17
	addq	r16, 64, r16
	subq	r8, 64, r8
	bne	r8, cache_loop
cache_copy_done:
#endif
	ble	r18, finished		// if len <= 0 we are finished
	ldq_u	r8, 0(r17)		// first (possibly unaligned) src quad
	xor	r17, r16, r9
	and	r9, 7, r9		// zero iff src/dst share an offset
	and	r16, 7, r10		// dst offset within its quadword
	bne	r9, unaligned
	// src and dst have the same in-quadword offset
	beq	r10, aligned		// both exactly quadword aligned
	ldq_u	r9, 0(r16)		// merge the leading dst bytes:
	addq	r18, r10, r18		// count now includes the pre-bytes
	mskqh	r8, r17, r8		// keep src bytes at/above the offset
	mskql	r9, r17, r9		// keep dst bytes below the offset
	bis	r8, r9, r8		// merged first quadword
aligned:
	subq	r18, 1, r10
	bic	r10, 7, r10		// r10 = bytes in whole quads (excl. last)
	and	r18, 7, r18		// r18 = tail byte count (0 => full last quad)
	beq	r10, aligned_done
loop:
	stq_u	r8, 0(r16)
	ldq_u	r8, 8(r17)
	subq	r10, 8, r10
	lda	r16,8(r16)
	lda	r17,8(r17)
	bne	r10, loop
aligned_done:
	bne	r18, few_left		// partial final quadword?
	stq_u	r8, 0(r16)		// no: store the last full quad
	br	r31, finished
 few_left:
	// store the final partial quadword: low r18 bytes come from the
	// src data in r8, the remaining high bytes are preserved from dst
	mskql	r8, r18, r10
	ldq_u	r9, 0(r16)
	mskqh	r9, r18, r9
	bis	r10, r9, r10
	stq_u	r10, 0(r16)
	br	r31, finished
unaligned:
	addq	r17, r18, r25		// r25 = one past the last src byte
	cmpule	r18, 8, r9
	bne	r9, unaligned_few_left	// tiny copy: special-cased below
	beq	r10, unaligned_dest_aligned
	// align the destination first: build one merged quadword covering
	// the r10 = 8 - (dst & 7) bytes up to the next dst boundary
	and	r16, 7, r10
	subq	r31, r10, r10
	addq	r10, 8, r10		// r10 = byte count to dst alignment
	ldq_u	r9, 7(r17)		// second src quadword
	extql	r8, r17, r8		// low piece of the src bytes
	extqh	r9, r17, r9		// high piece of the src bytes
	bis	r8, r9, r12		// r12 = 8 contiguous src bytes
	insql	r12, r16, r12		// reposition at the dst offset
	ldq_u	r13, 0(r16)
	mskql	r13, r16, r13		// preserve dst bytes below the offset
	bis	r12, r13, r12
	stq_u	r12, 0(r16)
	addq	r16, r10, r16
	addq	r17, r10, r17
	subq	r18, r10, r18
	ldq_u	r8, 0(r17)		// re-prime the "previous" src quad
unaligned_dest_aligned:
	subq	r18, 1, r10
	bic	r10, 7, r10		// r10 = bytes in whole quads (excl. last)
	and	r18, 7, r18		// r18 = tail byte count
	beq	r10, unaligned_partial_left
unaligned_loop:
	// 2x-unrolled shift-and-merge loop; r8 and r9 alternate roles as
	// the carried "previous" source quadword.  dst is quadword
	// aligned here, so plain stq is used.
	ldq_u	r9, 7(r17)
	lda	r17, 8(r17)
	extql	r8, r17, r12
	extqh	r9, r17, r13
	subq	r10, 8, r10
	bis	r12, r13, r13
	stq	r13, 0(r16)
	lda	r16, 8(r16)
	beq	r10, unaligned_second_partial_left
	ldq_u	r8, 7(r17)
	lda	r17, 8(r17)
	extql	r9, r17, r12
	extqh	r8, r17, r13
	bis	r12, r13, r13
	subq	r10, 8, r10
	stq	r13, 0(r16)
	lda	r16, 8(r16)
	bne	r10, unaligned_loop
unaligned_partial_left:
	mov	r8, r9			// normalize: r9 = carried src quad
unaligned_second_partial_left:
	// assemble the last 8 source bytes (ending at the src end, r25)
	// positioned for the aligned destination
	ldq_u	r8, -1(r25)
	extql	r9, r17, r9
	extqh	r8, r17, r8
	bis	r8, r9, r8
	bne	r18, few_left		// masked tail shared with aligned path
	stq_u	r8, 0(r16)
	br	r31, finished
unaligned_few_left:
	// <= 8 bytes, differing offsets: the copy may span two dst
	// quadwords.  Build byte masks for exactly r18 bytes starting at
	// dst (r12 = low-quad part, r13 = high-quad part), then
	// read-modify-write both quadwords.
	ldq_u	r9, -1(r25)
	extql	r8, r17, r8
	extqh	r9, r17, r9
	bis	r8, r9, r8		// r8 = the r18 src bytes, in the LSBs
	insqh	r8, r16, r9		// src data, high-quad portion
	insql	r8, r16, r8		// src data, low-quad portion
	lda	r12, -1(r31)		// all-ones
	mskql	r12, r18, r13		// low r18 bytes set (0 when r18 == 8)
	cmovne	r13, r13, r12		// r12 = r18-byte mask (stays -1 if r18 == 8)
	insqh	r12, r16, r13		// mask, high-quad portion
	insql	r12, r16, r12		// mask, low-quad portion
	addq	r16, r18, r10		// r10 = one past the last dst byte
	ldq_u	r14, 0(r16)
	ldq_u	r25, -1(r10)
	bic	r14, r12, r14		// drop dst bytes being replaced
	bic	r25, r13, r25
	and	r8, r12, r8		// keep only the masked src bytes
	and	r9, r13, r9
	bis	r8, r14, r8
	bis	r9, r25, r9
	stq_u	r9, -1(r10)		// if the copy fits in one quadword,
	stq_u	r8, 0(r16)		// both stq_u hit the same quad and the
					// low-quad store (full data) wins
finished:
	hw_rei				// return to caller