Deleted Added
sdiff udiff text old ( 12334:e0ab29a34764 ) new ( 13413:b84a7c832ead )
full compact
1/*
2 * Copyright (c) 2014 The University of Wisconsin
3 *
4 * Copyright (c) 2006 INRIA (Institut National de Recherche en
5 * Informatique et en Automatique / French National Research Institute
6 * for Computer Science and Applied Mathematics)
7 *
8 * All rights reserved.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions are
12 * met: redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer;
14 * redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution;
17 * neither the name of the copyright holders nor the names of its
18 * contributors may be used to endorse or promote products derived from
19 * this software without specific prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 *
33 * Authors: Vignyan Reddy, Dibakar Gope and Arthur Perais,
34 * from André Seznec's code.
35 */
36
/* @file
 * Implementation of an L-TAGE branch predictor
 */
40
41#include "cpu/pred/ltage.hh"
42
43#include "base/intmath.hh"
44#include "base/logging.hh"
45#include "base/random.hh"
46#include "base/trace.hh"
47#include "debug/Fetch.hh"
48#include "debug/LTage.hh"
49
LTAGE::LTAGE(const LTAGEParams *params)
  : BPredUnit(params),
    logSizeBiMP(params->logSizeBiMP),
    logSizeTagTables(params->logSizeTagTables),
    logSizeLoopPred(params->logSizeLoopPred),
    nHistoryTables(params->nHistoryTables),
    tagTableCounterBits(params->tagTableCounterBits),
    histBufferSize(params->histBufferSize),
    minHist(params->minHist),
    maxHist(params->maxHist),
    minTagWidth(params->minTagWidth),
    threadHistory(params->numThreads)
{
    // updateGHist() rolls the history buffer over by copying the last
    // maxHist outcomes to the back, so the buffer must exceed 2 * maxHist.
    assert(params->histBufferSize > params->maxHist * 2);
    useAltPredForNewlyAllocated = 0;
    logTick = 19;
    // Start the useful-bit reset counter halfway through its period.
    tCounter = ULL(1) << (logTick - 1);

    // Per-thread speculative state: path history plus a large global
    // history buffer managed as a sliding window (see updateGHist()).
    for (auto& history : threadHistory) {
        history.pathHist = 0;
        history.globalHistory = new uint8_t[histBufferSize];
        history.gHist = history.globalHistory;
        memset(history.gHist, 0, histBufferSize);
        history.ptGhist = 0;
    }

    // History lengths form a geometric series from minHist to maxHist
    // (the "GEometric" in TAGE).
    histLengths = new int [nHistoryTables+1];
    histLengths[1] = minHist;
    histLengths[nHistoryTables] = maxHist;

    for (int i = 2; i <= nHistoryTables; i++) {
        histLengths[i] = (int) (((double) minHist *
                       pow ((double) (maxHist) / (double) minHist,
                           (double) (i - 1) / (double) ((nHistoryTables- 1))))
                       + 0.5);
    }

    // Hard-coded per-bank partial-tag widths and log2 table sizes.
    // NOTE(review): these tables assume nHistoryTables == 12; other values
    // would leave entries unset or unused -- confirm against the LTAGE
    // python parameters.
    tagWidths[1] = minTagWidth;
    tagWidths[2] = minTagWidth;
    tagWidths[3] = minTagWidth + 1;
    tagWidths[4] = minTagWidth + 1;
    tagWidths[5] = minTagWidth + 2;
    tagWidths[6] = minTagWidth + 3;
    tagWidths[7] = minTagWidth + 4;
    tagWidths[8] = minTagWidth + 5;
    tagWidths[9] = minTagWidth + 5;
    tagWidths[10] = minTagWidth + 6;
    tagWidths[11] = minTagWidth + 7;
    tagWidths[12] = minTagWidth + 8;

    for (int i = 1; i <= 2; i++)
        tagTableSizes[i] = logSizeTagTables - 1;
    for (int i = 3; i <= 6; i++)
        tagTableSizes[i] = logSizeTagTables;
    for (int i = 7; i <= 10; i++)
        tagTableSizes[i] = logSizeTagTables - 1;
    for (int i = 11; i <= 12; i++)
        tagTableSizes[i] = logSizeTagTables - 2;

    // Folded ("circular shift register") histories that compress the long
    // global history into index-sized and tag-sized values per bank.
    for (auto& history : threadHistory) {
        history.computeIndices = new FoldedHistory[nHistoryTables+1];
        history.computeTags[0] = new FoldedHistory[nHistoryTables+1];
        history.computeTags[1] = new FoldedHistory[nHistoryTables+1];

        for (int i = 1; i <= nHistoryTables; i++) {
            history.computeIndices[i].init(histLengths[i], (tagTableSizes[i]));
            history.computeTags[0][i].init(
                history.computeIndices[i].origLength, tagWidths[i]);
            history.computeTags[1][i].init(
                history.computeIndices[i].origLength, tagWidths[i] - 1);
            DPRINTF(LTage, "HistLength:%d, TTSize:%d, TTTWidth:%d\n",
                    histLengths[i], tagTableSizes[i], tagWidths[i]);
        }
    }

    // Prediction tables: bimodal base predictor, loop predictor, and the
    // tagged TAGE banks (index 0 of gtable is unused).
    btable = new BimodalEntry[ULL(1) << logSizeBiMP];
    ltable = new LoopEntry[ULL(1) << logSizeLoopPred];
    gtable = new TageEntry*[nHistoryTables + 1];
    for (int i = 1; i <= nHistoryTables; i++) {
        gtable[i] = new TageEntry[1<<(tagTableSizes[i])];
    }

    // Scratch arrays recomputed on every prediction.
    tableIndices = new int [nHistoryTables+1];
    tableTags = new int [nHistoryTables+1];

    loopUseCounter = 0;
}
137
138int
139LTAGE::bindex(Addr pc_in) const
140{
141 return ((pc_in >> instShiftAmt) & ((ULL(1) << (logSizeBiMP)) - 1));
142}
143
144int
145LTAGE::lindex(Addr pc_in) const
146{
147 return (((pc_in >> instShiftAmt) &
148 ((ULL(1) << (logSizeLoopPred - 2)) - 1)) << 2);
149}
150
151int
152LTAGE::F(int A, int size, int bank) const
153{
154 int A1, A2;
155
156 A = A & ((ULL(1) << size) - 1);
157 A1 = (A & ((ULL(1) << tagTableSizes[bank]) - 1));
158 A2 = (A >> tagTableSizes[bank]);
159 A2 = ((A2 << bank) & ((ULL(1) << tagTableSizes[bank]) - 1))
160 + (A2 >> (tagTableSizes[bank] - bank));
161 A = A1 ^ A2;
162 A = ((A << bank) & ((ULL(1) << tagTableSizes[bank]) - 1))
163 + (A >> (tagTableSizes[bank] - bank));
164 return (A);
165}
166
167
168// gindex computes a full hash of pc, ghist and pathHist
169int
170LTAGE::gindex(ThreadID tid, Addr pc, int bank) const
171{
172 int index;
173 int hlen = (histLengths[bank] > 16) ? 16 : histLengths[bank];
174 index =
175 (pc >> instShiftAmt) ^
176 ((pc >> instShiftAmt) >> ((int) abs(tagTableSizes[bank] - bank) + 1)) ^
177 threadHistory[tid].computeIndices[bank].comp ^
178 F(threadHistory[tid].pathHist, hlen, bank);
179
180 return (index & ((ULL(1) << (tagTableSizes[bank])) - 1));
181}
182
183
184// Tag computation
185uint16_t
186LTAGE::gtag(ThreadID tid, Addr pc, int bank) const
187{
188 int tag = (pc >> instShiftAmt) ^
189 threadHistory[tid].computeTags[0][bank].comp ^
190 (threadHistory[tid].computeTags[1][bank].comp << 1);
191
192 return (tag & ((ULL(1) << tagWidths[bank]) - 1));
193}
194
195
196// Up-down saturating counter
197void
198LTAGE::ctrUpdate(int8_t & ctr, bool taken, int nbits)
199{
200 assert(nbits <= sizeof(int8_t) << 3);
201 if (taken) {
202 if (ctr < ((1 << (nbits - 1)) - 1))
203 ctr++;
204 } else {
205 if (ctr > -(1 << (nbits - 1)))
206 ctr--;
207 }
208}
209
210// Bimodal prediction
211bool
212LTAGE::getBimodePred(Addr pc, BranchInfo* bi) const
213{
214 return (btable[bi->bimodalIndex].pred > 0);
215}
216
217
218// Update the bimodal predictor: a hysteresis bit is shared among 4 prediction
219// bits
220void
221LTAGE::baseUpdate(Addr pc, bool taken, BranchInfo* bi)
222{
223 int inter = (btable[bi->bimodalIndex].pred << 1)
224 + btable[bi->bimodalIndex ].hyst;
225 if (taken) {
226 if (inter < 3)
227 inter++;
228 } else if (inter > 0) {
229 inter--;
230 }
231 btable[bi->bimodalIndex].pred = inter >> 1;
232 btable[bi->bimodalIndex].hyst = (inter & 1);
233 DPRINTF(LTage, "Updating branch %lx, pred:%d, hyst:%d\n",
234 pc, btable[bi->bimodalIndex].pred,btable[bi->bimodalIndex].hyst);
235}
236
237
//loop prediction: only used if high confidence
// Looks the branch up in the 4-way loop table; on a tag hit it records
// the hit way and speculative iteration count in bi, and predicts the
// exit direction on the final iteration of the learned trip count.
bool
LTAGE::getLoop(Addr pc, BranchInfo* bi) const
{
    bi->loopHit = -1;
    bi->loopPredValid = false;
    bi->loopIndex = lindex(pc);
    // Tag comes from the PC bits above those used for the set index.
    bi->loopTag = ((pc) >> (instShiftAmt + logSizeLoopPred - 2));

    for (int i = 0; i < 4; i++) {
        if (ltable[bi->loopIndex + i].tag == bi->loopTag) {
            bi->loopHit = i;
            // Only trust the loop predictor once confidence saturates (>= 3).
            bi->loopPredValid = (ltable[bi->loopIndex + i].confidence >= 3);
            bi->currentIter = ltable[bi->loopIndex + i].currentIterSpec;
            // Predict the opposite direction on the iteration that ends the
            // loop; otherwise predict the learned loop direction.
            if (ltable[bi->loopIndex + i].currentIterSpec + 1 ==
                ltable[bi->loopIndex + i].numIter) {
                return !(ltable[bi->loopIndex + i].dir);
            }else {
                return (ltable[bi->loopIndex + i].dir);
            }
        }
    }
    return false;
}
262
263void
264LTAGE::specLoopUpdate(Addr pc, bool taken, BranchInfo* bi)
265{
266 if (bi->loopHit>=0) {
267 int index = lindex(pc);
268 if (taken != ltable[index].dir) {
269 ltable[index].currentIterSpec = 0;
270 } else {
271 ltable[index].currentIterSpec++;
272 }
273 }
274}
275
// Non-speculative loop-predictor update, called once the branch outcome is
// known. Trains the trip count, confidence and replacement age of the hit
// entry, and tries to allocate an entry on a taken miss.
void
LTAGE::loopUpdate(Addr pc, bool taken, BranchInfo* bi)
{
    int idx = bi->loopIndex + bi->loopHit;
    if (bi->loopHit >= 0) {
        //already a hit
        if (bi->loopPredValid) {
            if (taken != bi->loopPred) {
                // free the entry
                ltable[idx].numIter = 0;
                ltable[idx].age = 0;
                ltable[idx].confidence = 0;
                ltable[idx].currentIter = 0;
                return;
            } else if (bi->loopPred != bi->tagePred) {
                // The loop predictor beat TAGE: make this entry harder to
                // evict by aging it up.
                DPRINTF(LTage, "Loop Prediction success:%lx\n",pc);
                if (ltable[idx].age < 7)
                    ltable[idx].age++;
            }
        }

        ltable[idx].currentIter++;
        if (ltable[idx].currentIter > ltable[idx].numIter) {
            // Ran past the recorded trip count: the entry is unreliable.
            ltable[idx].confidence = 0;
            if (ltable[idx].numIter != 0) {
                // free the entry
                ltable[idx].numIter = 0;
                ltable[idx].age = 0;
                ltable[idx].confidence = 0;
            }
        }

        // Direction flipped relative to the learned body direction: the
        // loop just exited.
        if (taken != ltable[idx].dir) {
            if (ltable[idx].currentIter == ltable[idx].numIter) {
                DPRINTF(LTage, "Loop End predicted successfully:%lx\n", pc);

                if (ltable[idx].confidence < 7) {
                    ltable[idx].confidence++;
                }
                //just do not predict when the loop count is 1 or 2
                if (ltable[idx].numIter < 3) {
                    // free the entry
                    ltable[idx].dir = taken;
                    ltable[idx].numIter = 0;
                    ltable[idx].age = 0;
                    ltable[idx].confidence = 0;
                }
            } else {
                DPRINTF(LTage, "Loop End predicted incorrectly:%lx\n", pc);
                if (ltable[idx].numIter == 0) {
                    // first complete nest;
                    ltable[idx].confidence = 0;
                    ltable[idx].numIter = ltable[idx].currentIter;
                } else {
                    //not the same number of iterations as last time: free the
                    //entry
                    ltable[idx].numIter = 0;
                    ltable[idx].age = 0;
                    ltable[idx].confidence = 0;
                }
            }
            ltable[idx].currentIter = 0;
        }

    } else if (taken) {
        //try to allocate an entry on taken branch
        // Probe the 4 ways in a pseudo-random order; only age-0 entries may
        // be replaced, others are aged down instead.
        int nrand = random_mt.random<int>();
        for (int i = 0; i < 4; i++) {
            int loop_hit = (nrand + i) & 3;
            idx = bi->loopIndex + loop_hit;
            if (ltable[idx].age == 0) {
                DPRINTF(LTage, "Allocating loop pred entry for branch %lx\n",
                        pc);
                // dir records the NOT-taken (body-exit) direction; the loop
                // body iterations are the 'taken' side.
                ltable[idx].dir = !taken;
                ltable[idx].tag = bi->loopTag;
                ltable[idx].numIter = 0;
                ltable[idx].age = 7;
                ltable[idx].confidence = 0;
                ltable[idx].currentIter = 1;
                break;

            }
            else
                ltable[idx].age--;
        }
    }

}
364
365// shifting the global history: we manage the history in a big table in order
366// to reduce simulation time
367void
368LTAGE::updateGHist(uint8_t * &h, bool dir, uint8_t * tab, int &pt)
369{
370 if (pt == 0) {
371 DPRINTF(LTage, "Rolling over the histories\n");
372 // Copy beginning of globalHistoryBuffer to end, such that
373 // the last maxHist outcomes are still reachable
374 // through pt[0 .. maxHist - 1].
375 for (int i = 0; i < maxHist; i++)
376 tab[histBufferSize - maxHist + i] = tab[i];
377 pt = histBufferSize - maxHist;
378 h = &tab[pt];
379 }
380 pt--;
381 h--;
382 h[0] = (dir) ? 1 : 0;
383}
384
385// Get GHR for hashing indirect predictor
386// Build history backwards from pointer in
387// bp_history.
388unsigned
389LTAGE::getGHR(ThreadID tid, void *bp_history) const
390{
391 BranchInfo* bi = static_cast<BranchInfo*>(bp_history);
392 unsigned val = 0;
393 for (unsigned i = 0; i < 32; i++) {
394 // Make sure we don't go out of bounds
395 int gh_offset = bi->ptGhist + i;
396 assert(&(threadHistory[tid].globalHistory[gh_offset]) <
397 threadHistory[tid].globalHistory + histBufferSize);
398 val |= ((threadHistory[tid].globalHistory[gh_offset] & 0x1) << i);
399 }
400
401 return val;
402}
403
//prediction
// Makes the combined L-TAGE prediction for one branch and allocates the
// BranchInfo record (returned via b) that carries the speculative state
// needed later by update()/squash(). Unconditional branches only get the
// record; their prediction is fixed to taken.
bool
LTAGE::predict(ThreadID tid, Addr branch_pc, bool cond_branch, void* &b)
{
    BranchInfo *bi = new BranchInfo(nHistoryTables+1);
    b = (void*)(bi);
    Addr pc = branch_pc;
    bool pred_taken = true;
    bi->loopHit = -1;

    if (cond_branch) {
        // TAGE prediction

        // computes the table addresses and the partial tags
        for (int i = 1; i <= nHistoryTables; i++) {
            tableIndices[i] = gindex(tid, pc, i);
            bi->tableIndices[i] = tableIndices[i];
            tableTags[i] = gtag(tid, pc, i);
            bi->tableTags[i] = tableTags[i];
        }

        bi->bimodalIndex = bindex(pc);

        bi->hitBank = 0;
        bi->altBank = 0;
        //Look for the bank with longest matching history
        for (int i = nHistoryTables; i > 0; i--) {
            if (gtable[i][tableIndices[i]].tag == tableTags[i]) {
                bi->hitBank = i;
                bi->hitBankIndex = tableIndices[bi->hitBank];
                break;
            }
        }
        //Look for the alternate bank
        for (int i = bi->hitBank - 1; i > 0; i--) {
            if (gtable[i][tableIndices[i]].tag == tableTags[i]) {
                bi->altBank = i;
                bi->altBankIndex = tableIndices[bi->altBank];
                break;
            }
        }
        //computes the prediction and the alternate prediction
        if (bi->hitBank > 0) {
            if (bi->altBank > 0) {
                bi->altTaken =
                    gtable[bi->altBank][tableIndices[bi->altBank]].ctr >= 0;
            }else {
                bi->altTaken = getBimodePred(pc, bi);
            }

            bi->longestMatchPred =
                gtable[bi->hitBank][tableIndices[bi->hitBank]].ctr >= 0;
            // A weak counter (2*ctr+1 in {-1, 1}) marks a (pseudo-)newly
            // allocated entry.
            bi->pseudoNewAlloc =
                abs(2 * gtable[bi->hitBank][bi->hitBankIndex].ctr + 1) <= 1;

            //if the entry is recognized as a newly allocated entry and
            //useAltPredForNewlyAllocated is positive use the alternate
            //prediction
            if ((useAltPredForNewlyAllocated < 0)
                 || abs(2 *
                  gtable[bi->hitBank][tableIndices[bi->hitBank]].ctr + 1) > 1)
                bi->tagePred = bi->longestMatchPred;
            else
                bi->tagePred = bi->altTaken;
        } else {
            // No tagged bank hit: fall back to the bimodal prediction.
            bi->altTaken = getBimodePred(pc, bi);
            bi->tagePred = bi->altTaken;
            bi->longestMatchPred = bi->altTaken;
        }
        //end TAGE prediction

        bi->loopPred = getLoop(pc, bi); // loop prediction

        // The chooser (loopUseCounter) arbitrates between a confident loop
        // prediction and the TAGE prediction.
        pred_taken = (((loopUseCounter >= 0) && bi->loopPredValid)) ?
            (bi->loopPred): (bi->tagePred);
        DPRINTF(LTage, "Predict for %lx: taken?:%d, loopTaken?:%d, "
                "loopValid?:%d, loopUseCounter:%d, tagePred:%d, altPred:%d\n",
                branch_pc, pred_taken, bi->loopPred, bi->loopPredValid,
                loopUseCounter, bi->tagePred, bi->altTaken);
    }
    bi->branchPC = branch_pc;
    bi->condBranch = cond_branch;
    // Speculatively advance the loop-iteration counter for the hit entry.
    specLoopUpdate(branch_pc, pred_taken, bi);
    return pred_taken;
}
489
490// PREDICTOR UPDATE
491void
492LTAGE::update(ThreadID tid, Addr branch_pc, bool taken, void* bp_history,
493 bool squashed)
494{
495 assert(bp_history);
496
497 BranchInfo *bi = static_cast<BranchInfo*>(bp_history);
498
499 if (squashed) {
500 // This restores the global history, then update it
501 // and recomputes the folded histories.
502 squash(tid, taken, bp_history);
503 return;
504 }
505
506 int nrand = random_mt.random<int>(0,3);
507 Addr pc = branch_pc;
508 if (bi->condBranch) {
509 DPRINTF(LTage, "Updating tables for branch:%lx; taken?:%d\n",
510 branch_pc, taken);
511 // first update the loop predictor
512 loopUpdate(pc, taken, bi);
513
514 if (bi->loopPredValid) {
515 if (bi->tagePred != bi->loopPred) {
516 ctrUpdate(loopUseCounter, (bi->loopPred== taken), 7);
517 }
518 }
519
520 // TAGE UPDATE
521 // try to allocate a new entries only if prediction was wrong
522 bool longest_match_pred = false;
523 bool alloc = (bi->tagePred != taken) && (bi->hitBank < nHistoryTables);
524 if (bi->hitBank > 0) {
525 // Manage the selection between longest matching and alternate
526 // matching for "pseudo"-newly allocated longest matching entry
527 longest_match_pred = bi->longestMatchPred;
528 bool PseudoNewAlloc = bi->pseudoNewAlloc;
529 // an entry is considered as newly allocated if its prediction
530 // counter is weak
531 if (PseudoNewAlloc) {
532 if (longest_match_pred == taken) {
533 alloc = false;
534 }
535 // if it was delivering the correct prediction, no need to
536 // allocate new entry even if the overall prediction was false
537 if (longest_match_pred != bi->altTaken) {
538 ctrUpdate(useAltPredForNewlyAllocated,
539 bi->altTaken == taken, 4);
540 }
541 }
542 }
543
544 if (alloc) {
545 // is there some "unuseful" entry to allocate
546 int8_t min = 1;
547 for (int i = nHistoryTables; i > bi->hitBank; i--) {
548 if (gtable[i][bi->tableIndices[i]].u < min) {
549 min = gtable[i][bi->tableIndices[i]].u;
550 }
551 }
552
553 // we allocate an entry with a longer history
554 // to avoid ping-pong, we do not choose systematically the next
555 // entry, but among the 3 next entries
556 int Y = nrand &
557 ((ULL(1) << (nHistoryTables - bi->hitBank - 1)) - 1);
558 int X = bi->hitBank + 1;
559 if (Y & 1) {
560 X++;
561 if (Y & 2)
562 X++;
563 }
564 // No entry available, forces one to be available
565 if (min > 0) {
566 gtable[X][bi->tableIndices[X]].u = 0;
567 }
568
569
570 //Allocate only one entry
571 for (int i = X; i <= nHistoryTables; i++) {
572 if ((gtable[i][bi->tableIndices[i]].u == 0)) {
573 gtable[i][bi->tableIndices[i]].tag = bi->tableTags[i];
574 gtable[i][bi->tableIndices[i]].ctr = (taken) ? 0 : -1;
575 gtable[i][bi->tableIndices[i]].u = 0; //?
576 }
577 }
578 }
579 //periodic reset of u: reset is not complete but bit by bit
580 tCounter++;
581 if ((tCounter & ((ULL(1) << logTick) - 1)) == 0) {
582 // reset least significant bit
583 // most significant bit becomes least significant bit
584 for (int i = 1; i <= nHistoryTables; i++) {
585 for (int j = 0; j < (ULL(1) << tagTableSizes[i]); j++) {
586 gtable[i][j].u = gtable[i][j].u >> 1;
587 }
588 }
589 }
590
591 if (bi->hitBank > 0) {
592 DPRINTF(LTage, "Updating tag table entry (%d,%d) for branch %lx\n",
593 bi->hitBank, bi->hitBankIndex, branch_pc);
594 ctrUpdate(gtable[bi->hitBank][bi->hitBankIndex].ctr, taken,
595 tagTableCounterBits);
596 // if the provider entry is not certified to be useful also update
597 // the alternate prediction
598 if (gtable[bi->hitBank][bi->hitBankIndex].u == 0) {
599 if (bi->altBank > 0) {
600 ctrUpdate(gtable[bi->altBank][bi->altBankIndex].ctr, taken,
601 tagTableCounterBits);
602 DPRINTF(LTage, "Updating tag table entry (%d,%d) for"
603 " branch %lx\n", bi->hitBank, bi->hitBankIndex,
604 branch_pc);
605 }
606 if (bi->altBank == 0) {
607 baseUpdate(pc, taken, bi);
608 }
609 }
610
611 // update the u counter
612 if (longest_match_pred != bi->altTaken) {
613 if (longest_match_pred == taken) {
614 if (gtable[bi->hitBank][bi->hitBankIndex].u < 1) {
615 gtable[bi->hitBank][bi->hitBankIndex].u++;
616 }
617 }
618 }
619 } else {
620 baseUpdate(pc, taken, bi);
621 }
622
623 //END PREDICTOR UPDATE
624 }
625 if (!squashed) {
626 delete bi;
627 }
628}
629
630void
631LTAGE::updateHistories(ThreadID tid, Addr branch_pc, bool taken, void* b)
632{
633 BranchInfo* bi = (BranchInfo*)(b);
634 ThreadHistory& tHist = threadHistory[tid];
635 // UPDATE HISTORIES
636 bool pathbit = ((branch_pc >> instShiftAmt) & 1);
637 //on a squash, return pointers to this and recompute indices.
638 //update user history
639 updateGHist(tHist.gHist, taken, tHist.globalHistory, tHist.ptGhist);
640 tHist.pathHist = (tHist.pathHist << 1) + pathbit;
641 tHist.pathHist = (tHist.pathHist & ((ULL(1) << 16) - 1));
642
643 bi->ptGhist = tHist.ptGhist;
644 bi->pathHist = tHist.pathHist;
645 //prepare next index and tag computations for user branchs
646 for (int i = 1; i <= nHistoryTables; i++)
647 {
648 bi->ci[i] = tHist.computeIndices[i].comp;
649 bi->ct0[i] = tHist.computeTags[0][i].comp;
650 bi->ct1[i] = tHist.computeTags[1][i].comp;
651 tHist.computeIndices[i].update(tHist.gHist);
652 tHist.computeTags[0][i].update(tHist.gHist);
653 tHist.computeTags[1][i].update(tHist.gHist);
654 }
655 DPRINTF(LTage, "Updating global histories with branch:%lx; taken?:%d, "
656 "path Hist: %x; pointer:%d\n", branch_pc, taken, tHist.pathHist,
657 tHist.ptGhist);
658}
659
// Squash recovery: restore the speculative history state (path history,
// global history pointer, folded histories) from the snapshot taken at
// prediction time, then insert the corrected outcome and recompute the
// folded values.
void
LTAGE::squash(ThreadID tid, bool taken, void *bp_history)
{
    BranchInfo* bi = (BranchInfo*)(bp_history);
    ThreadHistory& tHist = threadHistory[tid];
    DPRINTF(LTage, "Restoring branch info: %lx; taken? %d; PathHistory:%x, "
            "pointer:%d\n", bi->branchPC,taken, bi->pathHist, bi->ptGhist);
    tHist.pathHist = bi->pathHist;
    tHist.ptGhist = bi->ptGhist;
    tHist.gHist = &(tHist.globalHistory[tHist.ptGhist]);
    // Overwrite the speculative history bit with the actual outcome.
    tHist.gHist[0] = (taken ? 1 : 0);
    for (int i = 1; i <= nHistoryTables; i++) {
        tHist.computeIndices[i].comp = bi->ci[i];
        tHist.computeTags[0][i].comp = bi->ct0[i];
        tHist.computeTags[1][i].comp = bi->ct1[i];
        tHist.computeIndices[i].update(tHist.gHist);
        tHist.computeTags[0][i].update(tHist.gHist);
        tHist.computeTags[1][i].update(tHist.gHist);
    }

    // Roll back the speculative loop-iteration bump made by
    // specLoopUpdate() for the entry that hit at prediction time.
    if (bi->condBranch) {
        if (bi->loopHit >= 0) {
            int idx = bi->loopIndex + bi->loopHit;
            ltable[idx].currentIterSpec = bi->currentIter;
        }
    }

}
688
689void
690LTAGE::squash(ThreadID tid, void *bp_history)
691{
692 BranchInfo* bi = (BranchInfo*)(bp_history);
693 DPRINTF(LTage, "Deleting branch info: %lx\n", bi->branchPC);
694 if (bi->condBranch) {
695 if (bi->loopHit >= 0) {
696 int idx = bi->loopIndex + bi->loopHit;
697 ltable[idx].currentIterSpec = bi->currentIter;
698 }
699 }
700
701 delete bi;
702}
703
704bool
705LTAGE::lookup(ThreadID tid, Addr branch_pc, void* &bp_history)
706{
707 bool retval = predict(tid, branch_pc, true, bp_history);
708
709 DPRINTF(LTage, "Lookup branch: %lx; predict:%d\n", branch_pc, retval);
710 updateHistories(tid, branch_pc, retval, bp_history);
711 assert(threadHistory[tid].gHist ==
712 &threadHistory[tid].globalHistory[threadHistory[tid].ptGhist]);
713
714 return retval;
715}
716
// Called on a BTB miss: the fetched instruction was not actually a taken
// branch, so clear the history bit just inserted for it and redo the
// folded-history updates from the snapshots saved at prediction time.
void
LTAGE::btbUpdate(ThreadID tid, Addr branch_pc, void* &bp_history)
{
    BranchInfo* bi = (BranchInfo*) bp_history;
    ThreadHistory& tHist = threadHistory[tid];
    DPRINTF(LTage, "BTB miss resets prediction: %lx\n", branch_pc);
    assert(tHist.gHist == &tHist.globalHistory[tHist.ptGhist]);
    // Overwrite the speculative "taken" bit with not-taken.
    tHist.gHist[0] = 0;
    for (int i = 1; i <= nHistoryTables; i++) {
        tHist.computeIndices[i].comp = bi->ci[i];
        tHist.computeTags[0][i].comp = bi->ct0[i];
        tHist.computeTags[1][i].comp = bi->ct1[i];
        tHist.computeIndices[i].update(tHist.gHist);
        tHist.computeTags[0][i].update(tHist.gHist);
        tHist.computeTags[1][i].update(tHist.gHist);
    }
}
734
// Unconditional branch: no direction prediction is needed, but the branch
// still allocates a BranchInfo record and contributes a "taken" outcome to
// the speculative histories.
void
LTAGE::uncondBranch(ThreadID tid, Addr br_pc, void* &bp_history)
{
    DPRINTF(LTage, "UnConditionalBranch: %lx\n", br_pc);
    predict(tid, br_pc, false, bp_history);
    updateHistories(tid, br_pc, true, bp_history);
    assert(threadHistory[tid].gHist ==
           &threadHistory[tid].globalHistory[threadHistory[tid].ptGhist]);
}
744
// Factory hook invoked by the generated parameter/config code to
// instantiate the predictor from its parameter object.
LTAGE*
LTAGEParams::create()
{
    return new LTAGE(this);
}