/*
 * Copyright (c) 2013 Advanced Micro Devices, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Author: Derek Hower
 */

#include "mem/ruby/structures/PseudoLRUPolicy.hh"

3310970Sdavid.hashe@amd.comPseudoLRUPolicy::PseudoLRUPolicy(const Params * p)
3410970Sdavid.hashe@amd.com    : AbstractReplacementPolicy(p)
3510970Sdavid.hashe@amd.com{
3610970Sdavid.hashe@amd.com    // associativity cannot exceed capacity of tree representation
3710970Sdavid.hashe@amd.com    assert(m_num_sets > 0 &&
3810970Sdavid.hashe@amd.com           m_assoc > 1 &&
3911061Snilay@cs.wisc.edu           m_assoc <= (int64_t) sizeof(uint64_t)*4);
4010970Sdavid.hashe@amd.com
4110970Sdavid.hashe@amd.com    m_trees = NULL;
4210970Sdavid.hashe@amd.com    m_num_levels = 0;
4310970Sdavid.hashe@amd.com
4410970Sdavid.hashe@amd.com    m_effective_assoc = 1;
4510970Sdavid.hashe@amd.com    while (m_effective_assoc < m_assoc) {
4610970Sdavid.hashe@amd.com        // effective associativity is ceiling power of 2
4710970Sdavid.hashe@amd.com        m_effective_assoc <<= 1;
4810970Sdavid.hashe@amd.com    }
4910970Sdavid.hashe@amd.com    int tmp_assoc = m_effective_assoc;
5010970Sdavid.hashe@amd.com    while (true) {
5110970Sdavid.hashe@amd.com        tmp_assoc /= 2;
5211321Ssteve.reinhardt@amd.com        if (!tmp_assoc) break;
5310970Sdavid.hashe@amd.com        m_num_levels++;
5410970Sdavid.hashe@amd.com    }
5510970Sdavid.hashe@amd.com    assert(m_num_levels < sizeof(unsigned int)*4);
5611061Snilay@cs.wisc.edu    m_trees = new uint64_t[m_num_sets];
5710970Sdavid.hashe@amd.com    for (unsigned i = 0; i < m_num_sets; i++) {
5810970Sdavid.hashe@amd.com        m_trees[i] = 0;
5910970Sdavid.hashe@amd.com    }
6010970Sdavid.hashe@amd.com}
6210970Sdavid.hashe@amd.comPseudoLRUPolicy *
6310970Sdavid.hashe@amd.comPseudoLRUReplacementPolicyParams::create()
6410970Sdavid.hashe@amd.com{
6510970Sdavid.hashe@amd.com    return new PseudoLRUPolicy(this);
6610970Sdavid.hashe@amd.com}


6910970Sdavid.hashe@amd.comPseudoLRUPolicy::~PseudoLRUPolicy()
7010970Sdavid.hashe@amd.com{
7110970Sdavid.hashe@amd.com    if (m_trees != NULL)
7210970Sdavid.hashe@amd.com        delete[] m_trees;
7310970Sdavid.hashe@amd.com}
7510970Sdavid.hashe@amd.comvoid
7611061Snilay@cs.wisc.eduPseudoLRUPolicy::touch(int64_t set, int64_t index, Tick time)
7710970Sdavid.hashe@amd.com{
7810970Sdavid.hashe@amd.com    assert(index >= 0 && index < m_assoc);
7910970Sdavid.hashe@amd.com    assert(set >= 0 && set < m_num_sets);
8010970Sdavid.hashe@amd.com
8110970Sdavid.hashe@amd.com    int tree_index = 0;
8210970Sdavid.hashe@amd.com    int node_val;
8310970Sdavid.hashe@amd.com    for (int i = m_num_levels - 1; i >= 0; i--) {
8410970Sdavid.hashe@amd.com        node_val = (index >> i)&1;
8510970Sdavid.hashe@amd.com        if (node_val)
8610970Sdavid.hashe@amd.com            m_trees[set] |= node_val << tree_index;
8710970Sdavid.hashe@amd.com        else
8810970Sdavid.hashe@amd.com            m_trees[set] &= ~(1 << tree_index);
8910970Sdavid.hashe@amd.com        tree_index = node_val ? (tree_index*2)+2 : (tree_index*2)+1;
9010970Sdavid.hashe@amd.com    }
9110970Sdavid.hashe@amd.com    m_last_ref_ptr[set][index] = time;
9210970Sdavid.hashe@amd.com}
9411061Snilay@cs.wisc.eduint64_t
9511061Snilay@cs.wisc.eduPseudoLRUPolicy::getVictim(int64_t set) const
9610970Sdavid.hashe@amd.com{
9711061Snilay@cs.wisc.edu    int64_t index = 0;
9810970Sdavid.hashe@amd.com
9910970Sdavid.hashe@amd.com    int tree_index = 0;
10010970Sdavid.hashe@amd.com    int node_val;
10110970Sdavid.hashe@amd.com    for (unsigned i = 0; i < m_num_levels; i++){
10210970Sdavid.hashe@amd.com        node_val = (m_trees[set] >> tree_index) & 1;
10310970Sdavid.hashe@amd.com        index += node_val ? 0 : (m_effective_assoc >> (i + 1));
10410970Sdavid.hashe@amd.com        tree_index = node_val ? (tree_index * 2) + 1 : (tree_index * 2) + 2;
10510970Sdavid.hashe@amd.com    }
10610970Sdavid.hashe@amd.com    assert(index >= 0 && index < m_effective_assoc);
10710970Sdavid.hashe@amd.com
10810970Sdavid.hashe@amd.com    /* return either the found index or the max possible index */
10910970Sdavid.hashe@amd.com    /* NOTE: this is not a fair replacement when assoc is not a power of 2 */
11010970Sdavid.hashe@amd.com    return (index > (m_assoc - 1)) ? m_assoc - 1 : index;
11110970Sdavid.hashe@amd.com}