/*****************************************************************************
 *                                McPAT
 *                      SOFTWARE LICENSE AGREEMENT
 *            Copyright 2012 Hewlett-Packard Development Company, L.P.
 *            Copyright (c) 2010-2013 Advanced Micro Devices, Inc.
 *                          All Rights Reserved
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 ***************************************************************************/

#include <algorithm>
#include <cassert>
#include <cmath>
#include <cstring>
#include <iostream>
#include <string>

#include "basic_circuit.h"
#include "basic_components.h"
#include "common.h"
#include "const.h"
#include "io.h"
#include "logic.h"
#include "memoryctrl.h"
#include "parameter.h"

/* overview of MC models:
 * McPAT memory controllers are modeled according to a large number of
 * industrial data points. The basic memory controller architecture is based on
 * the Synopsys designs (DesignWare DDR2/DDR3-Lite memory controllers and
 * DDR2/DDR3-Lite protocol controllers) as listed in the Cadence ChipEstimator
 * Tool.
 *
 * An MC has three parts as shown in this design. McPAT models both a
 * high-performance MC, based on Niagara processor designs and curve fitting,
 * and a low-power MC, based on data points in the Cadence ChipEstimator Tool.
 *
 * The frontend is modeled analytically; the backend is modeled empirically
 * according to the DDR2/DDR3-Lite protocol controllers in the Cadence
 * ChipEstimator Tool. The PHY is modeled based on
 * "A 100mW 9.6Gb/s Transceiver in 90nm CMOS for Next-Generation Memory
 * Interfaces," ISSCC 2006, and "A 14mW 6.25Gb/s Transceiver in 90nm CMOS for
 * Serial Chip-to-Chip Communication," ISSCC 2007.
 *
 * In the Cadence ChipEstimator Tool there are two types of memory controllers:
 * full memory controllers that include the frontend, such as the DesignWare
 * DDR2/DDR3-Lite memory controllers, and backend-only memory controllers, such
 * as the DDR2/DDR3-Lite protocol controllers (except for the DesignWare
 * DDR2/DDR3-Lite memory controllers, all memory controller IP in the Cadence
 * ChipEstimator Tool are backend memory controllers, e.g., DDRC 1600A and
 * DDRC 800A). Thus, to some extent, the area and power difference between the
 * DesignWare DDR2/DDR3-Lite memory controllers and the DDR2/DDR3-Lite protocol
 * controllers can serve as an estimate of the frontend power and area, which is
 * very close to the analytically modeled results of the frontend for
 * Niagara2 at 65nm.
 */
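
// The three MC parts described above map onto the components modeled below:
// MCFrontEnd (request buffering and arbitration), MCBackend (the transaction
// engine), and MCPHY (the physical interface to DRAM).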

MCBackend::MCBackend(XMLNode* _xml_data, InputParameter* interface_ip_,
                     const MCParameters & mcp_, const MCStatistics & mcs_)
    : McPATComponent(_xml_data), l_ip(*interface_ip_), mcp(mcp_), mcs(mcs_) {
    name = "Transaction Engine";
    local_result = init_interface(&l_ip, name);

    // Set up stats for the power calculations
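    // For TDP, assume each channel performs one access per cycle at the MC
    // clock rate, split evenly between reads and writes (hence the 0.5
    // factors below); the runtime stats instead use the observed read/write
    // counts from the simulation statistics.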
    tdp_stats.reset();
    tdp_stats.readAc.access = 0.5 * mcp.num_channels * mcp.clockRate;
    tdp_stats.writeAc.access = 0.5 * mcp.num_channels * mcp.clockRate;
    rtp_stats.reset();
    rtp_stats.readAc.access = mcs.reads;
    rtp_stats.writeAc.access = mcs.writes;
}

void MCBackend::computeArea() {
    // The area is in mm^2
    if (mcp.mc_type == MC) {
        if (mcp.type == 0) {
            output_data.area = (2.7927 * log(mcp.peak_transfer_rate * 2) -
                                19.862) / 2.0 * mcp.dataBusWidth / 128.0 *
                (l_ip.F_sz_um / 0.09) * mcp.num_channels;
        } else {
            output_data.area = 0.15 * mcp.dataBusWidth / 72.0 *
                (l_ip.F_sz_um / 0.065) * (l_ip.F_sz_um / 0.065) *
                mcp.num_channels;
        }
    } else {
        //skip old model
        cout << "Unknown memory controllers" << endl;
        exit(0);
        //area based on Cadence ChipEstimator for 8bit bus
        output_data.area = 0.243 * mcp.dataBusWidth / 8;
    }
}


void MCBackend::computeEnergy() {
    double C_MCB, mc_power;
    double backend_dyn;
    double backend_gates;
    double pmos_to_nmos_sizing_r = pmos_to_nmos_sz_ratio();
    double NMOS_sizing = g_tp.min_w_nmos_;
    double PMOS_sizing = g_tp.min_w_nmos_ * pmos_to_nmos_sizing_r;
    double area_um2 = output_data.area * 1e6;

    if (mcp.mc_type == MC) {
        if (mcp.type == 0) {
            //assuming approximately the same scaling factor as seen in processors.
            //C_MCB = 1.6/200/1e6/144/1.2/1.2*g_ip.F_sz_um/0.19;//Based on Niagara power numbers. The base power (W) is divided by device frequency and vdd and scaled to the target process.
            //mc_power = 0.0291*2;//29.1mW@200MHz @130nm from "Power Analysis of System-Level On-Chip Communication Architectures" by Lahiri et al.
            mc_power = 4.32*0.1;//4.32W @ 1GHz @65nm, Cadence ChipEstimator; 10% for backend
            C_MCB = mc_power/1e9/72/1.1/1.1*l_ip.F_sz_um/0.065;
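            // Derivation of the line above: treating the backend as a lumped
            // switched capacitance so that P = C * width * Vdd^2 * f, the
            // per-bit capacitance is C_MCB = P / (f * width * Vdd^2) with
            // P = 4.32 W * 10%, f = 1 GHz, width = 72 bits, and Vdd = 1.1 V at
            // 65nm, then scaled linearly with feature size (F_sz_um / 0.065).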
            //per access energy in memory controller
            power.readOp.dynamic = C_MCB * g_tp.peri_global.Vdd *
                g_tp.peri_global.Vdd *
                (mcp.dataBusWidth/*+mcp.addressBusWidth*/);
            power.readOp.leakage = area_um2 / 2 *
                (g_tp.scaling_factor.core_tx_density) *
                cmos_Isub_leakage(NMOS_sizing, PMOS_sizing, 1, inv) *
                g_tp.peri_global.Vdd;//unit W
            power.readOp.gate_leakage = area_um2 / 2 *
                (g_tp.scaling_factor.core_tx_density) *
                cmos_Ig_leakage(NMOS_sizing, PMOS_sizing, 1, inv) *
                g_tp.peri_global.Vdd;//unit W
        } else {
            //Average of DDR2/3 protocol controller and DDRC 1600/800A in
            //Cadence ChipEstimate
            backend_dyn = 0.9e-9 / 800e6 * mcp.clockRate / 12800 *
                mcp.peak_transfer_rate * mcp.dataBusWidth / 72.0 *
                g_tp.peri_global.Vdd / 1.1 * g_tp.peri_global.Vdd / 1.1 *
                (l_ip.F_sz_nm/65.0);
            //Scaling to technology and DIMM feature. The base IP supports
            //DDR3-1600 (PC3-12800)
            //50000 is from Cadence ChipEstimator
            backend_gates = 50000 * mcp.dataBusWidth / 64.0;

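            // Leakage is estimated per synthesized gate, using a minimum-sized
            // 2-input NAND as the representative gate for the backend logic.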
            power.readOp.dynamic = backend_dyn;
            power.readOp.leakage = (backend_gates) *
                cmos_Isub_leakage(NMOS_sizing, PMOS_sizing, 2, nand) *
                g_tp.peri_global.Vdd;//unit W
            power.readOp.gate_leakage = (backend_gates) *
                cmos_Ig_leakage(NMOS_sizing, PMOS_sizing, 2, nand) *
                g_tp.peri_global.Vdd;//unit W
        }
    } else {
        //skip old model
        cout << "Unknown memory controllers" << endl;
        exit(0);
        //mc_power = 4.32*0.1;//4.32W @ 1GHz @65nm, Cadence ChipEstimator; 10% for backend
        C_MCB = mc_power/1e9/72/1.1/1.1*l_ip.F_sz_um/0.065;
        power.readOp.leakage = area_um2 / 2 *
            (g_tp.scaling_factor.core_tx_density) *
            cmos_Isub_leakage(NMOS_sizing, PMOS_sizing, 1, inv) *
            g_tp.peri_global.Vdd;//unit W
        power.readOp.gate_leakage = area_um2 / 2 *
            (g_tp.scaling_factor.core_tx_density) *
            cmos_Ig_leakage(NMOS_sizing, PMOS_sizing, 1, inv) *
            g_tp.peri_global.Vdd;//unit W
        power.readOp.dynamic *= 1.2;
        power.readOp.leakage *= 1.2;
        power.readOp.gate_leakage *= 1.2;
        //flash controller has about 20% more backend power since BCH ECC in
        //flash is complex and power hungry
    }
    double long_channel_device_reduction =
        longer_channel_device_reduction(Uncore_device);
    power.readOp.longer_channel_leakage = power.readOp.leakage *
        long_channel_device_reduction;

    // Output leakage power calculations
    output_data.subthreshold_leakage_power =
        longer_channel_device ? power.readOp.longer_channel_leakage :
        power.readOp.leakage;
    output_data.gate_leakage_power = power.readOp.gate_leakage;

    // Peak dynamic power calculation
    output_data.peak_dynamic_power = power.readOp.dynamic *
        (tdp_stats.readAc.access + tdp_stats.writeAc.access);

    // Runtime dynamic energy calculation
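    // Per-access dynamic energy is charged once per data-bus transfer, so each
    // LLC block access contributes llcBlockSize * BITS_PER_BYTE / dataBusWidth
    // transfers on top of the fixed 10% background activity below.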
    output_data.runtime_dynamic_energy =
        power.readOp.dynamic *
        (rtp_stats.readAc.access + rtp_stats.writeAc.access) *
        mcp.llcBlockSize * BITS_PER_BYTE / mcp.dataBusWidth +
        // Original McPAT code: Assume 10% of peak power is consumed by routine
        // job including memory refreshing and scrubbing
        power.readOp.dynamic * 0.1 * execution_time;
}

MCPHY::MCPHY(XMLNode* _xml_data, InputParameter* interface_ip_,
             const MCParameters & mcp_, const MCStatistics & mcs_)
    : McPATComponent(_xml_data), l_ip(*interface_ip_), mcp(mcp_), mcs(mcs_) {
    name = "Physical Interface (PHY)";
    local_result = init_interface(&l_ip, name);

    // Set up stats for the power calculations
    // TODO: Figure out why TDP stats aren't used
    tdp_stats.reset();
    tdp_stats.readAc.access = 0.5 * mcp.num_channels;
    tdp_stats.writeAc.access = 0.5 * mcp.num_channels;
    rtp_stats.reset();
    rtp_stats.readAc.access = mcs.reads;
    rtp_stats.writeAc.access = mcs.writes;
}

void MCPHY::computeArea() {
    if (mcp.mc_type == MC) {
        if (mcp.type == 0) {
            //Based on die photos from Niagara 1 and 2.
            //TODO merge this into undifferentiated core. PHY only achieves
            //square root of the ideal scaling.
            output_data.area = (6.4323 * log(mcp.peak_transfer_rate * 2) -
                                48.134) * mcp.dataBusWidth / 128.0 *
                (l_ip.F_sz_um / 0.09) * mcp.num_channels / 2;//TODO:/2
        } else {
            //The DesignWare/Synopsys 16-bit DDR3 PHY is 1.3mm (WITH IOs) at
            //40nm for up to DDR3 2133 (PC3-17066)
            double non_IO_percentage = 0.2;
            output_data.area = 1.3 * non_IO_percentage / 2133.0e6 *
                mcp.clockRate / 17066 * mcp.peak_transfer_rate *
                mcp.dataBusWidth / 16.0 * (l_ip.F_sz_um / 0.040) *
                (l_ip.F_sz_um / 0.040) * mcp.num_channels;//um^2
        }
    } else {
        //area based on Cadence ChipEstimator for 8bit bus
        output_data.area = 0.4e6 / 2 * mcp.dataBusWidth / 8 / 1e6;
    }
}

void MCPHY::computeEnergy() {
    //PHY uses the internal data bus width, but the actual off-chip data width is 64 bits + ECC
    double pmos_to_nmos_sizing_r = pmos_to_nmos_sz_ratio();
    /*
     * according to "A 100mW 9.6Gb/s Transceiver in 90nm CMOS for Next-Generation Memory Interfaces," ISSCC 2006;
     * from Cadence ChipEstimator, normal I/O is around 0.4~0.8 mW/Gb/s
     */
    double power_per_gb_per_s, phy_dyn, phy_gates;
    double NMOS_sizing = g_tp.min_w_nmos_;
    double PMOS_sizing = g_tp.min_w_nmos_ * pmos_to_nmos_sizing_r;
    double area_um2 = output_data.area * 1e6;

    if (mcp.mc_type == MC) {
        if (mcp.type == 0) {
            power_per_gb_per_s = mcp.LVDS ? 0.01 : 0.04;
            //This is from curve fitting based on Niagara 1 and 2's PHY die photos.
            //This is power, not energy: 10mW/Gb/s @90nm for each channel, scaled down with technology.
            //power.readOp.dynamic = 0.02*memAccesses*llcBlocksize*8;//change from Bytes to bits.
            power.readOp.dynamic = power_per_gb_per_s *
                sqrt(l_ip.F_sz_um / 0.09) * g_tp.peri_global.Vdd / 1.2 *
                g_tp.peri_global.Vdd / 1.2;
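            // The sqrt() term reflects the observation noted in computeArea()
            // that the PHY only achieves the square root of ideal technology
            // scaling; the Vdd/1.2 factors rescale the 1.2V supply assumed for
            // the 90nm reference design to the modeled peripheral Vdd.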
            power.readOp.leakage = area_um2 / 2 *
                (g_tp.scaling_factor.core_tx_density) *
                cmos_Isub_leakage(NMOS_sizing, PMOS_sizing, 1, inv) *
                g_tp.peri_global.Vdd;//unit W
            power.readOp.gate_leakage = area_um2 / 2 *
                (g_tp.scaling_factor.core_tx_density) *
                cmos_Ig_leakage(NMOS_sizing, PMOS_sizing, 1, inv) *
                g_tp.peri_global.Vdd;//unit W
        } else {
            phy_gates = 200000 * mcp.dataBusWidth / 64.0;
            power_per_gb_per_s = 0.01;
            //This is power, not energy: 10mW/Gb/s @90nm for each channel, scaled down with technology.
            power.readOp.dynamic = power_per_gb_per_s * (l_ip.F_sz_um / 0.09) *
                g_tp.peri_global.Vdd / 1.2 * g_tp.peri_global.Vdd / 1.2;
            power.readOp.leakage = (mcp.withPHY ? phy_gates : 0) *
                cmos_Isub_leakage(NMOS_sizing, PMOS_sizing, 2, nand) *
                g_tp.peri_global.Vdd;//unit W
            power.readOp.gate_leakage = (mcp.withPHY ? phy_gates : 0) *
                cmos_Ig_leakage(NMOS_sizing, PMOS_sizing, 2, nand) *
                g_tp.peri_global.Vdd;//unit W
        }
    }

//  double phy_factor = (int)ceil(mcp.dataBusWidth/72.0);//Previous phy power numbers are based on a 72-bit DIMM interface
//  power_t.readOp.dynamic *= phy_factor;
//  power_t.readOp.leakage *= phy_factor;
//  power_t.readOp.gate_leakage *= phy_factor;

    double long_channel_device_reduction =
        longer_channel_device_reduction(Uncore_device);
    power.readOp.longer_channel_leakage =
        power.readOp.leakage * long_channel_device_reduction;

    // Leakage power calculations
    output_data.subthreshold_leakage_power =
        longer_channel_device ? power.readOp.longer_channel_leakage :
        power.readOp.leakage;
    output_data.gate_leakage_power = power.readOp.gate_leakage;

    // Peak dynamic power calculation
    double data_transfer_unit = (mcp.mc_type == MC) ? 72 : 16; /*DIMM data width*/
    output_data.peak_dynamic_power = power.readOp.dynamic *
        (mcp.peak_transfer_rate * BITS_PER_BYTE / 1e3) * mcp.dataBusWidth /
        data_transfer_unit * mcp.num_channels / mcp.clockRate;

    // Runtime dynamic energy calculation
    output_data.runtime_dynamic_energy =
        power.readOp.dynamic *
        (rtp_stats.readAc.access + rtp_stats.writeAc.access) *
        mcp.llcBlockSize * BITS_PER_BYTE / 1e9 +
        // Original McPAT code: Assume 10% of peak power is consumed by routine
        // job including memory refreshing and scrubbing
        power.readOp.dynamic * 0.1 * execution_time;
}

MCFrontEnd::MCFrontEnd(XMLNode* _xml_data, InputParameter* interface_ip_,
                       const MCParameters & mcp_, const MCStatistics & mcs_)
    : McPATComponent(_xml_data), frontendBuffer(NULL), readBuffer(NULL),
      writeBuffer(NULL), MC_arb(NULL), interface_ip(*interface_ip_),
      mcp(mcp_), mcs(mcs_) {
    int tag, data;
    bool is_default = true;//indication for default setup

    /* MC frontend engine channels share the same engines but are logically
     * partitioned. For all hardware inside the MC, different channels do not
     * share resources.
     * TODO: add a decoding/mux stage to steer memory requests to different channels.
     */

    name = "Front End";

    // Memory Request Reorder Buffer
    tag = mcp.addressbus_width + EXTRA_TAG_BITS + mcp.opcodeW;
    data = int(ceil((physical_address_width + mcp.opcodeW) / BITS_PER_BYTE));
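    // Each reorder-buffer entry is sized to hold a physical address plus the
    // command opcode (rounded up to whole bytes); the tag covers the MC
    // address bus width, the opcode, and EXTRA_TAG_BITS of bookkeeping state.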

    interface_ip.cache_sz = data * mcp.req_window_size_per_channel;
    interface_ip.line_sz = data;
    interface_ip.assoc = mcp.reorder_buffer_assoc;
    interface_ip.nbanks = mcp.reorder_buffer_nbanks;
    interface_ip.out_w = interface_ip.line_sz * BITS_PER_BYTE;
    interface_ip.specific_tag = tag > 0;
    interface_ip.tag_w = tag;
    interface_ip.access_mode = Normal;
    interface_ip.obj_func_dyn_energy = 0;
    interface_ip.obj_func_dyn_power = 0;
    interface_ip.obj_func_leak_power = 0;
    interface_ip.obj_func_cycle_t = 1;
    interface_ip.num_rw_ports = 0;
    interface_ip.num_rd_ports = mcp.num_channels;
    interface_ip.num_wr_ports = interface_ip.num_rd_ports;
    interface_ip.num_se_rd_ports = 0;
    interface_ip.num_search_ports = mcp.num_channels;
    interface_ip.is_cache = true;
    interface_ip.pure_cam = false;
    interface_ip.pure_ram = false;
    interface_ip.throughput = 1.0 / mcp.clockRate;
    interface_ip.latency = 1.0 / mcp.clockRate;
    frontendBuffer = new CacheArray(xml_data, &interface_ip, "Reorder Buffer",
                                    Uncore_device, mcp.clockRate);
    children.push_back(frontendBuffer);

    frontendBuffer->tdp_stats.reset();
    frontendBuffer->tdp_stats.readAc.access =
        frontendBuffer->l_ip.num_search_ports +
        frontendBuffer->l_ip.num_wr_ports;
    frontendBuffer->tdp_stats.writeAc.access =
        frontendBuffer->l_ip.num_search_ports;
    frontendBuffer->tdp_stats.searchAc.access =
        frontendBuffer->l_ip.num_wr_ports;
    frontendBuffer->rtp_stats.reset();
    // TODO: These stats assume that access power is calculated per buffer
    // bit, which requires the stats to take into account the number of
    // bits for each buffer slot. This should be revised...
    //For each channel, each memory word needs to check the address data to
    //achieve the best scheduling results,
    //and this needs to be done on all physical DIMMs in each logical memory
    //DIMM: hence the *mcp.dataBusWidth/72 factor.
    frontendBuffer->rtp_stats.readAc.access = mcs.reads * mcp.llcBlockSize *
        BITS_PER_BYTE / mcp.dataBusWidth * mcp.dataBusWidth / 72;
    frontendBuffer->rtp_stats.writeAc.access = mcs.writes * mcp.llcBlockSize *
        BITS_PER_BYTE / mcp.dataBusWidth * mcp.dataBusWidth / 72;
    frontendBuffer->rtp_stats.searchAc.access =
        frontendBuffer->rtp_stats.readAc.access +
        frontendBuffer->rtp_stats.writeAc.access;

    // Read Buffers
    //Support critical-word-first operation
    data = (int)ceil(mcp.dataBusWidth / BITS_PER_BYTE);

    interface_ip.cache_sz = data * mcp.IO_buffer_size_per_channel;
    interface_ip.line_sz = data;
    interface_ip.assoc = mcp.read_buffer_assoc;
    interface_ip.nbanks = mcp.read_buffer_nbanks;
    interface_ip.out_w = interface_ip.line_sz * BITS_PER_BYTE;
    interface_ip.specific_tag = mcp.read_buffer_tag_width > 0;
    interface_ip.tag_w = mcp.read_buffer_tag_width;
    interface_ip.access_mode = Sequential;
    interface_ip.obj_func_dyn_energy = 0;
    interface_ip.obj_func_dyn_power = 0;
    interface_ip.obj_func_leak_power = 0;
    interface_ip.obj_func_cycle_t = 1;
    interface_ip.num_rw_ports = 0;
    interface_ip.num_rd_ports = mcp.num_channels;
    interface_ip.num_wr_ports = interface_ip.num_rd_ports;
    interface_ip.num_se_rd_ports = 0;
    interface_ip.num_search_ports = 0;
    interface_ip.is_cache = false;
    interface_ip.pure_cam = false;
    interface_ip.pure_ram = true;
    interface_ip.throughput = 1.0 / mcp.clockRate;
    interface_ip.latency = 1.0 / mcp.clockRate;
    readBuffer = new CacheArray(xml_data, &interface_ip, "Read Buffer",
                                Uncore_device, mcp.clockRate);
    children.push_back(readBuffer);

    readBuffer->tdp_stats.reset();
    readBuffer->tdp_stats.readAc.access = readBuffer->l_ip.num_rd_ports *
        mcs.duty_cycle;
    readBuffer->tdp_stats.writeAc.access = readBuffer->l_ip.num_wr_ports *
        mcs.duty_cycle;
    readBuffer->rtp_stats.reset();
    readBuffer->rtp_stats.readAc.access = mcs.reads * mcp.llcBlockSize *
        BITS_PER_BYTE / mcp.dataBusWidth;
    readBuffer->rtp_stats.writeAc.access = mcs.reads * mcp.llcBlockSize *
        BITS_PER_BYTE / mcp.dataBusWidth;
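    // Each read of an LLC block is split into dataBusWidth-sized beats, and
    // every beat is first written into and then read out of the read buffer,
    // which is why both access counts above are derived from mcs.reads.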

    // Write Buffer
    //Support critical-word-first operation
    data = (int)ceil(mcp.dataBusWidth / BITS_PER_BYTE);

    interface_ip.cache_sz = data * mcp.IO_buffer_size_per_channel;
    interface_ip.line_sz = data;
    interface_ip.assoc = mcp.write_buffer_assoc;
    interface_ip.nbanks = mcp.write_buffer_nbanks;
    interface_ip.out_w = interface_ip.line_sz * BITS_PER_BYTE;
    interface_ip.specific_tag = mcp.write_buffer_tag_width > 0;
    interface_ip.tag_w = mcp.write_buffer_tag_width;
    interface_ip.access_mode = Normal;
    interface_ip.obj_func_dyn_energy = 0;
    interface_ip.obj_func_dyn_power = 0;
    interface_ip.obj_func_leak_power = 0;
    interface_ip.obj_func_cycle_t = 1;
    interface_ip.num_rw_ports = 0;
    interface_ip.num_rd_ports = mcp.num_channels;
    interface_ip.num_wr_ports = interface_ip.num_rd_ports;
    interface_ip.num_se_rd_ports = 0;
    interface_ip.num_search_ports = 0;
    interface_ip.is_cache = false;
    interface_ip.pure_cam = false;
    interface_ip.pure_ram = true;
    interface_ip.throughput = 1.0 / mcp.clockRate;
    interface_ip.latency = 1.0 / mcp.clockRate;
    writeBuffer = new CacheArray(xml_data, &interface_ip, "Write Buffer",
                                 Uncore_device, mcp.clockRate);
    children.push_back(writeBuffer);

    writeBuffer->tdp_stats.reset();
    writeBuffer->tdp_stats.readAc.access = writeBuffer->l_ip.num_rd_ports *
        mcs.duty_cycle;
    writeBuffer->tdp_stats.writeAc.access = writeBuffer->l_ip.num_wr_ports *
        mcs.duty_cycle;
    writeBuffer->rtp_stats.reset();
    writeBuffer->rtp_stats.readAc.access = mcs.reads * mcp.llcBlockSize *
        BITS_PER_BYTE / mcp.dataBusWidth;
    writeBuffer->rtp_stats.writeAc.access = mcs.writes * mcp.llcBlockSize *
        BITS_PER_BYTE / mcp.dataBusWidth;

    // TODO: Set up selection logic as a leaf node in the tree
    //selection and arbitration logic
    MC_arb =
        new selection_logic(xml_data, is_default,
                            mcp.req_window_size_per_channel, 1, &interface_ip,
                            "Arbitration Logic", (mcs.reads + mcs.writes),
                            mcp.clockRate, Uncore_device);
    // MC_arb is not included in the roll-up due to the uninitialized area
    //children.push_back(MC_arb);
}

MemoryController::MemoryController(XMLNode* _xml_data,
                                   InputParameter* interface_ip_)
    : McPATComponent(_xml_data), interface_ip(*interface_ip_) {
    name = "Memory Controller";
    set_mc_param();
    // TODO: Pass params and stats as pointers
    children.push_back(new MCFrontEnd(xml_data, &interface_ip, mcp, mcs));
    children.push_back(new MCBackend(xml_data, &interface_ip, mcp, mcs));

    if (mcp.type == 0 || (mcp.type == 1 && mcp.withPHY)) {
        children.push_back(new MCPHY(xml_data, &interface_ip, mcp, mcs));
    }
}

void MemoryController::initialize_params() {
    memset(&mcp, 0, sizeof(MCParameters));
}

void MemoryController::set_mc_param() {
    initialize_params();

    int num_children = xml_data->nChildNode("param");
    int tech_type;
    int mat_type;
    int i;
    for (i = 0; i < num_children; i++) {
        XMLNode* paramNode = xml_data->getChildNodePtr("param", &i);
        XMLCSTR node_name = paramNode->getAttribute("name");
        XMLCSTR value = paramNode->getAttribute("value");

        if (!node_name)
            warnMissingParamName(paramNode->getAttribute("id"));

        ASSIGN_FP_IF("mc_clock", mcp.clockRate);
        ASSIGN_INT_IF("tech_type", tech_type);
        ASSIGN_ENUM_IF("mc_type", mcp.mc_type, MemoryCtrl_type);
        ASSIGN_FP_IF("num_mcs", mcp.num_mcs);
        ASSIGN_INT_IF("llc_line_length", mcp.llc_line_length);
        ASSIGN_INT_IF("databus_width", mcp.databus_width);
        ASSIGN_INT_IF("memory_channels_per_mc", mcp.num_channels);
        ASSIGN_INT_IF("req_window_size_per_channel",
                      mcp.req_window_size_per_channel);
        ASSIGN_INT_IF("IO_buffer_size_per_channel",
                      mcp.IO_buffer_size_per_channel);
        ASSIGN_INT_IF("addressbus_width", mcp.addressbus_width);
        ASSIGN_INT_IF("opcode_width", mcp.opcodeW);
        ASSIGN_INT_IF("type", mcp.type);
        ASSIGN_ENUM_IF("LVDS", mcp.LVDS, bool);
        ASSIGN_ENUM_IF("withPHY", mcp.withPHY, bool);
        ASSIGN_INT_IF("peak_transfer_rate", mcp.peak_transfer_rate);
        ASSIGN_INT_IF("number_ranks", mcp.number_ranks);
        ASSIGN_INT_IF("reorder_buffer_assoc", mcp.reorder_buffer_assoc);
        ASSIGN_INT_IF("reorder_buffer_nbanks", mcp.reorder_buffer_nbanks);
        ASSIGN_INT_IF("read_buffer_assoc", mcp.read_buffer_assoc);
        ASSIGN_INT_IF("read_buffer_nbanks", mcp.read_buffer_nbanks);
        ASSIGN_INT_IF("read_buffer_tag_width", mcp.read_buffer_tag_width);
        ASSIGN_INT_IF("write_buffer_assoc", mcp.write_buffer_assoc);
        ASSIGN_INT_IF("write_buffer_nbanks", mcp.write_buffer_nbanks);
        ASSIGN_INT_IF("write_buffer_tag_width", mcp.write_buffer_tag_width);
        ASSIGN_INT_IF("wire_mat_type", mat_type);
        ASSIGN_ENUM_IF("wire_type", interface_ip.wt, Wire_type);

        else {
            warnUnrecognizedParam(node_name);
        }
    }

    if (mcp.mc_type != MC) {
        cout << "Unknown memory controller type: Only DRAM controller is "
             << "supported for now" << endl;
        exit(0);
    }

    // Change from MHz to Hz
    mcp.clockRate *= 1e6;

    interface_ip.data_arr_ram_cell_tech_type    = tech_type;
    interface_ip.data_arr_peri_global_tech_type = tech_type;
    interface_ip.tag_arr_ram_cell_tech_type     = tech_type;
    interface_ip.tag_arr_peri_global_tech_type  = tech_type;
    interface_ip.wire_is_mat_type = mat_type;
    interface_ip.wire_os_mat_type = mat_type;

    num_children = xml_data->nChildNode("stat");
    for (i = 0; i < num_children; i++) {
        XMLNode* statNode = xml_data->getChildNodePtr("stat", &i);
        XMLCSTR node_name = statNode->getAttribute("name");
        XMLCSTR value = statNode->getAttribute("value");

        if (!node_name)
            warnMissingStatName(statNode->getAttribute("id"));

        ASSIGN_FP_IF("duty_cycle", mcs.duty_cycle);
        ASSIGN_FP_IF("perc_load", mcs.perc_load);
        ASSIGN_FP_IF("memory_reads", mcs.reads);
        ASSIGN_INT_IF("memory_writes", mcs.writes);

        else {
            warnUnrecognizedStat(node_name);
        }
    }

    // Add ECC overhead
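    // This adds a 1/8 (12.5%) ECC overhead to both the LLC block size and the
    // data bus width, e.g., a 64-bit data bus becomes the 72-bit bus of a
    // standard ECC DIMM.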
    mcp.llcBlockSize = int(ceil(mcp.llc_line_length / BITS_PER_BYTE)) +
        mcp.llc_line_length;
    mcp.dataBusWidth = int(ceil(mcp.databus_width / BITS_PER_BYTE)) +
        mcp.databus_width;
}

MCFrontEnd::~MCFrontEnd() {
    if (MC_arb) {
        delete MC_arb;
        MC_arb = NULL;
    }
    if (frontendBuffer) {
        delete frontendBuffer;
        frontendBuffer = NULL;
    }
    if (readBuffer) {
        delete readBuffer;
        readBuffer = NULL;
    }
    if (writeBuffer) {
        delete writeBuffer;
        writeBuffer = NULL;
    }
}

MemoryController::~MemoryController() {
    // TODO: use the default destructor to delete children
}
