Cache.py revision 13892
# Copyright (c) 2012-2013, 2015, 2018 ARM Limited
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder.  You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Copyright (c) 2005-2007 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Nathan Binkert
#          Andreas Hansson

from m5.params import *
from m5.proxy import *
from m5.SimObject import SimObject

from m5.objects.ClockedObject import ClockedObject
from m5.objects.Prefetcher import BasePrefetcher
from m5.objects.ReplacementPolicies import *
from m5.objects.Tags import *


# Enum for cache clusivity, currently mostly inclusive or mostly
# exclusive.
class Clusivity(Enum): vals = ['mostly_incl', 'mostly_excl']

class WriteAllocator(SimObject):
    type = 'WriteAllocator'
    cxx_header = "mem/cache/cache.hh"

    # Control the limits for when the cache introduces extra delays to
    # allow whole-line write coalescing, and eventually switches to a
    # write-no-allocate policy.
    coalesce_limit = Param.Unsigned(2, "Consecutive lines written before "
                                    "delaying for coalescing")
    no_allocate_limit = Param.Unsigned(12, "Consecutive lines written before"
                                       " skipping allocation")

    delay_threshold = Param.Unsigned(8, "Number of delay quanta imposed on an "
                                     "MSHR with write requests to allow for "
                                     "write coalescing")

    block_size = Param.Int(Parent.cache_line_size, "block size in bytes")


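# A minimal usage sketch (illustrative values, not defaults mandated by this
# file): in a configuration script the allocator is typically attached to a
# data cache through the write_allocator parameter declared on BaseCache
# below, e.g.
#
#   dcache = Cache(size='64kB', assoc=4, tag_latency=2, data_latency=2,
#                  response_latency=2, mshrs=16, tgts_per_mshr=20)
#   dcache.write_allocator = WriteAllocator()
#
# With the defaults above, the allocator starts delaying an MSHR after two
# consecutively written lines and stops allocating after twelve.
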
class BaseCache(ClockedObject):
    type = 'BaseCache'
    abstract = True
    cxx_header = "mem/cache/base.hh"

    size = Param.MemorySize("Capacity")
    assoc = Param.Unsigned("Associativity")

    tag_latency = Param.Cycles("Tag lookup latency")
    data_latency = Param.Cycles("Data access latency")
    response_latency = Param.Cycles("Latency for the return path on a miss")

    warmup_percentage = Param.Percent(0,
        "Percentage of tags to be touched to warm up the cache")

    max_miss_count = Param.Counter(0,
        "Number of misses to handle before calling exit")

    mshrs = Param.Unsigned("Number of MSHRs (max outstanding requests)")
    demand_mshr_reserve = Param.Unsigned(1, "MSHRs reserved for demand access")
    tgts_per_mshr = Param.Unsigned("Max number of accesses per MSHR")
    write_buffers = Param.Unsigned(8, "Number of write buffers")

    is_read_only = Param.Bool(False, "Is this cache read only (e.g. inst)")

    prefetcher = Param.BasePrefetcher(NULL, "Prefetcher attached to cache")
    prefetch_on_access = Param.Bool(False,
         "Notify the hardware prefetcher on every access (not just misses)")

    tags = Param.BaseTags(BaseSetAssoc(), "Tag store")
    replacement_policy = Param.BaseReplacementPolicy(LRURP(),
        "Replacement policy")

    sequential_access = Param.Bool(False,
        "Whether to access tags and data sequentially")

    cpu_side = SlavePort("Upstream port closer to the CPU and/or device")
    mem_side = MasterPort("Downstream port closer to memory")

    addr_ranges = VectorParam.AddrRange([AllMemory],
         "Address range for the CPU-side port (to allow striping)")

    system = Param.System(Parent.any, "System we belong to")

    # Determine if this cache sends out writebacks for clean lines, or
    # simply clean evicts. In cases where a downstream cache is mostly
    # exclusive with respect to this cache (acting as a victim cache),
    # the clean writebacks are essential for performance. In general
    # this should be set to True for anything but the last-level
    # cache.
    writeback_clean = Param.Bool(False, "Writeback clean lines")

    # Control whether this cache should be mostly inclusive or mostly
    # exclusive with respect to upstream caches. The behaviour on a
    # fill is determined accordingly. For a mostly inclusive cache,
    # blocks are allocated on all fill operations, so L1 caches should
    # be set as mostly inclusive even if they have no upstream caches.
    # For a mostly exclusive cache, fills do not allocate unless they
    # come directly from a non-caching source, e.g. a table walker.
    # Additionally, a mostly exclusive cache drops a line on a hit
    # from an upstream cache. See the configuration sketch after this
    # class for how clusivity and writeback_clean are typically
    # combined.
    clusivity = Param.Clusivity('mostly_incl',
                                "Clusivity with upstream cache")

    # The write allocator enables optimizations for streaming write
    # accesses by first coalescing writes and then avoiding allocation
    # in the current cache. Typically, this would be enabled in the
    # data cache.
    write_allocator = Param.WriteAllocator(NULL, "Write allocator")

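# A hedged configuration sketch (the sizes and latencies are illustrative
# assumptions, not defaults from this file) showing how clusivity and
# writeback_clean are typically combined: an L2 acting as a victim cache is
# made mostly exclusive, while the L1 above it writes back clean lines so
# the L2 still receives them on eviction.
#
#   l1d = Cache(size='32kB', assoc=2, tag_latency=2, data_latency=2,
#               response_latency=2, mshrs=4, tgts_per_mshr=20,
#               writeback_clean=True, clusivity='mostly_incl')
#   l2 = Cache(size='256kB', assoc=8, tag_latency=10, data_latency=10,
#              response_latency=10, mshrs=20, tgts_per_mshr=12,
#              writeback_clean=False, clusivity='mostly_excl')
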
class Cache(BaseCache):
    type = 'Cache'
    cxx_header = 'mem/cache/cache.hh'


class NoncoherentCache(BaseCache):
    type = 'NoncoherentCache'
    cxx_header = 'mem/cache/noncoherent_cache.hh'

    # This is typically a last-level cache, and any clean writebacks
    # would be unnecessary traffic to main memory.
    writeback_clean = False

158