Searched refs:allocate (Results 26 - 50 of 75) sorted by relevance


/gem5/ext/dsent/libutil/
Log.cc 34 void Log::allocate(const String& log_file_name_) function in class:LibUtil::Log
/gem5/src/arch/x86/
intmessage.hh 88 pkt->allocate();
/gem5/src/cpu/testers/directedtest/
SeriesRequestGenerator.cc 74 pkt->allocate();
InvalidateGenerator.cc 81 pkt->allocate();
/gem5/src/mem/cache/
write_queue_entry.hh 127 void allocate(Addr blk_addr, unsigned blk_size, PacketPtr pkt,
write_queue_entry.cc 91 WriteQueueEntry::allocate(Addr blk_addr, unsigned blk_size, PacketPtr target, function in class:WriteQueueEntry
base.hh 396 * Determine whether we should allocate on a fill or not. If this
398 * we always allocate (for any non-forwarded and cacheable
399 * requests). In the case of a mostly exclusive cache, we allocate
406 * @return Whether we should allocate on the fill
727 * @param allocate Whether to allocate a block or use the temp block
731 PacketList &writebacks, bool allocate);
1073 MSHR *mshr = mshrQueue.allocate(pkt->getBlockAddr(blkSize), blkSize,
1111 writeBuffer.allocate(blk_addr, blkSize, pkt, time, order++);
1274 * line) we switch to NO_ALLOCATE when writes should not allocate i
1303 bool allocate() const { function in class:WriteAllocator
[all...]
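
The base.hh hits above describe an allocate-on-fill policy keyed to the cache's clusivity: a mostly inclusive cache fills for every cacheable, non-forwarded request, while a mostly exclusive one fills only in special cases. A minimal standalone sketch of that kind of decision follows; the enum and conditions are illustrative, not gem5's actual BaseCache::allocOnFill.

    // Illustrative allocate-on-fill decision, modelled on the policy the
    // base.hh comment describes; not gem5's implementation.
    enum class Clusivity { MostlyInclusive, MostlyExclusive };

    bool allocOnFill(Clusivity clusivity, bool cacheable, bool isPrefetch)
    {
        if (!cacheable)
            return false;                 // uncacheable fills never allocate
        if (clusivity == Clusivity::MostlyInclusive)
            return true;                  // mostly inclusive: always fill
        return isPrefetch;                // mostly exclusive: only special cases
    }
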
cache.cc 344 // uncacheable accesses always allocate a new MSHR
386 pf->allocate();
439 // flags, there is no need to allocate any data as the
558 pkt->allocate();
627 const bool allocate = allocOnFill(pkt->cmd) && local
628 (!writeAllocator || writeAllocator->allocate());
629 blk = handleFill(bus_pkt, blk, writebacks, allocate);
817 // this response did not allocate here and therefore
913 pkt->allocate();
938 // do not clear flags, and allocate spac
[all...]
mshr.cc 93 // potentially re-evaluate whether we should allocate on a fill or
163 // actually allocate space for the data payload
164 pkt->allocate();
253 MSHR::allocate(Addr blk_addr, unsigned blk_size, PacketPtr target, function in class:MSHR
271 // Don't know of a case where we would allocate a new MSHR for a
438 // save a copy here. Clear flags and also allocate new data as
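
The mshr.cc comments above ("actually allocate space for the data payload", "allocate new data") refer to gem5's Packet::allocate(), which gives a packet its own data buffer. A minimal sketch of that idiom follows; Request and Packet constructor signatures differ across gem5 versions, and masterId is a placeholder.

    #include "mem/packet.hh"
    #include "mem/request.hh"

    // Sketch of the pkt->allocate() idiom seen in the hits above: build a
    // request and packet, then allocate the packet-owned data payload.
    // Signatures match gem5 of roughly this era and may differ in newer ones.
    PacketPtr
    makeReadPacket(Addr addr, unsigned size, MasterID masterId)
    {
        RequestPtr req = std::make_shared<Request>(addr, size, 0, masterId);
        PacketPtr pkt = new Packet(req, MemCmd::ReadReq);
        pkt->allocate();   // allocate space for the data payload
        return pkt;
    }
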
/gem5/src/mem/ruby/structures/
DirectoryMemory.cc 124 DirectoryMemory::allocate(Addr address, AbstractEntry *entry)
/gem5/util/tlm/src/
sc_slave_port.cc 103 tlm::tlm_generic_payload * trans = mm.allocate();
140 tlm::tlm_generic_payload * trans = mm.allocate();
209 tlm::tlm_generic_payload * trans = mm.allocate();
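
The sc_slave_port.cc hits (and the traffic_generator.cc one further down) draw transactions from a TLM memory manager rather than constructing them directly. A sketch of what such a manager typically looks like, modelled on the TLM-2.0 example memory manager; the class below is a hypothetical stand-in for gem5's own.

    #include <tlm>
    #include <vector>

    // Hypothetical pooling memory manager in the style of the 'mm' used above.
    class SimpleMM : public tlm::tlm_mm_interface
    {
      public:
        tlm::tlm_generic_payload *allocate()
        {
            if (pool.empty())
                return new tlm::tlm_generic_payload(this);
            tlm::tlm_generic_payload *t = pool.back();
            pool.pop_back();
            return t;
        }

        // Called by trans->release() once the reference count drops to zero.
        void free(tlm::tlm_generic_payload *t) override
        {
            t->reset();          // drop any extensions
            pool.push_back(t);   // recycle the object instead of deleting it
        }

      private:
        std::vector<tlm::tlm_generic_payload *> pool;
    };

Callers typically pair mm.allocate() with trans->acquire() and trans->release() so the payload finds its way back into the pool.
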
/gem5/src/mem/
snoop_filter.cc 72 bool allocate = !cpkt->req->isUncacheable() && slave_port.isSnooping() && local
82 // If the snoop filter has no entry, and we should not allocate,
85 if (!is_hit && !allocate)
113 if (!allocate)
345 // we only allocate if the packet actually came from a cache, but
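
The snoop_filter.cc hits spell out a lookup-or-allocate pattern: decide whether this requestor deserves an entry, and bypass the filter on a miss when it does not. A small standalone sketch of that control flow; SFEntry and the names below are hypothetical.

    #include <cstdint>
    #include <unordered_map>

    struct SFEntry { uint64_t holders = 0; };   // hypothetical per-line state

    std::unordered_map<uint64_t, SFEntry> table;

    // Mirror of the logic described above: bypass on a miss when the
    // requestor is uncacheable or not snooping, otherwise create an entry.
    SFEntry *lookupOrAllocate(uint64_t lineAddr, bool allocate)
    {
        auto it = table.find(lineAddr);
        const bool is_hit = (it != table.end());
        if (!is_hit && !allocate)
            return nullptr;                     // nothing to track, let it pass
        if (!is_hit)
            it = table.emplace(lineAddr, SFEntry{}).first;
        return &it->second;
    }
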
/gem5/util/tlm/examples/master_port/
traffic_generator.cc 58 auto trans = mm.allocate();
/gem5/ext/libelf/
libelf_ehdr.c 99 _libelf_ehdr(Elf *e, int ec, int allocate) argument
160 if (allocate)
/gem5/ext/systemc/src/sysc/datatypes/bit/
sc_logic.h 286 { return sc_core::sc_mempool::allocate( sz ); }
292 { return sc_core::sc_mempool::allocate( sz ); }
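
The sc_logic.h hits are the bodies of class-level operator new overloads that route small-object allocation through SystemC's sc_mempool (the matching operator delete calls sc_mempool::release). A minimal sketch of the same idiom on a hypothetical class, assuming the Accellera header layout used under ext/systemc:

    #include <cstddef>
    #include "sysc/utils/sc_mempool.h"

    // Hypothetical class using the pooled-allocation idiom from the hits above.
    class PooledThing
    {
      public:
        static void *operator new(std::size_t sz)
            { return sc_core::sc_mempool::allocate(sz); }

        static void operator delete(void *p, std::size_t sz)
            { sc_core::sc_mempool::release(p, sz); }

        int value = 0;
    };
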
/gem5/ext/systemc/src/sysc/datatypes/int/
sc_int_base.h 1234 sc_int_bitref* result_p = sc_int_bitref::m_pool.allocate();
1244 sc_int_bitref* result_p = sc_int_bitref::m_pool.allocate();
1255 sc_int_bitref* result_p = sc_int_bitref::m_pool.allocate();
1265 sc_int_bitref* result_p = sc_int_bitref::m_pool.allocate();
1278 sc_int_subref* result_p = sc_int_subref::m_pool.allocate();
1288 sc_int_subref* result_p = sc_int_subref::m_pool.allocate();
1299 sc_int_subref* result_p = sc_int_subref::m_pool.allocate();
1309 sc_int_subref* result_p = sc_int_subref::m_pool.allocate();
sc_uint_base.h 1204 sc_uint_bitref* result_p = sc_uint_bitref::m_pool.allocate();
1214 sc_uint_bitref* result_p = sc_uint_bitref::m_pool.allocate();
1225 sc_uint_bitref* result_p = sc_uint_bitref::m_pool.allocate();
1235 sc_uint_bitref* result_p = sc_uint_bitref::m_pool.allocate();
1248 sc_uint_subref* result_p = sc_uint_subref::m_pool.allocate();
1258 sc_uint_subref* result_p = sc_uint_subref::m_pool.allocate();
1269 sc_uint_subref* result_p = sc_uint_subref::m_pool.allocate();
1279 sc_uint_subref* result_p = sc_uint_subref::m_pool.allocate();
/gem5/src/systemc/ext/dt/bit/
sc_logic.hh 260 return sc_core::sc_mempool::allocate(sz);
270 return sc_core::sc_mempool::allocate(sz);
/gem5/src/systemc/ext/dt/int/
sc_int_base.hh 1256 sc_int_bitref *result_p = sc_int_bitref::m_pool.allocate();
1265 sc_int_bitref *result_p = sc_int_bitref::m_pool.allocate();
1275 sc_int_bitref *result_p = sc_int_bitref::m_pool.allocate();
1284 sc_int_bitref *result_p = sc_int_bitref::m_pool.allocate();
1296 sc_int_subref *result_p = sc_int_subref::m_pool.allocate();
1305 sc_int_subref *result_p = sc_int_subref::m_pool.allocate();
1315 sc_int_subref *result_p = sc_int_subref::m_pool.allocate();
1324 sc_int_subref *result_p = sc_int_subref::m_pool.allocate();
sc_uint_base.hh 1141 sc_uint_bitref *result_p = sc_uint_bitref::m_pool.allocate();
1150 sc_uint_bitref *result_p = sc_uint_bitref::m_pool.allocate();
1159 sc_uint_bitref *result_p = sc_uint_bitref::m_pool.allocate();
1168 sc_uint_bitref *result_p = sc_uint_bitref::m_pool.allocate();
1178 sc_uint_subref *result_p = sc_uint_subref::m_pool.allocate();
1187 sc_uint_subref *result_p = sc_uint_subref::m_pool.allocate();
1196 sc_uint_subref *result_p = sc_uint_subref::m_pool.allocate();
1205 sc_uint_subref *result_p = sc_uint_subref::m_pool.allocate();
/gem5/ext/sst/
ExtMaster.cc 178 pkt->allocate();
/gem5/src/systemc/tests/tlm/nb2b_adapter/
nb2b_adapter.cpp 53 trans = m_mm.allocate();
79 trans = m_mm.allocate();
/gem5/src/mem/ruby/slicc_interface/
AbstractController.cc 271 pkt->allocate();
296 pkt->allocate();
/gem5/ext/systemc/src/sysc/kernel/
sc_event.h 362 { return allocate(); }
370 static void* allocate();
sc_object.cpp 243 namebuf = (char*) sc_mempool::allocate(namebuf_alloc);

Completed in 58 milliseconds
