aboutsummaryrefslogtreecommitdiff
path: root/qtmips_machine/cache.cpp
diff options
context:
space:
mode:
Diffstat (limited to 'qtmips_machine/cache.cpp')
-rw-r--r--qtmips_machine/cache.cpp135
1 files changed, 109 insertions, 26 deletions
diff --git a/qtmips_machine/cache.cpp b/qtmips_machine/cache.cpp
index 0f8bd1a..f37db38 100644
--- a/qtmips_machine/cache.cpp
+++ b/qtmips_machine/cache.cpp
@@ -13,6 +13,20 @@ Cache::Cache(Memory *m, MachineConfigCache *cc) : cnf(cc) {
dt[i][y].data = new std::uint32_t[cc->blocks()];
}
}
+ // Allocate per-set replacement policy bookkeeping.
+ // Note the trailing "()" on the inner array news: value-initialization
+ // zeroes the counters/timestamps. A plain `new unsigned[n]` would leave
+ // them indeterminate, and the later `++` / comparisons in access() would
+ // read uninitialized memory the first time a set fills up.
+ switch (cnf.replacement_policy()) {
+ case MachineConfigCache::RP_LFU:
+ replc.lfu = new unsigned *[cnf.sets()];
+ for (unsigned row = 0; row < cnf.sets(); row++)
+ replc.lfu[row] = new unsigned[cnf.associativity()]();
+ break;
+ case MachineConfigCache::RP_LRU:
+ replc.lru = new time_t*[cnf.sets()];
+ for (unsigned row = 0; row < cnf.sets(); row++)
+ replc.lru[row] = new time_t[cnf.associativity()]();
+ break; // explicit break (was a silent fall-through into default)
+ default:
+ break;
+ }
// Zero hit and miss rate
hitc = 0;
missc = 0;
@@ -34,18 +48,10 @@ std::uint32_t Cache::rword(std::uint32_t address) const {
}
void Cache::flush() {
- for (unsigned as = 0; as < cnf.associativity(); as++) {
- for (unsigned st = 0; st < cnf.sets(); st++) {
- struct cache_data &cd = dt[as][st];
- if (cnf.write_policy() == MachineConfigCache::WP_BACK && cd.valid && cd.dirty) {
- std::uint32_t base_address = cd.tag * cnf.blocks() * cnf.sets();
- for (unsigned i = 0; i < cnf.blocks(); i++)
- mem->wword(base_address + (4*i), cd.data[i]);
- }
- cd.dirty = false;
- cd.valid = false;
- }
- }
+ for (unsigned as = 0; as < cnf.associativity(); as++)
+ for (unsigned st = 0; st < cnf.sets(); st++)
+ if (dt[as][st].valid)
+ kick(as, st);
}
unsigned Cache::hit() {
@@ -56,35 +62,91 @@ unsigned Cache::miss() {
return missc;
}
+void Cache::reset() {
+ // Invalidate every cache line (mark all cells as invalid)
+ for (unsigned as = 0; as < cnf.associativity(); as++)
+ for (unsigned st = 0; st < cnf.sets(); st++)
+ dt[as][st].valid = false;
+ // NOTE(review): replacement-policy data (replc.lfu / replc.lru) is left
+ // stale here on the assumption it gets re-zeroed before an invalid cell
+ // is reused — confirm kick()/access() actually guarantee that.
+ // Zero hit and miss statistics
+ hitc = 0;
+ missc = 0;
+}
+
+// Read-only access to the configuration this cache was constructed with.
+const MachineConfigCache &Cache::config() {
+ return cnf;
+}
+
void Cache::access(std::uint32_t address, std::uint32_t **data, bool read) const {
- // TODO associativity
address = address >> 2;
unsigned ssize = cnf.blocks() * cnf.sets();
std::uint32_t tag = address / ssize;
- std::uint32_t base_address = ((address / cnf.blocks()) * cnf.blocks()) << 2;
std::uint32_t index = address % ssize;
std::uint32_t row = index / cnf.blocks();
std::uint32_t col = index % cnf.blocks();
- struct cache_data &cd = dt[0][row];
+ unsigned indx = 0;
+ // Try to locate exact block or some unused one
+ while (indx < cnf.associativity() && dt[indx][row].valid && dt[indx][row].tag != tag)
+ indx++;
+ // Replace block
+ if (indx >= cnf.associativity()) {
+ // We have to kick something
+ switch (cnf.replacement_policy()) {
+ case MachineConfigCache::RP_RAND:
+ indx = rand() % cnf.associativity();
+ break;
+ case MachineConfigCache::RP_LFU:
+ {
+ unsigned lowest = replc.lfu[row][0];
+ indx = 0;
+ for (unsigned i = 1; i < cnf.associativity(); i++)
+ if (lowest > replc.lfu[row][i]) {
+ lowest = replc.lfu[row][i];
+ indx = i;
+ }
+ }
+ break;
+ case MachineConfigCache::RP_LRU:
+ {
+ time_t lowest = replc.lru[row][0];
+ indx = 0;
+ for (unsigned i = 1; i < cnf.associativity(); i++)
+ if (lowest > replc.lru[row][i]) {
+ lowest = replc.lru[row][i];
+ indx = i;
+ }
+ }
+ break;
+ }
+ }
+ SANITY_ASSERT(indx < cnf.associativity(), "Probably unimplemented replacement policy");
+
+ struct cache_data &cd = dt[indx][row];
// Verify if we are not replacing
- if (cd.tag != tag && cd.valid) {
- if (cd.dirty && cnf.write_policy() == MachineConfigCache::WP_BACK)
- for (unsigned i = 0; i < cnf.blocks(); i++)
- mem->wword(base_address + (4*i), cd.data[i]);
- cd.valid = false;
- cd.dirty = false;
- }
+ if (cd.tag != tag && cd.valid)
+ kick(indx, row);
+ // Update statistics and otherwise read from memory
if (cd.valid) {
hitc++;
} else {
missc++;
- if (read) { // If this is read and we don't have valid content then read it from memory
- for (unsigned i = 0; i < cnf.blocks(); i++)
- cd.data[i] = mem->rword(base_address + (4*i));
- }
+ for (unsigned i = 0; i < cnf.blocks(); i++)
+ cd.data[i] = mem->rword(base_address(tag, row) + (4*i));
+ }
+
+ // Update replacement-policy bookkeeping for the slot we just touched.
+ // Fix: the arrays were swapped here — the LFU case incremented
+ // replc.lru and the LRU case stamped replc.lfu, so each policy was
+ // writing into the other policy's (possibly unallocated) array.
+ switch (cnf.replacement_policy()) {
+ case MachineConfigCache::RP_LFU:
+ replc.lfu[row][indx]++; // LFU: bump use counter
+ break;
+ case MachineConfigCache::RP_LRU:
+ replc.lru[row][indx] = time(NULL); // LRU: record last-use time
+ break;
+ default:
+ break;
}
cd.valid = true; // We either write to it or we read from memory. Either way it's valid when we leave Cache class
@@ -92,3 +154,24 @@ void Cache::access(std::uint32_t address, std::uint32_t **data, bool read) const
cd.tag = tag;
*data = &cd.data[col];
}
+
+// Evict the block at (associat_indx, row): write it back to memory if dirty
+// (write-back policy only), invalidate it, and clear its replacement data.
+void Cache::kick(unsigned associat_indx, unsigned row) const {
+ struct cache_data &cd = dt[associat_indx][row];
+ if (cd.dirty && cnf.write_policy() == MachineConfigCache::WP_BACK)
+ for (unsigned i = 0; i < cnf.blocks(); i++)
+ // Fix: base_address() takes the block's TAG, not the associativity
+ // index — passing associat_indx wrote the data to the wrong address.
+ mem->wword(base_address(cd.tag, row) + (4*i), cd.data[i]);
+ cd.valid = false;
+ cd.dirty = false;
+
+ switch (cnf.replacement_policy()) {
+ case MachineConfigCache::RP_LFU:
+ // Fix: reset the LFU counter (was replc.lru — wrong array).
+ replc.lfu[row][associat_indx] = 0;
+ break;
+ default:
+ break;
+ }
+}
+
+// Reconstruct the byte address of the first word of the block identified by
+// (tag, row): tag*blocks*sets + row*blocks is the word index; <<2 converts
+// the word index to a byte address (inverse of the decomposition in access()).
+std::uint32_t Cache::base_address(std::uint32_t tag, unsigned row) const {
+ return ((tag * cnf.blocks() * cnf.sets()) + (row * cnf.blocks())) << 2;
+}