author     Karel Kočí <cynerd@email.cz>  2018-04-08 11:33:26 +0200
committer  Karel Kočí <cynerd@email.cz>  2018-04-08 11:33:26 +0200
commit     3652d0a13857b7c341fb08a882e12c0b2205c8c0 (patch)
tree       1c8db1bdb118cf60c78b478c2d672d48d6873e0b /qtmips_machine
parent     d80338e5cfff59f0e9f2be4cd86e312000b2d8c0 (diff)
Add associative cache
Not fully tested yet.
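
For orientation, the new Cache::access path decomposes a byte address into a tag, a set index (row) and a word offset (col), then searches all ways of that set. The following standalone sketch mirrors that arithmetic; blocks, sets and associativity are assumed example values, not taken from any particular QtMips configuration.

    // Minimal sketch of set-associative address decomposition (illustrative,
    // not the QtMips source): blocks = words per block, sets = rows per way.
    #include <cstdint>
    #include <iostream>

    int main() {
        const unsigned blocks = 4, sets = 8, associativity = 2; // assumed example geometry
        std::uint32_t address = 0x1234;              // byte address
        std::uint32_t word = address >> 2;           // word address
        unsigned ssize = blocks * sets;              // words covered by one way
        std::uint32_t tag = word / ssize;            // identifies the cached memory region
        std::uint32_t row = (word % ssize) / blocks; // which set
        std::uint32_t col = (word % ssize) % blocks; // which word inside the block
        std::cout << "tag=" << tag << " row=" << row << " col=" << col
                  << " (searched over " << associativity << " ways)\n";
        return 0;
    }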
Diffstat (limited to 'qtmips_machine')
-rw-r--r--  qtmips_machine/cache.cpp        | 135
-rw-r--r--  qtmips_machine/cache.h          |  15
-rw-r--r--  qtmips_machine/machineconfig.h  |   3
3 files changed, 123 insertions(+), 30 deletions(-)
diff --git a/qtmips_machine/cache.cpp b/qtmips_machine/cache.cpp
index 0f8bd1a..f37db38 100644
--- a/qtmips_machine/cache.cpp
+++ b/qtmips_machine/cache.cpp
@@ -13,6 +13,20 @@ Cache::Cache(Memory *m, MachineConfigCache *cc) : cnf(cc) {
dt[i][y].data = new std::uint32_t[cc->blocks()];
}
}
+ // Allocate replacement policy data
+ switch (cnf.replacement_policy()) {
+ case MachineConfigCache::RP_LFU:
+ replc.lfu = new unsigned *[cnf.sets()];
+ for (unsigned row = 0; row < cnf.sets(); row++)
+ replc.lfu[row] = new unsigned[cnf.associativity()];
+ break;
+ case MachineConfigCache::RP_LRU:
+ replc.lru = new time_t*[cnf.sets()];
+ for (unsigned row = 0; row < cnf.sets(); row++)
+ replc.lru[row] = new time_t[cnf.associativity()];
+ break;
+ default:
+ break;
+ }
// Zero hit and miss rate
hitc = 0;
missc = 0;
@@ -34,18 +48,10 @@ std::uint32_t Cache::rword(std::uint32_t address) const {
}
void Cache::flush() {
- for (unsigned as = 0; as < cnf.associativity(); as++) {
- for (unsigned st = 0; st < cnf.sets(); st++) {
- struct cache_data &cd = dt[as][st];
- if (cnf.write_policy() == MachineConfigCache::WP_BACK && cd.valid && cd.dirty) {
- std::uint32_t base_address = cd.tag * cnf.blocks() * cnf.sets();
- for (unsigned i = 0; i < cnf.blocks(); i++)
- mem->wword(base_address + (4*i), cd.data[i]);
- }
- cd.dirty = false;
- cd.valid = false;
- }
- }
+ for (unsigned as = 0; as < cnf.associativity(); as++)
+ for (unsigned st = 0; st < cnf.sets(); st++)
+ if (dt[as][st].valid)
+ kick(as, st);
}
unsigned Cache::hit() {
@@ -56,35 +62,91 @@ unsigned Cache::miss() {
return missc;
}
+void Cache::reset() {
+ // Set all cells to be invalid
+ for (unsigned as = 0; as < cnf.associativity(); as++)
+ for (unsigned st = 0; st < cnf.sets(); st++)
+ dt[as][st].valid = false;
+ // Note: replacement policy data doesn't have to be zeroed here as it is reset when an invalid cell is first used
+ // Zero hit and miss rate
+ hitc = 0;
+ missc = 0;
+}
+
+const MachineConfigCache &Cache::config() {
+ return cnf;
+}
+
void Cache::access(std::uint32_t address, std::uint32_t **data, bool read) const {
- // TODO associativity
address = address >> 2;
unsigned ssize = cnf.blocks() * cnf.sets();
std::uint32_t tag = address / ssize;
- std::uint32_t base_address = ((address / cnf.blocks()) * cnf.blocks()) << 2;
std::uint32_t index = address % ssize;
std::uint32_t row = index / cnf.blocks();
std::uint32_t col = index % cnf.blocks();
- struct cache_data &cd = dt[0][row];
+ unsigned indx = 0;
+ // Try to locate the exact block or an unused one
+ while (indx < cnf.associativity() && dt[indx][row].valid && dt[indx][row].tag != tag)
+ indx++;
+ // Replace block
+ if (indx >= cnf.associativity()) {
+ // We have to kick something
+ switch (cnf.replacement_policy()) {
+ case MachineConfigCache::RP_RAND:
+ indx = rand() % cnf.associativity();
+ break;
+ case MachineConfigCache::RP_LFU:
+ {
+ unsigned lowest = replc.lfu[row][0];
+ indx = 0;
+ for (unsigned i = 1; i < cnf.associativity(); i++)
+ if (lowest > replc.lfu[row][i]) {
+ lowest = replc.lfu[row][i];
+ indx = i;
+ }
+ }
+ break;
+ case MachineConfigCache::RP_LRU:
+ {
+ time_t lowest = replc.lru[row][0];
+ indx = 0;
+ for (unsigned i = 1; i < cnf.associativity(); i++)
+ if (lowest > replc.lru[row][i]) {
+ lowest = replc.lru[row][i];
+ indx = i;
+ }
+ }
+ break;
+ }
+ }
+ SANITY_ASSERT(indx < cnf.associativity(), "Probably unimplemented replacement policy");
+
+ struct cache_data &cd = dt[indx][row];
// Verify if we are not replacing
- if (cd.tag != tag && cd.valid) {
- if (cd.dirty && cnf.write_policy() == MachineConfigCache::WP_BACK)
- for (unsigned i = 0; i < cnf.blocks(); i++)
- mem->wword(base_address + (4*i), cd.data[i]);
- cd.valid = false;
- cd.dirty = false;
- }
+ if (cd.tag != tag && cd.valid)
+ kick(indx, row);
+ // Update statistics; on a miss the whole block is fetched from memory
if (cd.valid) {
hitc++;
} else {
missc++;
- if (read) { // If this is read and we don't have valid content then read it from memory
- for (unsigned i = 0; i < cnf.blocks(); i++)
- cd.data[i] = mem->rword(base_address + (4*i));
- }
+ for (unsigned i = 0; i < cnf.blocks(); i++)
+ cd.data[i] = mem->rword(base_address(tag, row) + (4*i));
+ }
+
+ // Update replacement policy data
+ switch (cnf.replacement_policy()) {
+ case MachineConfigCache::RP_LFU:
+ replc.lfu[row][indx]++;
+ break;
+ case MachineConfigCache::RP_LRU:
+ replc.lru[row][indx] = time(NULL);
+ break;
+ default:
+ break;
}
cd.valid = true; // We either write to it or we read from memory. Either way it's valid when we leave Cache class
@@ -92,3 +154,24 @@ void Cache::access(std::uint32_t address, std::uint32_t **data, bool read) const
cd.tag = tag;
*data = &cd.data[col];
}
+
+void Cache::kick(unsigned associat_indx, unsigned row) const {
+ struct cache_data &cd = dt[associat_indx][row];
+ if (cd.dirty && cnf.write_policy() == MachineConfigCache::WP_BACK)
+ for (unsigned i = 0; i < cnf.blocks(); i++)
+ mem->wword(base_address(associat_indx, row) + (4*i), cd.data[i]);
+ cd.valid = false;
+ cd.dirty = false;
+
+ switch (cnf.replacement_policy()) {
+ case MachineConfigCache::RP_LFU:
+ replc.lfu[row][associat_indx] = 0;
+ break;
+ default:
+ break;
+ }
+}
+
+std::uint32_t Cache::base_address(std::uint32_t tag, unsigned row) const {
+ return ((tag * cnf.blocks() * cnf.sets()) + (row * cnf.blocks())) << 2;
+}
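
The base_address() helper above reconstructs the byte address a cached block maps back to from its tag and row. A small worked example under an assumed geometry (4-word blocks, 8 sets; illustrative values only, not a real configuration):

    #include <cstdint>
    #include <iostream>

    // Same arithmetic as the helper above, with the geometry passed explicitly.
    std::uint32_t base_address(std::uint32_t tag, unsigned row,
                               unsigned blocks, unsigned sets) {
        return ((tag * blocks * sets) + (row * blocks)) << 2;
    }

    int main() {
        unsigned blocks = 4, sets = 8;             // assumed example geometry
        // Tag 3, row 5: ((3*4*8) + (5*4)) words = 116 words = 464 bytes.
        std::cout << base_address(3, 5, blocks, sets) << "\n"; // prints 464
        return 0;
    }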
diff --git a/qtmips_machine/cache.h b/qtmips_machine/cache.h
index 3ad3bdb..9372a1a 100644
--- a/qtmips_machine/cache.h
+++ b/qtmips_machine/cache.h
@@ -4,6 +4,7 @@
#include <memory.h>
#include <machineconfig.h>
#include <stdint.h>
+#include <time.h>
namespace machine {
@@ -20,7 +21,10 @@ public:
unsigned hit(); // Number of recorded hits
unsigned miss(); // Number of recorded misses
- // TODO reset
+ void reset(); // Reset whole state of cache
+
+ const MachineConfigCache &config();
+ // TODO getters for cells
private:
MachineConfigCache cnf;
@@ -33,9 +37,16 @@ private:
};
mutable struct cache_data **dt;
- mutable unsigned hitc, missc;
+ union {
+ time_t ** lru; // Access time
+ unsigned **lfu; // Access count
+ } replc; // Data used for replacement policy
+
+ mutable unsigned hitc, missc; // Hit and miss counters
void access(std::uint32_t address, std::uint32_t **data, bool read) const;
+ void kick(unsigned associat_indx, unsigned row) const;
+ std::uint32_t base_address(std::uint32_t tag, unsigned row) const;
};
}
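
In cache.h the new replc member holds per-set, per-way bookkeeping for whichever replacement policy is configured; since only one policy is active at a time, a union keeps this to a single pointer. A standalone sketch of the same pattern (names and sizes are illustrative, not the QtMips code):

    #include <ctime>

    union ReplacementData {
        time_t **lru;   // last-access time per [set][way] (LRU)
        unsigned **lfu; // access count per [set][way] (LFU)
    };

    int main() {
        const unsigned sets = 8, ways = 2;       // assumed example geometry
        ReplacementData replc;
        replc.lfu = new unsigned *[sets];
        for (unsigned s = 0; s < sets; s++)
            replc.lfu[s] = new unsigned[ways](); // zero-initialized counters
        // ... only replc.lfu may be touched while the LFU policy is selected ...
        for (unsigned s = 0; s < sets; s++)
            delete[] replc.lfu[s];
        delete[] replc.lfu;
        return 0;
    }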
diff --git a/qtmips_machine/machineconfig.h b/qtmips_machine/machineconfig.h
index 42406ff..1ec2703 100644
--- a/qtmips_machine/machineconfig.h
+++ b/qtmips_machine/machineconfig.h
@@ -26,8 +26,7 @@ public:
enum ReplacementPolicy {
RP_RAND, // Random
RP_LRU, // Least recently used
- RP_LFU, // Least frequently used
- RP_ARC // Adaptive replacement cache
+ RP_LFU // Least frequently used
};
enum WritePolicy {