I'm writing an application in Go. It generates a data structure in memory for a database entry upon request. Entries that are requested once are often requested again, so I want to cache them in memory to avoid repeated calls to the database (mostly for latency reasons).
Is it possible to have this in-memory cache grow dynamically until we hit memory pressure (i.e. a failed malloc) and then free part of the cache?
Caching in Redis or similar would complicate deployment. If that's the only other option, I'd prefer just to specify a static cache-size at run-time.
I'm not opposed to using C.malloc.
I suppose, but I don't know how that interacts with Go's memory management: if I allocate a chunk of memory and the Go runtime then allocates a chunk for a goroutine stack or something on top of the heap, I can't release my memory to the OS until whatever is on top is freed. Also, I'm compiling without cgo so far, and it'd be nice to keep it that way.
I'm hoping there's something in the debug or runtime package that might hint that the system is under memory pressure, so that I can size my cache dynamically and keep the program in pure Go.
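For illustration, this is roughly the shape of thing I could build today with runtime.ReadMemStats, but the byte budget, polling interval, and evict callback below are all made-up placeholders, and a fixed budget is exactly what I'd like to avoid:

package main

import (
	"fmt"
	"runtime"
	"time"
)

// Hypothetical fixed budget; I'd rather react to real memory pressure.
const softLimitBytes = 1 << 30

func watchMemory(evict func()) {
	var m runtime.MemStats
	for range time.Tick(10 * time.Second) {
		runtime.ReadMemStats(&m)
		// HeapAlloc only reflects this process's live heap, not system-wide
		// pressure, which is why I'm hoping for a better signal.
		if m.HeapAlloc > softLimitBytes {
			evict()
		}
	}
}

func main() {
	go watchMemory(func() { fmt.Println("would evict part of the cache here") })
	select {}
}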
Any help or insight is greatly appreciated.
This answer has a solution for reading the system's memory statistics (from /proc/meminfo) at runtime.
Here's a starting point for a concurrent-safe cache using that code:
package main

import (
	"bufio"
	"os"
	"strconv"
	"strings"
	"sync"
)

// Memory holds the fields we care about from /proc/meminfo. All values are in
// kB, as reported by the kernel.
type Memory struct {
	MemTotal     int
	MemFree      int
	MemAvailable int
}

type Cache[T any] struct {
	MinMemFree int // Minimum MemFree (in kB, matching /proc/meminfo) before eviction starts
	chunkSize  int // Number of key/value pairs removed from data when MinMemFree is reached
	mu         sync.Mutex
	data       map[string]T
	order      []string // Keeps track of the order in which keys were added to data
}

// NewCache returns an empty cache that evicts its oldest chunkSize entries
// whenever system MemFree drops to minMemFree kB or below.
func NewCache[T any](minMemFree int, chunkSize int) *Cache[T] {
	return &Cache[T]{
		MinMemFree: minMemFree,
		chunkSize:  chunkSize,
		data:       make(map[string]T),
		order:      []string{},
	}
}
// Get returns the cached value for key and whether it was present.
func (c *Cache[T]) Get(key string) (T, bool) {
	c.mu.Lock()
	defer c.mu.Unlock()
	value, ok := c.data[key]
	return value, ok
}

// Set stores value under key. If the free-memory floor has been reached it
// evicts the oldest chunk of entries, returning the number removed.
func (c *Cache[T]) Set(key string, value T) int {
	c.mu.Lock()
	defer c.mu.Unlock()
	if _, exists := c.data[key]; !exists {
		// Only track new keys so that order never holds duplicates.
		c.order = append(c.order, key)
	}
	c.data[key] = value
	if c.minSizeReached() {
		return c.freeOldestChunk()
	}
	return 0
}
// Free oldest items in the cache, and return the number removed
func (c *Cache[T]) freeOldestChunk() int {
	count := 0
	for i := 1; i <= c.chunkSize; i++ {
		key := c.shiftOrder()
		if key == "" {
			break
		}
		delete(c.data, key)
		count++
	}
	return count
}

func (c *Cache[T]) shiftOrder() string {
	if len(c.order) == 0 {
		return ""
	}
	key := c.order[0]
	c.order = c.order[1:]
	return key
}
// minSizeReached reports whether MemFree has dropped to or below the configured
// floor. Note that it re-reads /proc/meminfo on every call.
func (c *Cache[T]) minSizeReached() bool {
	return ReadMemoryStats().MemFree <= c.MinMemFree
}
// ReadMemoryStats parses /proc/meminfo (Linux-specific). Values are in kB.
func ReadMemoryStats() Memory {
	file, err := os.Open("/proc/meminfo")
	if err != nil {
		panic(err)
	}
	defer file.Close()
	scanner := bufio.NewScanner(file)
	res := Memory{}
	for scanner.Scan() {
		key, value := parseLine(scanner.Text())
		switch key {
		case "MemTotal":
			res.MemTotal = value
		case "MemFree":
			res.MemFree = value
		case "MemAvailable":
			res.MemAvailable = value
		}
	}
	return res
}
// parseLine turns a /proc/meminfo line such as "MemFree: 123456 kB" into its
// key and numeric value, dropping the trailing "kB" unit.
func parseLine(raw string) (key string, value int) {
	text := strings.ReplaceAll(raw[:len(raw)-2], " ", "")
	keyValue := strings.Split(text, ":")
	return keyValue[0], toInt(keyValue[1])
}

func toInt(raw string) int {
	if raw == "" {
		return 0
	}
	res, err := strconv.Atoi(raw)
	if err != nil {
		panic(err)
	}
	return res
}
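The snippet above has no func main, so to try it out you could append something like the following (the 204800 kB floor, the 100-entry chunk size, and the loadFromDB stand-in are placeholder values, not recommendations):

func main() {
	// Evict 100 entries at a time once MemFree drops to ~200 MB (204800 kB).
	cache := NewCache[string](204800, 100)

	// loadFromDB stands in for the real database lookup.
	loadFromDB := func(key string) string { return "value for " + key }

	key := "user:42"
	value, ok := cache.Get(key)
	if !ok {
		value = loadFromDB(key)
		cache.Set(key, value)
	}
	os.Stdout.WriteString(value + "\n")
}

Depending on the host you may prefer to trigger eviction on MemAvailable rather than MemFree, since MemAvailable also counts memory the kernel can reclaim (page cache and the like).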