Version: 2.8.x (Latest)

By default, the caching component provides a high-speed in-memory cache whose operations cost only nanoseconds of CPU time.

Usage Example

Basic Usage

package main

import (
    "fmt"

    "github.com/gogf/gf/v2/os/gcache"
    "github.com/gogf/gf/v2/os/gctx"
)

func main() {
    // Create a cache object,
    // but you can also conveniently use the gcache package methods directly
    var (
        ctx   = gctx.New()
        cache = gcache.New()
    )

    // Set cache, no expiration
    err := cache.Set(ctx, "k1", "v1", 0)
    if err != nil {
        panic(err)
    }

    // Get cache value
    value, err := cache.Get(ctx, "k1")
    if err != nil {
        panic(err)
    }
    fmt.Println(value)

    // Get cache size
    size, err := cache.Size(ctx)
    if err != nil {
        panic(err)
    }
    fmt.Println(size)

    // Check if a specific key name exists in the cache
    b, err := cache.Contains(ctx, "k1")
    if err != nil {
        panic(err)
    }
    fmt.Println(b)

    // Remove and return the deleted key-value
    removedValue, err := cache.Remove(ctx, "k1")
    if err != nil {
        panic(err)
    }
    fmt.Println(removedValue)

    // Close the cache object to allow GC to reclaim resources
    if err = cache.Close(ctx); err != nil {
        panic(err)
    }
}

After execution, the output is:

v1
1
true
v1
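
As the comment in the example above notes, the same operations are also available as package-level gcache functions that act on a default cache object, so no explicit cache instance is needed. Below is a minimal sketch of that style, reusing the illustrative key k1; the Get functions return the value wrapped in a *gvar.Var, which prints as its underlying value.

package main

import (
    "fmt"

    "github.com/gogf/gf/v2/os/gcache"
    "github.com/gogf/gf/v2/os/gctx"
)

func main() {
    ctx := gctx.New()

    // Set a value on the default cache, no expiration
    if err := gcache.Set(ctx, "k1", "v1", 0); err != nil {
        panic(err)
    }

    // Read it back; the returned *gvar.Var prints as "v1"
    value, err := gcache.Get(ctx, "k1")
    if err != nil {
        panic(err)
    }
    fmt.Println(value)

    // Remove the key and print the removed value
    removed, err := gcache.Remove(ctx, "k1")
    if err != nil {
        panic(err)
    }
    fmt.Println(removed)
}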

Expiration Control

package main

import (
    "fmt"
    "time"

    "github.com/gogf/gf/v2/os/gcache"
    "github.com/gogf/gf/v2/os/gctx"
)

func main() {
    var (
        ctx = gctx.New()
    )
    // Write only if the key does not exist, with an expiration time of 1 second
    _, err := gcache.SetIfNotExist(ctx, "k1", "v1", time.Second)
    if err != nil {
        panic(err)
    }

    // Print the current list of key names
    keys, err := gcache.Keys(ctx)
    if err != nil {
        panic(err)
    }
    fmt.Println(keys)

    // Print the current list of values
    values, err := gcache.Values(ctx)
    if err != nil {
        panic(err)
    }
    fmt.Println(values)

    // Get the value of a specific key; write it if it does not exist, and return it
    value, err := gcache.GetOrSet(ctx, "k2", "v2", 0)
    if err != nil {
        panic(err)
    }
    fmt.Println(value)

    // Print the current key-value pairs
    data1, err := gcache.Data(ctx)
    if err != nil {
        panic(err)
    }
    fmt.Println(data1)

    // Wait 1 second so that k1:v1 expires automatically
    time.Sleep(time.Second)

    // Print the current key-value pairs again: k1:v1 has expired, leaving only k2:v2
    data2, err := gcache.Data(ctx)
    if err != nil {
        panic(err)
    }
    fmt.Println(data2)
}

After execution, the output is:

[k1]
[v1]
v2
map[k1:v1 k2:v2]
map[k2:v2]
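
The first return value of SetIfNotExist, discarded with _ in the example above, reports whether the write actually took place. Here is a minimal sketch of that behavior; the key k3 and the one-minute expiration are just illustrative.

package main

import (
    "fmt"
    "time"

    "github.com/gogf/gf/v2/os/gcache"
    "github.com/gogf/gf/v2/os/gctx"
)

func main() {
    ctx := gctx.New()

    // First write: the key does not exist yet, so the value is stored
    ok, err := gcache.SetIfNotExist(ctx, "k3", "v3", time.Minute)
    if err != nil {
        panic(err)
    }
    fmt.Println(ok) // true

    // Second write: the key already exists, so nothing is written
    ok, err = gcache.SetIfNotExist(ctx, "k3", "other", time.Minute)
    if err != nil {
        panic(err)
    }
    fmt.Println(ok) // false
}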

GetOrSetFunc*

GetOrSetFunc retrieves a cached value; if the cache entry does not exist, it executes the specified f func(context.Context) (interface{}, error), caches the result returned by f, and returns that result.

Note that the parameter f of GetOrSetFunc is executed outside the cache's lock mechanism, so GetOrSetFunc may itself be nested inside f. However, if f is computationally intensive it may be executed multiple times under high concurrency; only the result of the first f to complete is cached, and the others are discarded. GetOrSetFuncLock, on the other hand, executes f inside the cache's lock mechanism, which guarantees that f runs only once when the cache item does not exist, but the cache write lock is then held for as long as f takes to execute.

Here's an example:

package main

import (
    "context"
    "fmt"
    "time"

    "github.com/gogf/gf/v2/os/gcache"
    "github.com/gogf/gf/v2/os/gctx"
)

func main() {
    var (
        ch    = make(chan struct{})
        ctx   = gctx.New()
        key   = `key`
        value = `value`
    )
    for i := 0; i < 10; i++ {
        go func(index int) {
            // Block until the channel is closed so that all goroutines start together
            <-ch
            _, err := gcache.GetOrSetFuncLock(ctx, key, func(ctx context.Context) (interface{}, error) {
                fmt.Println(index, "entered")
                return value, nil
            }, 0)
            if err != nil {
                panic(err)
            }
        }(i)
    }
    close(ch)
    time.Sleep(time.Second)
}

After execution, the terminal output is (which index is printed is random, but only one line is printed):

9 entered

As you can see, when multiple goroutines call GetOrSetFuncLock concurrently, its concurrency-safety control lets only one goroutine's value-generating function execute; as soon as it succeeds, the other goroutines return the cached data immediately without executing their own value-generating functions.
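
For contrast, below is a sketch of the same test using the non-locking GetOrSetFunc. Since f runs outside the cache's lock, under high concurrency several goroutines may print "entered" before the first result is written, although only one result ends up cached; the exact output depends on scheduling.

package main

import (
    "context"
    "fmt"
    "time"

    "github.com/gogf/gf/v2/os/gcache"
    "github.com/gogf/gf/v2/os/gctx"
)

func main() {
    var (
        ch    = make(chan struct{})
        ctx   = gctx.New()
        key   = `key`
        value = `value`
    )
    for i := 0; i < 10; i++ {
        go func(index int) {
            <-ch
            // f is executed outside the cache lock, so more than one
            // goroutine may enter here before the first value is cached
            _, err := gcache.GetOrSetFunc(ctx, key, func(ctx context.Context) (interface{}, error) {
                fmt.Println(index, "entered")
                return value, nil
            }, 0)
            if err != nil {
                panic(err)
            }
        }(i)
    }
    close(ch)
    time.Sleep(time.Second)
}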

LRU Cache Eviction Control

package main

import (
    "fmt"
    "time"

    "github.com/gogf/gf/v2/os/gcache"
    "github.com/gogf/gf/v2/os/gctx"
)

func main() {
    var (
        ctx   = gctx.New()
        cache = gcache.New(2) // Set the LRU capacity to 2 items
    )

    // Add 10 elements, no expiration
    for i := 0; i < 10; i++ {
        if err := cache.Set(ctx, i, i, 0); err != nil {
            panic(err)
        }
    }
    size, err := cache.Size(ctx)
    if err != nil {
        panic(err)
    }
    fmt.Println(size)

    keys, err := cache.Keys(ctx)
    if err != nil {
        panic(err)
    }
    fmt.Println(keys)

    // Read key 1 so that it is preferentially retained during eviction
    value, err := cache.Get(ctx, 1)
    if err != nil {
        panic(err)
    }
    fmt.Println(value)

    // After waiting a while (the LRU check runs once per second by default),
    // the least recently used elements are evicted first
    time.Sleep(3 * time.Second)
    size, err = cache.Size(ctx)
    if err != nil {
        panic(err)
    }
    fmt.Println(size)

    keys, err = cache.Keys(ctx)
    if err != nil {
        panic(err)
    }
    fmt.Println(keys)
}

After execution, the output is:

10
[2 3 5 6 7 0 1 4 8 9]
1
2
[1 9]
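
To check the eviction result programmatically rather than by listing keys, the sketch below uses Contains on a smaller cache with the same 2-item LRU capacity: key 0 is the least recently used item and is expected to be evicted, while the just-accessed key 1 remains (this assumes the background LRU check has had time to run).

package main

import (
    "fmt"
    "time"

    "github.com/gogf/gf/v2/os/gcache"
    "github.com/gogf/gf/v2/os/gctx"
)

func main() {
    var (
        ctx   = gctx.New()
        cache = gcache.New(2) // LRU capacity of 2 items
    )

    // Insert three items, so one of them must be evicted
    for i := 0; i < 3; i++ {
        if err := cache.Set(ctx, i, i, 0); err != nil {
            panic(err)
        }
    }

    // Touch key 1 so it becomes the most recently used item
    if _, err := cache.Get(ctx, 1); err != nil {
        panic(err)
    }

    // Give the background LRU check (about once per second) time to run
    time.Sleep(2 * time.Second)

    // Expected: "0 false" (evicted) and "1 true" (retained)
    for _, k := range []int{0, 1} {
        ok, err := cache.Contains(ctx, k)
        if err != nil {
            panic(err)
        }
        fmt.Println(k, ok)
    }
}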

Performance Testing

Test Environment

CPU: Intel(R) Core(TM) i5-4460  CPU @ 3.20GHz
MEM: 8GB
SYS: Ubuntu 16.04 amd64

Test Results

john@john-B85M:~/Workspace/Go/GOPATH/src/github.com/gogf/gf/v2/os/gcache$ go test *.go -bench=".*" -benchmem
goos: linux
goarch: amd64
Benchmark_CacheSet-4                      2000000    897 ns/op    249 B/op    4 allocs/op
Benchmark_CacheGet-4                      5000000    202 ns/op     49 B/op    1 allocs/op
Benchmark_CacheRemove-4                  50000000   35.7 ns/op      0 B/op    0 allocs/op
Benchmark_CacheLruSet-4                   2000000    880 ns/op    399 B/op    4 allocs/op
Benchmark_CacheLruGet-4                   3000000    212 ns/op     33 B/op    1 allocs/op
Benchmark_CacheLruRemove-4               50000000   35.9 ns/op      0 B/op    0 allocs/op
Benchmark_InterfaceMapWithLockSet-4       3000000    477 ns/op     73 B/op    2 allocs/op
Benchmark_InterfaceMapWithLockGet-4      10000000    149 ns/op      0 B/op    0 allocs/op
Benchmark_InterfaceMapWithLockRemove-4   50000000   39.8 ns/op      0 B/op    0 allocs/op
Benchmark_IntMapWithLockWithLockSet-4     5000000    304 ns/op     53 B/op    0 allocs/op
Benchmark_IntMapWithLockGet-4            20000000    164 ns/op      0 B/op    0 allocs/op
Benchmark_IntMapWithLockRemove-4         50000000   33.1 ns/op      0 B/op    0 allocs/op
PASS
ok command-line-arguments 47.503s