
This commit changes the logic for managing the expiration of objects in the database.

Before: There was a server-wide hashmap that stored the collection key, id, and expiration timestamp for every object that had a TTL. The hashmap was occasionally probed at 20 random positions, looking for objects that had expired. Those expired objects were immediately deleted, and if 5 or more objects were deleted, the probe ran again with no delay. If fewer than 5 objects were deleted, the next probe waited 1/10th of a second.

Now: Rather than a server-wide hashmap, each collection has its own ordered priority queue that stores its objects with TTLs. Rather than probing, a background routine runs every 1/10th of a second, pops the expired objects from each collection's queue, and deletes them.

The collection/queue method is more stable than the hashmap/probing method. With probing, the random sampling can badly miss expired objects when TTL durations are spread wide, such as over hours or days, which may cause the system to occasionally fall behind and leave should-be-expired objects in memory. With a queue there are no misses: every object that should be expired is removed right away, regardless of its TTL duration.

Fixes #616
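To make the new behavior concrete, here is a minimal sketch of the queue/sweep approach described above. It is not the actual Tile38 implementation: the Collection, expItem, SetWithTTL, and sweepLoop names are illustrative, a standard-library min-heap stands in for the per-collection ordered priority queue, and re-setting an existing id is ignored for brevity. A background routine wakes every 1/10th of a second and pops whatever has already expired off the front of each collection's queue.

```go
package main

import (
	"container/heap"
	"fmt"
	"sync"
	"time"
)

// expItem pairs an object id with its expiration timestamp (unix nanoseconds).
type expItem struct {
	id string
	ex int64
}

// expQueue is a min-heap of expItem, ordered by soonest expiration first.
type expQueue []expItem

func (q expQueue) Len() int            { return len(q) }
func (q expQueue) Less(i, j int) bool  { return q[i].ex < q[j].ex }
func (q expQueue) Swap(i, j int)       { q[i], q[j] = q[j], q[i] }
func (q *expQueue) Push(x interface{}) { *q = append(*q, x.(expItem)) }
func (q *expQueue) Pop() interface{} {
	old := *q
	item := old[len(old)-1]
	*q = old[:len(old)-1]
	return item
}

// Collection stands in for a Tile38 collection: its objects plus the
// collection's own expiration queue.
type Collection struct {
	mu      sync.Mutex
	objects map[string]string // id -> object JSON
	expires expQueue
}

// SetWithTTL stores an object and records its expiration in the queue.
// (Re-setting an existing id is not handled here, for brevity.)
func (c *Collection) SetWithTTL(id, obj string, ttl time.Duration) {
	c.mu.Lock()
	defer c.mu.Unlock()
	if c.objects == nil {
		c.objects = make(map[string]string)
	}
	c.objects[id] = obj
	heap.Push(&c.expires, expItem{id: id, ex: time.Now().Add(ttl).UnixNano()})
}

// sweep pops and deletes every object whose expiration has passed.
func (c *Collection) sweep(now int64) {
	c.mu.Lock()
	defer c.mu.Unlock()
	for len(c.expires) > 0 && c.expires[0].ex <= now {
		item := heap.Pop(&c.expires).(expItem)
		delete(c.objects, item.id)
	}
}

// sweepLoop is the background routine: every 1/10th of a second it pops the
// expired objects from each collection's queue and deletes them.
func sweepLoop(colls []*Collection) {
	for range time.Tick(time.Second / 10) {
		now := time.Now().UnixNano()
		for _, c := range colls {
			c.sweep(now)
		}
	}
}

func main() {
	coll := &Collection{}
	go sweepLoop([]*Collection{coll})

	coll.SetWithTTL("point-1", `{"type":"Point","coordinates":[0,0]}`, 2*time.Second)
	time.Sleep(3 * time.Second) // give the sweeper time to expire "point-1"

	coll.mu.Lock()
	fmt.Println("objects remaining:", len(coll.objects)) // prints: objects remaining: 0
	coll.mu.Unlock()
}
```

The test program below exercises the server-side behavior directly: it connects to a Tile38 server on port 9851, continuously inserts objects with a 10-second TTL, and compares the server's SCAN count against a locally tracked count of objects that should still be alive.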
// Test Tile38 for Expiration Drift
// Issue #616

package main

import (
	"fmt"
	"math/rand"
	"sync"
	"time"

	"github.com/gomodule/redigo/redis"
	"github.com/tidwall/btree"
	"github.com/tidwall/gjson"
	"github.com/tidwall/sjson"
)

// exsecs is the TTL, in seconds, assigned to every object the test creates.
const exsecs = 10
// key is the collection key used for all test objects.
const key = "__issue_616__"

// makeID returns a random 10-character object id.
func makeID() string {
	const chars = "0123456789abcdefghijklmnopqrstuvwxyz-"
	var buf [10]byte
	rand.Read(buf[:])
	for i := 0; i < len(buf); i++ {
		buf[i] = chars[int(buf[i])%len(chars)]
	}
	return string(buf[:])
}

func main() {
	fmt.Printf(
		"The SCAN and ACTUAL values should reach about 1850 and stay\n" +
			"roughly the same from there on.\n")
	var mu sync.Mutex
	// objs tracks every live object, ordered by id.
	objs := btree.New(func(a, b interface{}) bool {
		ajson := a.(string)
		bjson := b.(string)
		return gjson.Get(ajson, "id").String() < gjson.Get(bjson, "id").String()
	})
	// expires tracks the same objects, ordered by expiration time, then id.
	expires := btree.New(func(a, b interface{}) bool {
		ajson := a.(string)
		bjson := b.(string)
		if gjson.Get(ajson, "properties.ex").Int() < gjson.Get(bjson, "properties.ex").Int() {
			return true
		}
		if gjson.Get(ajson, "properties.ex").Int() > gjson.Get(bjson, "properties.ex").Int() {
			return false
		}
		return gjson.Get(ajson, "id").String() < gjson.Get(bjson, "id").String()
	})

	// Start from a clean collection.
	conn := must(redis.Dial("tcp", ":9851")).(redis.Conn)
	must(conn.Do("DROP", key))
	must(nil, conn.Close())

	// Writer: continuously SET new objects with a TTL, and mirror them in the
	// local objs/expires trees.
	go func() {
		conn := must(redis.Dial("tcp", ":9851")).(redis.Conn)
		defer conn.Close()
		for {
			ex := time.Now().UnixNano() + int64(exsecs*time.Second)
			for i := 0; i < 10; i++ {
				id := makeID()
				x := rand.Float64()*360 - 180
				y := rand.Float64()*180 - 90
				obj := fmt.Sprintf(`{"type":"Feature","geometry":{"type":"Point","coordinates":[%f,%f]},"properties":{}}`, x, y)
				obj, _ = sjson.Set(obj, "properties.ex", ex)
				obj, _ = sjson.Set(obj, "id", id)
				res := must(redis.String(conn.Do("SET", key, id, "ex", exsecs, "OBJECT", obj))).(string)
				if res != "OK" {
					panic(fmt.Sprintf("expected 'OK', got '%s'", res))
				}
				mu.Lock()
				prev := objs.Set(obj)
				if prev != nil {
					// An object with this id already existed; drop its old
					// expiration entry before adding the new one.
					expires.Delete(prev)
				}
				expires.Set(obj)
				mu.Unlock()
			}
			time.Sleep(time.Second / 20)
		}
	}()

	// Periodically shrink the AOF while the test runs.
	go func() {
		conn := must(redis.Dial("tcp", ":9851")).(redis.Conn)
		defer conn.Close()
		for {
			time.Sleep(time.Second * 5)
			must(conn.Do("AOFSHRINK"))
		}
	}()

	// Reader: every 1/10th of a second, SCAN the server, expire the local
	// trees, and print the server count (SCAN) next to the expected count
	// (ACTUAL).
	go func() {
		conn := must(redis.Dial("tcp", ":9851")).(redis.Conn)
		defer conn.Close()
		must(conn.Do("OUTPUT", "JSON"))
		for {
			time.Sleep(time.Second / 10)
			var ids []string
			res := must(redis.String(conn.Do("SCAN", key, "LIMIT", 100000000))).(string)
			gjson.Get(res, "objects").ForEach(func(_, res gjson.Result) bool {
				ids = append(ids, res.Get("id").String())
				return true
			})
			now := time.Now().UnixNano()
			mu.Lock()
			// Collect every locally tracked object whose expiration has passed.
			var exobjs []string
			expires.Ascend(nil, func(v interface{}) bool {
				ex := gjson.Get(v.(string), "properties.ex").Int()
				if ex > now {
					return false
				}
				exobjs = append(exobjs, v.(string))
				return true
			})
			for _, obj := range exobjs {
				objs.Delete(obj)
				expires.Delete(obj)
			}
			fmt.Printf("\rSCAN: %d, ACTUAL: %d ", len(ids), objs.Len())
			mu.Unlock()
		}
	}()
	select {}
}

// must panics on err and otherwise returns v.
func must(v interface{}, err error) interface{} {
	if err != nil {
		panic(err)
	}
	return v
}