
* Start on lua scripting
* Implement evalsha, script load, script exists, and script flush
* Type conversions from lua to resp/json. Refactor to make luastate and luascripts persistent in the controller.
* Change controller.command and all underlying commands to return resp.Value. Serialize only during the output.
* First stab at tile38 call from lua
* Change tile38 into tile38.call in Lua
* Properly return errors from scripts
* Minor refactoring. No locking on script run
* Cleanup/refactoring
* Create a pool of 5 lua states, allow for more as needed. Refactor.
* Use safe map for scripts. Add a limit for max number of lua states. Refactor.
* Refactor
* Refactor script commands into atomic, read-only, and non-atomic classes. Proper locking for all three classes. Add tests for scripts
* More tests for scripts
* Properly escape newlines in lua-produced errors
* Better test for readonly failure
* Correctly convert ok/err messages between lua and resp. Add pcall, sha1hex, error_reply, status_reply functions to tile38 namespace in lua.
* Add pcall test. Change writeErr to work with string argument
* Make sure eval/evalsha never attempt to write AOF
* Add eval-set and eval-get to benchmarks
* Fix eval benchmark tests, add more
* Improve benchmarks
* Optimizations and refactoring.
* Add lua memtest
* Typo
* Add dependency
* golint fixes
* gofmt fixes
* Add scripting commands to the core/commands.json
* Use ARGV for args inside lua
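
As a rough, illustrative sketch (not taken from the repository), the scripting commands listed above could be driven end to end through the DoBatch helper of the test harness reproduced below. The script bodies, key and id names, argument values, and expected replies here are assumptions based on Redis-style EVAL semantics and the tile38.call/ok-err conversion described in the commit list.

// exampleScriptRoundTrip is an illustrative sketch only, not part of the
// test suite. It exercises EVAL through the DoBatch helper defined in the
// harness below; scripts, arguments, and expected replies are placeholders.
func exampleScriptRoundTrip(mc *mockServer) error {
	return mc.DoBatch([][]interface{}{
		// scripts read their trailing arguments from the Lua ARGV table
		{"EVAL", "return ARGV[1]", "0", "hello"}, {"hello"},
		// tile38.call runs a regular command from inside the script; the
		// resulting OK status reply is converted back into a RESP simple string
		{"EVAL", "return tile38.call('SET', ARGV[1], ARGV[2], 'STRING', ARGV[3])", "0", "mykey", "myid", "myvalue"}, {"OK"},
	})
}
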
package tests

import (
	"fmt"
	"os"
	"os/signal"
	"syscall"
	"testing"
)

// ANSI escape sequences used to colorize test output.
const (
	clear   = "\x1b[0m"
	bright  = "\x1b[1m"
	dim     = "\x1b[2m"
	black   = "\x1b[30m"
	red     = "\x1b[31m"
	green   = "\x1b[32m"
	yellow  = "\x1b[33m"
	blue    = "\x1b[34m"
	magenta = "\x1b[35m"
	cyan    = "\x1b[36m"
	white   = "\x1b[37m"
)

func TestAll(t *testing.T) {
	mockCleanup()
	defer mockCleanup()

	// clean up the mock server and exit if the run is interrupted;
	// the channel is buffered so signal.Notify cannot drop the signal
	ch := make(chan os.Signal, 1)
	signal.Notify(ch, os.Interrupt, syscall.SIGTERM)
	go func() {
		<-ch
		mockCleanup()
		os.Exit(1)
	}()

	mc, err := mockOpenServer()
	if err != nil {
		t.Fatal(err)
	}
	defer mc.Close()
	runSubTest(t, "keys", mc, subTestKeys)
	runSubTest(t, "json", mc, subTestJSON)
	runSubTest(t, "search", mc, subTestSearch)
	runSubTest(t, "fence", mc, subTestFence)
	runSubTest(t, "scripts", mc, subTestScripts)
}

func runSubTest(t *testing.T, name string, mc *mockServer, test func(t *testing.T, mc *mockServer)) {
	t.Run(name, func(t *testing.T) {
		fmt.Printf(bright+"Testing %s\n"+clear, name)
		test(t, mc)
	})
}

func runStep(t *testing.T, mc *mockServer, name string, step func(mc *mockServer) error) {
	t.Run(name, func(t *testing.T) {
		if err := func() error {
			// reset the current server
			mc.ResetConn()
			defer mc.ResetConn()
			// clear the database so the test is consistent
			if err := mc.DoBatch([][]interface{}{
				{"OUTPUT", "resp"}, {"OK"},
				{"FLUSHDB"}, {"OK"},
			}); err != nil {
				return err
			}
			if err := step(mc); err != nil {
				return err
			}
			return nil
		}(); err != nil {
			fmt.Printf("["+red+"fail"+clear+"]: %s\n", name)
			t.Fatal(err)
		}
		fmt.Printf("["+green+"ok"+clear+"]: %s\n", name)
	})
}
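
The sub-tests registered in TestAll (subTestKeys, subTestScripts, and the rest) are defined in other files of this package. As a hypothetical illustration of how a sub-test is expected to plug into the helpers above, a minimal one might look like the sketch below; the step name, command, and expected reply are placeholders rather than actual test contents.

// exampleSubTest is an illustrative placeholder, not one of the real
// sub-tests. Each step runs through runStep, which gives it a fresh
// connection and an empty database before the step body executes.
func exampleSubTest(t *testing.T, mc *mockServer) {
	runStep(t, mc, "set a point", func(mc *mockServer) error {
		return mc.DoBatch([][]interface{}{
			{"SET", "mykey", "myid", "POINT", "33", "-115"}, {"OK"},
		})
	})
}

Such a function would be wired up from TestAll with runSubTest(t, "example", mc, exampleSubTest).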