Compare commits


No commits in common. "master" and "gql_cataclysm" have entirely different histories.

21 changed files with 1702 additions and 1923 deletions

@@ -17,9 +17,7 @@ func main() {
   db, err := badger.Open(badger.DefaultOptions("").WithInMemory(true))
   check(err)
-  ctx, err := gv.NewContext(&gv.BadgerDB{
-    DB: db,
-  }, gv.NewConsoleLogger([]string{"test"}))
+  ctx, err := gv.NewContext(db, gv.NewConsoleLogger([]string{"test", "signal"}))
   check(err)
   gql_ext, err := gv.NewGQLExt(ctx, ":8080", nil, nil)
@@ -27,16 +25,13 @@ func main() {
   listener_ext := gv.NewListenerExt(1000)
-  n1, err := gv.NewNode(ctx, nil, "LockableNode", 1000, gv.NewLockableExt(nil))
+  n1, err := gv.NewNode(ctx, nil, "Lockable", 1000, gv.NewLockableExt(nil))
   check(err)
-  n2, err := gv.NewNode(ctx, nil, "LockableNode", 1000, gv.NewLockableExt([]gv.NodeID{n1.ID}))
+  n2, err := gv.NewNode(ctx, nil, "Lockable", 1000, gv.NewLockableExt([]gv.NodeID{n1.ID}))
   check(err)
-  n3, err := gv.NewNode(ctx, nil, "LockableNode", 1000, gv.NewLockableExt(nil))
-  check(err)
-  _, err = gv.NewNode(ctx, nil, "LockableNode", 1000, gql_ext, listener_ext, gv.NewLockableExt([]gv.NodeID{n2.ID, n3.ID}))
+  _, err = gv.NewNode(ctx, nil, "Lockable", 1000, gql_ext, listener_ext, gv.NewLockableExt([]gv.NodeID{n2.ID}))
   check(err)
   for true {

File diff suppressed because it is too large

db.go

@@ -3,226 +3,153 @@ package graphvent
import ( import (
"encoding/binary" "encoding/binary"
"fmt" "fmt"
"reflect"
"sync"
badger "github.com/dgraph-io/badger/v3" badger "github.com/dgraph-io/badger/v3"
) )
type Database interface { func WriteNodeInit(ctx *Context, node *Node) error {
WriteNodeInit(*Context, *Node) error
WriteNodeChanges(*Context, *Node, map[ExtType]Changes) error
LoadNode(*Context, NodeID) (*Node, error)
}
const WRITE_BUFFER_SIZE = 1000000
type BadgerDB struct {
*badger.DB
sync.Mutex
buffer [WRITE_BUFFER_SIZE]byte
}
func (db *BadgerDB) WriteNodeInit(ctx *Context, node *Node) error {
if node == nil { if node == nil {
return fmt.Errorf("Cannot serialize nil *Node") return fmt.Errorf("Cannot serialize nil *Node")
} }
return db.Update(func(tx *badger.Txn) error { return ctx.DB.Update(func(tx *badger.Txn) error {
db.Lock()
defer db.Unlock()
// Get the base key bytes // Get the base key bytes
id_ser, err := node.ID.MarshalBinary() id_ser, err := node.ID.MarshalBinary()
if err != nil { if err != nil {
return err return err
} }
cur := 0
// Write Node value // Write Node value
written, err := Serialize(ctx, node, db.buffer[cur:]) node_val, err := Serialize(ctx, node)
if err != nil { if err != nil {
return err return err
} }
err = tx.Set(id_ser, node_val)
err = tx.Set(id_ser, db.buffer[cur:cur+written])
if err != nil { if err != nil {
return err return err
} }
cur += written
// Write empty signal queue // Write empty signal queue
sigqueue_id := append(id_ser, []byte(" - SIGQUEUE")...) sigqueue_id := append(id_ser, []byte(" - SIGQUEUE")...)
written, err = Serialize(ctx, node.SignalQueue, db.buffer[cur:]) sigqueue_val, err := Serialize(ctx, node.SignalQueue)
if err != nil { if err != nil {
return err return err
} }
err = tx.Set(sigqueue_id, sigqueue_val)
err = tx.Set(sigqueue_id, db.buffer[cur:cur+written])
if err != nil { if err != nil {
return err return err
} }
cur += written
// Write node extension list // Write node extension list
ext_list := []ExtType{} ext_list := []ExtType{}
for ext_type := range(node.Extensions) { for ext_type := range(node.Extensions) {
ext_list = append(ext_list, ext_type) ext_list = append(ext_list, ext_type)
} }
written, err = Serialize(ctx, ext_list, db.buffer[cur:]) ext_list_val, err := Serialize(ctx, ext_list)
if err != nil { if err != nil {
return err return err
} }
ext_list_id := append(id_ser, []byte(" - EXTLIST")...) ext_list_id := append(id_ser, []byte(" - EXTLIST")...)
err = tx.Set(ext_list_id, db.buffer[cur:cur+written]) err = tx.Set(ext_list_id, ext_list_val)
if err != nil { if err != nil {
return err return err
} }
cur += written
// For each extension: // For each extension:
for ext_type, ext := range(node.Extensions) { for ext_type, ext := range(node.Extensions) {
ext_info, exists := ctx.Extensions[ext_type] // Write each extension's current value
if exists == false {
return fmt.Errorf("Cannot serialize node with unknown extension %s", reflect.TypeOf(ext))
}
ext_value := reflect.ValueOf(ext).Elem()
ext_id := binary.BigEndian.AppendUint64(id_ser, uint64(ext_type)) ext_id := binary.BigEndian.AppendUint64(id_ser, uint64(ext_type))
ext_val, err := Serialize(ctx, ext)
// Write each field to a seperate key if err != nil {
for field_tag, field_info := range(ext_info.Fields) { return err
field_value := ext_value.FieldByIndex(field_info.Index)
field_id := make([]byte, len(ext_id) + 8)
tmp := binary.BigEndian.AppendUint64(ext_id, uint64(GetFieldTag(string(field_tag))))
copy(field_id, tmp)
written, err := SerializeValue(ctx, field_value, db.buffer[cur:])
if err != nil {
return fmt.Errorf("Extension serialize err: %s, %w", reflect.TypeOf(ext), err)
}
err = tx.Set(field_id, db.buffer[cur:cur+written])
if err != nil {
return fmt.Errorf("Extension set err: %s, %w", reflect.TypeOf(ext), err)
}
cur += written
} }
err = tx.Set(ext_id, ext_val)
} }
return nil return nil
}) })
} }
func (db *BadgerDB) WriteNodeChanges(ctx *Context, node *Node, changes map[ExtType]Changes) error { func WriteNodeChanges(ctx *Context, node *Node, changes map[ExtType]Changes) error {
return db.Update(func(tx *badger.Txn) error { return ctx.DB.Update(func(tx *badger.Txn) error {
db.Lock()
defer db.Unlock()
// Get the base key bytes // Get the base key bytes
id_bytes := ([16]byte)(node.ID) id_ser, err := node.ID.MarshalBinary()
if err != nil {
cur := 0 return err
}
// Write the signal queue if it needs to be written // Write the signal queue if it needs to be written
if node.writeSignalQueue { if node.writeSignalQueue {
node.writeSignalQueue = false node.writeSignalQueue = false
sigqueue_id := append(id_bytes[:], []byte(" - SIGQUEUE")...) sigqueue_id := append(id_ser, []byte(" - SIGQUEUE")...)
written, err := Serialize(ctx, node.SignalQueue, db.buffer[cur:]) sigqueue_val, err := Serialize(ctx, node.SignalQueue)
if err != nil { if err != nil {
return fmt.Errorf("SignalQueue Serialize Error: %+v, %w", node.SignalQueue, err) return err
} }
err = tx.Set(sigqueue_id, db.buffer[cur:cur+written]) err = tx.Set(sigqueue_id, sigqueue_val)
if err != nil { if err != nil {
return fmt.Errorf("SignalQueue set error: %+v, %w", node.SignalQueue, err) return err
} }
cur += written
} }
// For each ext in changes // For each ext in changes
for ext_type, changes := range(changes) { for ext_type := range(changes) {
ext_info, exists := ctx.Extensions[ext_type] // Write each ext
if exists == false {
return fmt.Errorf("%s is not an extension in ctx", ext_type)
}
ext, exists := node.Extensions[ext_type] ext, exists := node.Extensions[ext_type]
if exists == false { if exists == false {
return fmt.Errorf("%s is not an extension in %s", ext_type, node.ID) return fmt.Errorf("%s is not an extension in %s", ext_type, node.ID)
} }
ext_id := binary.BigEndian.AppendUint64(id_bytes[:], uint64(ext_type)) ext_id := binary.BigEndian.AppendUint64(id_ser, uint64(ext_type))
ext_value := reflect.ValueOf(ext) ext_ser, err := Serialize(ctx, ext)
if err != nil {
// Write each field return err
for _, tag := range(changes) { }
field_info, exists := ext_info.Fields[tag]
if exists == false {
return fmt.Errorf("Cannot serialize field %s of extension %s, does not exist", tag, ext_type)
}
field_value := ext_value.FieldByIndex(field_info.Index)
field_id := make([]byte, len(ext_id) + 8)
tmp := binary.BigEndian.AppendUint64(ext_id, uint64(GetFieldTag(string(tag))))
copy(field_id, tmp)
written, err := SerializeValue(ctx, field_value, db.buffer[cur:])
if err != nil {
return fmt.Errorf("Extension serialize err: %s, %w", reflect.TypeOf(ext), err)
}
err = tx.Set(field_id, db.buffer[cur:cur+written]) err = tx.Set(ext_id, ext_ser)
if err != nil { if err != nil {
return fmt.Errorf("Extension set err: %s, %w", reflect.TypeOf(ext), err) return err
}
cur += written
} }
} }
return nil return nil
}) })
} }
func (db *BadgerDB) LoadNode(ctx *Context, id NodeID) (*Node, error) { func LoadNode(ctx *Context, id NodeID) (*Node, error) {
var node *Node = nil var node *Node = nil
err := ctx.DB.View(func(tx *badger.Txn) error {
err := db.View(func(tx *badger.Txn) error {
// Get the base key bytes // Get the base key bytes
id_ser, err := id.MarshalBinary() id_ser, err := id.MarshalBinary()
if err != nil { if err != nil {
return fmt.Errorf("Failed to serialize node_id: %w", err) return err
} }
// Get the node value // Get the node value
node_item, err := tx.Get(id_ser) node_item, err := tx.Get(id_ser)
if err != nil { if err != nil {
return fmt.Errorf("Failed to get node_item: %w", NodeNotFoundError) return err
} }
err = node_item.Value(func(val []byte) error { err = node_item.Value(func(val []byte) error {
ctx.Log.Logf("db", "DESERIALIZE_NODE(%d bytes): %+v", len(val), val)
node, err = Deserialize[*Node](ctx, val) node, err = Deserialize[*Node](ctx, val)
return err return err
}) })
if err != nil { if err != nil {
return fmt.Errorf("Failed to deserialize Node %s - %w", id, err) return nil
} }
// Get the signal queue // Get the signal queue
sigqueue_id := append(id_ser, []byte(" - SIGQUEUE")...) sigqueue_id := append(id_ser, []byte(" - SIGQUEUE")...)
sigqueue_item, err := tx.Get(sigqueue_id) sigqueue_item, err := tx.Get(sigqueue_id)
if err != nil { if err != nil {
return fmt.Errorf("Failed to get sigqueue_id: %w", err) return err
} }
err = sigqueue_item.Value(func(val []byte) error { err = sigqueue_item.Value(func(val []byte) error {
node.SignalQueue, err = Deserialize[[]QueuedSignal](ctx, val) node.SignalQueue, err = Deserialize[[]QueuedSignal](ctx, val)
return err return err
}) })
if err != nil { if err != nil {
return fmt.Errorf("Failed to deserialize []QueuedSignal for %s: %w", id, err) return err
} }
// Get the extension list // Get the extension list
@@ -241,34 +168,20 @@ func (db *BadgerDB) LoadNode(ctx *Context, id NodeID) (*Node, error) {
// Get the extensions // Get the extensions
for _, ext_type := range(ext_list) { for _, ext_type := range(ext_list) {
ext_id := binary.BigEndian.AppendUint64(id_ser, uint64(ext_type)) ext_id := binary.BigEndian.AppendUint64(id_ser, uint64(ext_type))
ext_info, exists := ctx.Extensions[ext_type] ext_item, err := tx.Get(ext_id)
if exists == false { if err != nil {
return fmt.Errorf("Extension %s not in context", ext_type) return err
} }
ext := reflect.New(ext_info.Type) var ext Extension
for field_tag, field_info := range(ext_info.Fields) { err = ext_item.Value(func(val []byte) error {
field_id := binary.BigEndian.AppendUint64(ext_id, uint64(GetFieldTag(string(field_tag)))) ext, err = Deserialize[Extension](ctx, val)
field_item, err := tx.Get(field_id) return err
if err != nil { })
return fmt.Errorf("Failed to find key for %s:%s(%x) - %w", ext_type, field_tag, field_id, err) if err != nil {
} return err
err = field_item.Value(func(val []byte) error {
value, _, err := DeserializeValue(ctx, val, field_info.Type)
if err != nil {
return err
}
ext.Elem().FieldByIndex(field_info.Index).Set(value)
return nil
})
if err != nil {
return err
}
} }
node.Extensions[ext_type] = ext
node.Extensions[ext_type] = ext.Interface().(Extension)
} }
return nil return nil
@@ -276,8 +189,6 @@ func (db *BadgerDB) LoadNode(ctx *Context, id NodeID) (*Node, error) {
if err != nil { if err != nil {
return nil, err return nil, err
} else if node == nil {
return nil, fmt.Errorf("Tried to return nil *Node from BadgerDB.LoadNode without error")
} }
return node, nil return node, nil
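For orientation while reading the rewritten db.go: a node is stored under its raw ID bytes, with the signal queue, extension list, and each extension value stored under keys derived from that ID. The sketch below is not part of the repository; nodeKeys and the example values are illustrative and only restate the key derivation visible in WriteNodeInit/WriteNodeChanges/LoadNode above.

// Sketch of the badger key layout used by the rewritten db.go.
// nodeKeys is a hypothetical helper, not a graphvent API.
package main

import (
	"encoding/binary"
	"fmt"
)

func nodeKeys(idSer []byte, extType uint64) (node, sigqueue, extList, ext []byte) {
	node = idSer                                                             // node value itself
	sigqueue = append(append([]byte{}, idSer...), []byte(" - SIGQUEUE")...)  // queued signals
	extList = append(append([]byte{}, idSer...), []byte(" - EXTLIST")...)    // list of ExtTypes attached to the node
	ext = binary.BigEndian.AppendUint64(append([]byte{}, idSer...), extType) // one key per extension value
	return
}

func main() {
	id := make([]byte, 16) // a NodeID marshals to 16 bytes
	n, sq, el, e := nodeKeys(id, 42)
	fmt.Println(len(n), len(sq), len(el), len(e)) // 16 27 26 24
}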

@@ -0,0 +1,156 @@
package graphvent
import (
"time"
"fmt"
)
type EventCommand string
type EventState string
type EventExt struct {
Name string `gv:"name"`
State EventState `gv:"state"`
StateStart time.Time `gv:"state_start"`
Parent NodeID `gv:"parent" node:"Base"`
}
func (ext *EventExt) Load(ctx *Context, node *Node) error {
return nil
}
func (ext *EventExt) Unload(ctx *Context, node *Node) {
}
func NewEventExt(parent NodeID, name string) *EventExt {
return &EventExt{
Name: name,
State: "init",
Parent: parent,
}
}
type EventStateSignal struct {
SignalHeader
Source NodeID `gv:"source"`
State EventState `gv:"state"`
Time time.Time `gv:"time"`
}
func (signal EventStateSignal) String() string {
return fmt.Sprintf("EventStateSignal(%s, %s, %s, %+v)", signal.SignalHeader, signal.Source, signal.State, signal.Time)
}
func NewEventStateSignal(source NodeID, state EventState, t time.Time) *EventStateSignal {
return &EventStateSignal{
SignalHeader: NewSignalHeader(),
Source: source,
State: state,
Time: t,
}
}
type EventControlSignal struct {
SignalHeader
Command EventCommand `gv:"command"`
}
func (signal EventControlSignal) String() string {
return fmt.Sprintf("EventControlSignal(%s, %s)", signal.SignalHeader, signal.Command)
}
func NewEventControlSignal(command EventCommand) *EventControlSignal {
return &EventControlSignal{
NewSignalHeader(),
command,
}
}
func (ext *EventExt) UpdateState(node *Node, changes Changes, state EventState, state_start time.Time) {
if ext.State != state {
ext.StateStart = state_start
changes.Add("state")
ext.State = state
node.QueueSignal(time.Now(), NewEventStateSignal(node.ID, ext.State, time.Now()))
}
}
func (ext *EventExt) Process(ctx *Context, node *Node, source NodeID, signal Signal) ([]SendMsg, Changes) {
var messages []SendMsg = nil
var changes = Changes{}
return messages, changes
}
type TestEventExt struct {
Length time.Duration
}
func (ext *TestEventExt) Load(ctx *Context, node *Node) error {
return nil
}
func (ext *TestEventExt) Unload(ctx *Context, node *Node) {
}
type EventCommandMap map[EventCommand]map[EventState]EventState
var test_event_commands = EventCommandMap{
"ready?": {
"init": "ready",
},
"start": {
"ready": "running",
},
"abort": {
"ready": "init",
},
"stop": {
"running": "stopped",
},
"finish": {
"running": "done",
},
}
func (ext *TestEventExt) Process(ctx *Context, node *Node, source NodeID, signal Signal) ([]SendMsg, Changes) {
var messages []SendMsg = nil
var changes = Changes{}
switch sig := signal.(type) {
case *EventControlSignal:
event_ext, err := GetExt[EventExt](node)
if err != nil {
messages = append(messages, SendMsg{source, NewErrorSignal(sig.Id, "not_event")})
} else {
ctx.Log.Logf("event", "%s got %s EventControlSignal while in %s", node.ID, sig.Command, event_ext.State)
new_state, error_signal := event_ext.ValidateEventCommand(sig, test_event_commands)
if error_signal != nil {
messages = append(messages, SendMsg{source, error_signal})
} else {
switch sig.Command {
case "start":
node.QueueSignal(time.Now().Add(ext.Length), NewEventControlSignal("finish"))
}
event_ext.UpdateState(node, changes, new_state, time.Now())
messages = append(messages, SendMsg{source, NewSuccessSignal(sig.Id)})
}
}
}
return messages, changes
}
func(ext *EventExt) ValidateEventCommand(signal *EventControlSignal, commands EventCommandMap) (EventState, *ErrorSignal) {
transitions, command_mapped := commands[signal.Command]
if command_mapped == false {
return "", NewErrorSignal(signal.Id, "unknown command %s", signal.Command)
} else {
new_state, valid_transition := transitions[ext.State]
if valid_transition == false {
return "", NewErrorSignal(signal.Id, "invalid command state %s(%s)", signal.Command, ext.State)
} else {
return new_state, nil
}
}
}

@@ -0,0 +1,86 @@
package graphvent
import (
"crypto/ed25519"
"testing"
"time"
"crypto/rand"
)
func TestEvent(t *testing.T) {
ctx := logTestContext(t, []string{"event", "listener", "listener_debug"})
err := RegisterExtension[TestEventExt](ctx, nil)
fatalErr(t, err)
err = RegisterObject[TestEventExt](ctx)
fatalErr(t, err)
event_public, event_private, err := ed25519.GenerateKey(rand.Reader)
event_listener := NewListenerExt(100)
event, err := NewNode(ctx, event_private, "Base", 100, NewEventExt(KeyID(event_public), "Test Event"), &TestEventExt{time.Second}, event_listener)
fatalErr(t, err)
response, signals := testSend(t, ctx, NewEventControlSignal("ready?"), event, event)
switch resp := response.(type) {
case *SuccessSignal:
case *ErrorSignal:
t.Fatalf("Error response %+v", resp.Error)
default:
t.Fatalf("Unexpected response %+v", resp)
}
var state_signal *EventStateSignal = nil
for _, signal := range(signals) {
event_state, is_event_state := signal.(*EventStateSignal)
if is_event_state == true && event_state.Source == event.ID && event_state.State == "ready" {
state_signal = event_state
break
}
}
if state_signal == nil {
state_signal, err = WaitForSignal(event_listener.Chan, 10*time.Millisecond, func(sig *EventStateSignal) bool {
return sig.Source == event.ID && sig.State == "ready"
})
fatalErr(t, err)
}
response, signals = testSend(t, ctx, NewEventControlSignal("start"), event, event)
switch resp := response.(type) {
case *SuccessSignal:
case *ErrorSignal:
t.Fatalf("Error response %+v", resp.Error)
default:
t.Fatalf("Unexpected response %+v", resp)
}
state_signal = nil
for _, signal := range(signals) {
event_state, is_event_state := signal.(*EventStateSignal)
if is_event_state == true && event_state.Source == event.ID && event_state.State == "running" {
state_signal = event_state
break
}
}
if state_signal == nil {
state_signal, err = WaitForSignal(event_listener.Chan, 10*time.Millisecond, func(sig *EventStateSignal) bool {
return sig.Source == event.ID && sig.State == "running"
})
fatalErr(t, err)
}
_, err = WaitForSignal(event_listener.Chan, time.Second * 2, func(sig *EventStateSignal) bool {
return sig.Source == event.ID && sig.State == "done"
})
fatalErr(t, err)
response, signals = testSend(t, ctx, NewEventControlSignal("start"), event, event)
switch resp := response.(type) {
case *SuccessSignal:
t.Fatalf("Success response starting finished TestEventExt")
case *ErrorSignal:
default:
t.Fatalf("Unexpected response %+v", resp)
}
}

@@ -1,12 +1,13 @@
 package graphvent
-type Tag string
-type Changes []Tag
+import (
+)
 // Extensions are data attached to nodes that process signals
 type Extension interface {
   // Called to process incoming signals, returning changes and messages to send
-  Process(*Context, *Node, NodeID, Signal) ([]Message, Changes)
+  Process(*Context, *Node, NodeID, Signal) ([]SendMsg, Changes)
   // Called when the node is loaded into a context(creation or move), so extension data can be initialized
   Load(*Context, *Node) error
@@ -14,3 +15,10 @@ type Extension interface {
   // Called when the node is unloaded from a context(deletion or move), so extension data can be cleaned up
   Unload(*Context, *Node)
 }
+// Changes are lists of modifications made to extensions to be communicated
+type Changes []string
+func (changes *Changes) Add(fields ...string) {
+  new_changes := append(*changes, fields...)
+  *changes = new_changes
+}
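As a reading aid for the new interface, a minimal extension might look like the sketch below. NoopExt is a made-up name and not part of the repository; SendMsg's Dest/Signal fields, Changes.Add, and the gv struct tag follow their usage elsewhere in this diff.

// Sketch only: a near-no-op extension satisfying the Extension interface above.
type NoopExt struct {
	LastStatus NodeID `gv:"last_status"`
}

func (ext *NoopExt) Load(ctx *Context, node *Node) error { return nil }

func (ext *NoopExt) Unload(ctx *Context, node *Node) {}

func (ext *NoopExt) Process(ctx *Context, node *Node, source NodeID, signal Signal) ([]SendMsg, Changes) {
	changes := Changes{}
	switch sig := signal.(type) {
	case *StatusSignal:
		// remember which node last reported a status change and record the modified field
		ext.LastStatus = sig.Source
		changes.Add("last_status")
	}
	return nil, changes
}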

@@ -249,14 +249,13 @@ func GQLHandler(ctx *Context, server *Node, gql_ext *GQLExt) func(http.ResponseW
for header, value := range(r.Header) { for header, value := range(r.Header) {
header_map[header] = value header_map[header] = value
} }
ctx.Log.Logm("gql", header_map, "REQUEST_HEADERS")
resolve_context, err := NewResolveContext(ctx, server, gql_ext) resolve_context, err := NewResolveContext(ctx, server, gql_ext)
if err != nil { if err != nil {
ctx.Log.Logf("gql", "GQL_AUTH_ERR: %s", err) ctx.Log.Logf("gql", "GQL_AUTH_ERR: %s", err)
json.NewEncoder(w).Encode(GQLUnauthorized("")) json.NewEncoder(w).Encode(GQLUnauthorized(""))
return return
} else {
ctx.Log.Logf("gql", "New Query: %s", resolve_context.ID)
} }
req_ctx := context.Background() req_ctx := context.Background()
@@ -305,6 +304,7 @@ func sendOneResultAndClose(res *graphql.Result) chan *graphql.Result {
return resultChannel return resultChannel
} }
func getOperationTypeOfReq(p graphql.Params) string{ func getOperationTypeOfReq(p graphql.Params) string{
source := source.NewSource(&source.Source{ source := source.NewSource(&source.Source{
Body: []byte(p.RequestString), Body: []byte(p.RequestString),
@@ -330,6 +330,18 @@ func getOperationTypeOfReq(p graphql.Params) string{
return "END_OF_FUNCTION" return "END_OF_FUNCTION"
} }
func GQLWSDo(ctx * Context, p graphql.Params) chan *graphql.Result {
operation := getOperationTypeOfReq(p)
ctx.Log.Logf("gqlws", "GQLWSDO_OPERATION: %s - %+v", operation, p.RequestString)
if operation == ast.OperationTypeSubscription {
return graphql.Subscribe(p)
}
res := graphql.Do(p)
return sendOneResultAndClose(res)
}
func GQLWSHandler(ctx * Context, server *Node, gql_ext *GQLExt) func(http.ResponseWriter, *http.Request) { func GQLWSHandler(ctx * Context, server *Node, gql_ext *GQLExt) func(http.ResponseWriter, *http.Request) {
return func(w http.ResponseWriter, r * http.Request) { return func(w http.ResponseWriter, r * http.Request) {
ctx.Log.Logf("gqlws_new", "HANDLING %s",r.RemoteAddr) ctx.Log.Logf("gqlws_new", "HANDLING %s",r.RemoteAddr)
@@ -339,12 +351,11 @@ func GQLWSHandler(ctx * Context, server *Node, gql_ext *GQLExt) func(http.Respon
header_map[header] = value header_map[header] = value
} }
ctx.Log.Logm("gql", header_map, "REQUEST_HEADERS")
resolve_context, err := NewResolveContext(ctx, server, gql_ext) resolve_context, err := NewResolveContext(ctx, server, gql_ext)
if err != nil { if err != nil {
ctx.Log.Logf("gql", "GQL_AUTH_ERR: %s", err) ctx.Log.Logf("gql", "GQL_AUTH_ERR: %s", err)
return return
} else {
ctx.Log.Logf("gql", "New Subscription: %s", resolve_context.ID)
} }
req_ctx := context.Background() req_ctx := context.Background()
@@ -418,14 +429,11 @@ func GQLWSHandler(ctx * Context, server *Node, gql_ext *GQLExt) func(http.Respon
params.VariableValues = msg.Payload.Variables params.VariableValues = msg.Payload.Variables
} }
var res_chan chan *graphql.Result res_chan := GQLWSDo(ctx, params)
operation := getOperationTypeOfReq(params) if res_chan == nil {
ctx.Log.Logf("gqlws", "res_chan is nil")
if operation == ast.OperationTypeSubscription {
res_chan = graphql.Subscribe(params)
} else { } else {
res := graphql.Do(params) ctx.Log.Logf("gqlws", "res_chan: %+v", res_chan)
res_chan = sendOneResultAndClose(res)
} }
go func(res_chan chan *graphql.Result) { go func(res_chan chan *graphql.Result) {
@@ -501,7 +509,7 @@ type Field struct {
type NodeResult struct { type NodeResult struct {
NodeID NodeID NodeID NodeID
NodeType NodeType NodeType NodeType
Data map[string]interface{} Data map[ExtType]map[string]interface{}
} }
type ListField struct { type ListField struct {
@@ -518,7 +526,6 @@ type SelfField struct {
type SubscriptionInfo struct { type SubscriptionInfo struct {
ID uuid.UUID ID uuid.UUID
NodeCache *map[NodeID]NodeResult
Channel chan interface{} Channel chan interface{}
} }
@@ -537,13 +544,11 @@ type GQLExt struct {
State string `gv:"state"` State string `gv:"state"`
TLSKey []byte `gv:"tls_key"` TLSKey []byte `gv:"tls_key"`
TLSCert []byte `gv:"tls_cert"` TLSCert []byte `gv:"tls_cert"`
Listen string `gv:"listen" gql:"GQLListen"` Listen string `gv:"listen"`
} }
func (ext *GQLExt) Load(ctx *Context, node *Node) error { func (ext *GQLExt) Load(ctx *Context, node *Node) error {
ctx.Log.Logf("gql", "Loading GQL server extension on %s", node.ID) ctx.Log.Logf("gql", "Loading GQL server extension on %s", node.ID)
ext.resolver_response = map[uuid.UUID]chan Signal{}
ext.subscriptions = []SubscriptionInfo{}
return ext.StartGQLServer(ctx, node) return ext.StartGQLServer(ctx, node)
} }
@@ -557,7 +562,14 @@ func (ext *GQLExt) Unload(ctx *Context, node *Node) {
} }
} }
func (ext *GQLExt) AddSubscription(id uuid.UUID, ctx *ResolveContext) (chan interface{}, error) { func (ext *GQLExt) PostDeserialize(*Context) error {
ext.resolver_response = map[uuid.UUID]chan Signal{}
ext.subscriptions = []SubscriptionInfo{}
return nil
}
func (ext *GQLExt) AddSubscription(id uuid.UUID) (chan interface{}, error) {
ext.subscriptions_lock.Lock() ext.subscriptions_lock.Lock()
defer ext.subscriptions_lock.Unlock() defer ext.subscriptions_lock.Unlock()
@@ -571,7 +583,6 @@ func (ext *GQLExt) AddSubscription(id uuid.UUID, ctx *ResolveContext) (chan inte
ext.subscriptions = append(ext.subscriptions, SubscriptionInfo{ ext.subscriptions = append(ext.subscriptions, SubscriptionInfo{
id, id,
&ctx.NodeCache,
c, c,
}) })
@@ -619,10 +630,10 @@ func (ext *GQLExt) FreeResponseChannel(req_id uuid.UUID) chan Signal {
return response_chan return response_chan
} }
func (ext *GQLExt) Process(ctx *Context, node *Node, source NodeID, signal Signal) ([]Message, Changes) { func (ext *GQLExt) Process(ctx *Context, node *Node, source NodeID, signal Signal) ([]SendMsg, Changes) {
// Process ReadResultSignalType by forwarding it to the waiting resolver // Process ReadResultSignalType by forwarding it to the waiting resolver
var changes Changes = nil var changes Changes = nil
var messages []Message = nil var messages []SendMsg = nil
switch sig := signal.(type) { switch sig := signal.(type) {
case *SuccessSignal: case *SuccessSignal:
@@ -634,6 +645,8 @@ func (ext *GQLExt) Process(ctx *Context, node *Node, source NodeID, signal Signa
default: default:
ctx.Log.Logf("gql", "Resolver channel overflow %+v", sig) ctx.Log.Logf("gql", "Resolver channel overflow %+v", sig)
} }
} else {
ctx.Log.Logf("gql", "received success signal response %+v with no mapped resolver", sig)
} }
case *ErrorSignal: case *ErrorSignal:
@@ -646,6 +659,9 @@ func (ext *GQLExt) Process(ctx *Context, node *Node, source NodeID, signal Signa
default: default:
ctx.Log.Logf("gql", "Resolver channel overflow %+v", sig) ctx.Log.Logf("gql", "Resolver channel overflow %+v", sig)
} }
} else {
ctx.Log.Logf("gql", "received error signal response %+v with no mapped resolver", sig)
} }
case *ReadResultSignal: case *ReadResultSignal:
@@ -653,22 +669,23 @@ func (ext *GQLExt) Process(ctx *Context, node *Node, source NodeID, signal Signa
if response_chan != nil { if response_chan != nil {
select { select {
case response_chan <- sig: case response_chan <- sig:
ctx.Log.Logf("gql", "Forwarded to resolver, %+v", sig)
default: default:
ctx.Log.Logf("gql", "Resolver channel overflow %+v", sig) ctx.Log.Logf("gql", "Resolver channel overflow %+v", sig)
} }
} else {
ctx.Log.Logf("gql", "Received read result that wasn't expected - %+v", sig)
} }
case *StatusSignal: case *StatusSignal:
ext.subscriptions_lock.RLock() ext.subscriptions_lock.RLock()
for _, sub := range(ext.subscriptions) { ctx.Log.Logf("gql", "forwarding status signal from %+v to resolvers %+v", sig.Source, ext.subscriptions)
_, cached := (*sub.NodeCache)[sig.Source] for _, resolver := range(ext.subscriptions) {
if cached { select {
select { case resolver.Channel <- sig:
case sub.Channel <- sig: ctx.Log.Logf("gql_subscribe", "forwarded status signal to resolver: %+v", resolver.ID)
ctx.Log.Logf("gql", "forwarded status signal %+v to subscription: %s", sig, sub.ID) default:
default: ctx.Log.Logf("gql_subscribe", "resolver channel overflow: %+v", resolver.ID)
ctx.Log.Logf("gql", "subscription channel overflow: %s", sub.ID)
}
} }
} }
ext.subscriptions_lock.RUnlock() ext.subscriptions_lock.RUnlock()

@@ -25,12 +25,7 @@ func ResolveNodeType(p graphql.ResolveParams) (interface{}, error) {
return uint64(node.NodeType), nil return uint64(node.NodeType), nil
} }
type FieldIndex struct { func GetFieldNames(ctx *Context, selection_set *ast.SelectionSet) []string {
Extension ExtType
Tag string
}
func GetFields(selection_set *ast.SelectionSet) []string {
names := []string{} names := []string{}
if selection_set == nil { if selection_set == nil {
return names return names
@@ -39,12 +34,10 @@ func GetFields(selection_set *ast.SelectionSet) []string {
for _, sel := range(selection_set.Selections) { for _, sel := range(selection_set.Selections) {
switch field := sel.(type) { switch field := sel.(type) {
case *ast.Field: case *ast.Field:
if field.Name.Value == "ID" || field.Name.Value == "Type" {
continue
}
names = append(names, field.Name.Value) names = append(names, field.Name.Value)
case *ast.InlineFragment: case *ast.InlineFragment:
names = append(names, GetFields(field.SelectionSet)...) default:
ctx.Log.Logf("gql", "Unknown selection type: %s", reflect.TypeOf(field))
} }
} }
@@ -52,96 +45,108 @@ func GetFields(selection_set *ast.SelectionSet) []string {
} }
// Returns the fields that need to be resolved // Returns the fields that need to be resolved
func GetResolveFields(p graphql.ResolveParams) []string { func GetResolveFields(id NodeID, ctx *ResolveContext, p graphql.ResolveParams) (map[ExtType][]string, error) {
fields := []string{} node_info, mapped := ctx.Context.NodeTypes[p.Info.ReturnType.Name()]
if mapped == false {
return nil, fmt.Errorf("No NodeType %s", p.Info.ReturnType.Name())
}
fields := map[ExtType][]string{}
names := []string{}
for _, field := range(p.Info.FieldASTs) { for _, field := range(p.Info.FieldASTs) {
fields = append(fields, GetFields(field.SelectionSet)...) names = append(names, GetFieldNames(ctx.Context, field.SelectionSet)...)
} }
return fields cache, node_cached := ctx.NodeCache[id]
} for _, name := range(names) {
if name == "ID" || name == "Type" {
continue
}
func ResolveNode(id NodeID, p graphql.ResolveParams) (NodeResult, error) { ext_type, field_mapped := node_info.Fields[name]
ctx, err := PrepResolve(p) if field_mapped == false {
if err != nil { return nil, fmt.Errorf("NodeType %s does not have field %s", p.Info.ReturnType.Name(), name)
return NodeResult{}, err }
}
switch source := p.Source.(type) { ext_fields, exists := fields[ext_type]
case *StatusSignal: if exists == false {
cached_node, cached := ctx.NodeCache[source.Source] ext_fields = []string{}
if cached {
for _, field_name := range(source.Fields) {
_, cached := cached_node.Data[field_name]
if cached {
delete(cached_node.Data, field_name)
}
}
ctx.NodeCache[source.Source] = cached_node
} }
}
cache, node_cached := ctx.NodeCache[id] if node_cached {
fields := GetResolveFields(p) ext_cache, ext_cached := cache.Data[ext_type]
var not_cached []string if ext_cached {
if node_cached { _, field_cached := ext_cache[name]
not_cached = []string{}
for _, field := range(fields) {
if node_cached {
_, field_cached := cache.Data[field]
if field_cached { if field_cached {
continue continue
} }
} }
not_cached = append(not_cached, field)
} }
} else {
not_cached = fields fields[ext_type] = append(ext_fields, name)
} }
return fields, nil
}
if (len(not_cached) == 0) && (node_cached == true) { func ResolveNode(id NodeID, p graphql.ResolveParams) (NodeResult, error) {
ctx.Context.Log.Logf("gql", "No new fields to resolve for %s", id) ctx, err := PrepResolve(p)
return cache, nil if err != nil {
} else { return NodeResult{}, err
ctx.Context.Log.Logf("gql", "Resolving fields %+v on node %s", not_cached, id) }
signal := NewReadSignal(not_cached)
response_chan := ctx.Ext.GetResponseChannel(signal.ID())
// TODO: TIMEOUT DURATION
err = ctx.Context.Send(ctx.Server, []Message{{
Node: id,
Signal: signal,
}})
if err != nil {
ctx.Ext.FreeResponseChannel(signal.ID())
return NodeResult{}, err
}
response, _, err := WaitForResponse(response_chan, 100*time.Millisecond, signal.ID()) fields, err := GetResolveFields(id, ctx, p)
if err != nil {
return NodeResult{}, err
}
ctx.Context.Log.Logf("gql", "Resolving fields %+v on node %s", fields, id)
signal := NewReadSignal(fields)
response_chan := ctx.Ext.GetResponseChannel(signal.ID())
// TODO: TIMEOUT DURATION
err = ctx.Context.Send(ctx.Server, []SendMsg{{
Dest: id,
Signal: signal,
}})
if err != nil {
ctx.Ext.FreeResponseChannel(signal.ID()) ctx.Ext.FreeResponseChannel(signal.ID())
if err != nil { return NodeResult{}, err
return NodeResult{}, err }
}
switch response := response.(type) { response, _, err := WaitForResponse(response_chan, 100*time.Millisecond, signal.ID())
case *ReadResultSignal: ctx.Ext.FreeResponseChannel(signal.ID())
if node_cached == false { if err != nil {
cache = NodeResult{ return NodeResult{}, err
NodeID: id, }
NodeType: response.NodeType,
Data: response.Fields, switch response := response.(type) {
} case *ReadResultSignal:
} else { cache, node_cached := ctx.NodeCache[id]
for field_name, field_value := range(response.Fields) { if node_cached == false {
cache.Data[field_name] = field_value cache = NodeResult{
} NodeID: id,
NodeType: response.NodeType,
Data: response.Extensions,
} }
} else {
for ext_type, ext_data := range(response.Extensions) {
cached_ext, ext_cached := cache.Data[ext_type]
if ext_cached {
for field_name, field := range(ext_data) {
cache.Data[ext_type][field_name] = field
}
} else {
cache.Data[ext_type] = ext_data
}
ctx.NodeCache[id] = cache cache.Data[ext_type] = cached_ext
return ctx.NodeCache[id], nil }
default:
return NodeResult{}, fmt.Errorf("Bad read response: %+v", response)
} }
ctx.NodeCache[id] = cache
return ctx.NodeCache[id], nil
default:
return NodeResult{}, fmt.Errorf("Bad read response: %+v", response)
} }
} }

@@ -1,49 +1,162 @@
package graphvent package graphvent
import ( /*import (
"bytes" "testing"
"crypto/tls" "time"
"encoding/json" "fmt"
"fmt" "encoding/json"
"io" "io"
"net" "net/http"
"net/http" "net"
"reflect" "crypto/tls"
"testing" "crypto/rand"
"time" "crypto/ed25519"
"bytes"
"github.com/google/uuid" "golang.org/x/net/websocket"
"golang.org/x/net/websocket" "github.com/google/uuid"
) )
func TestGQLSubscribe(t *testing.T) { func TestGQLAuth(t *testing.T) {
ctx := logTestContext(t, []string{"test", "gql"}) ctx := logTestContext(t, []string{"test"})
n1, err := ctx.NewNode(nil, "LockableNode", NewLockableExt(nil)) listener_1 := NewListenerExt(10)
node_1, err := NewNode(ctx, nil, "Base", 10, nil, listener_1)
fatalErr(t, err)
listener_2 := NewListenerExt(10)
node_2, err := NewNode(ctx, nil, "Base", 10, nil, listener_2)
fatalErr(t, err) fatalErr(t, err)
listener_ext := NewListenerExt(10) auth_header, err := AuthB64(node_1.Key, node_2.Key.Public().(ed25519.PublicKey))
fatalErr(t, err)
auth, err := ParseAuthB64(auth_header, node_2.Key)
fatalErr(t, err)
err = ValidateAuthorization(Authorization{
AuthInfo: auth.AuthInfo,
Key: auth.Key.Public().(ed25519.PublicKey),
}, time.Second)
fatalErr(t, err)
ctx.Log.Logf("test", "AUTH: %+v", auth)
}
func TestGQLServer(t *testing.T) {
ctx := logTestContext(t, []string{"test", "gqlws", "gql"})
pub, gql_key, err := ed25519.GenerateKey(rand.Reader)
fatalErr(t, err)
gql_id := KeyID(pub)
group_policy_1 := NewAllNodesPolicy(Tree{
SerializedType(SignalTypeFor[ReadSignal]()): Tree{
SerializedType(ExtTypeFor[GroupExt]()): Tree{
SerializedType(GetFieldTag("members")): Tree{},
},
},
SerializedType(SignalTypeFor[ReadResultSignal]()): nil,
SerializedType(SignalTypeFor[ErrorSignal]()): nil,
})
group_policy_2 := NewMemberOfPolicy(map[NodeID]map[string]Tree{
gql_id: {
"test_group": {
SerializedType(SignalTypeFor[LinkSignal]()): nil,
SerializedType(SignalTypeFor[LockSignal]()): nil,
SerializedType(SignalTypeFor[StatusSignal]()): nil,
SerializedType(SignalTypeFor[ReadSignal]()): nil,
},
},
})
user_policy_1 := NewAllNodesPolicy(Tree{
SerializedType(SignalTypeFor[ReadResultSignal]()): nil,
SerializedType(SignalTypeFor[ErrorSignal]()): nil,
})
user_policy_2 := NewMemberOfPolicy(map[NodeID]map[string]Tree{
gql_id: {
"test_group": {
SerializedType(SignalTypeFor[LinkSignal]()): nil,
SerializedType(SignalTypeFor[ReadSignal]()): nil,
SerializedType(SignalTypeFor[LockSignal]()): nil,
},
},
})
gql_ext, err := NewGQLExt(ctx, ":0", nil, nil) gql_ext, err := NewGQLExt(ctx, ":0", nil, nil)
fatalErr(t, err) fatalErr(t, err)
gql, err := ctx.NewNode(nil, "LockableNode", NewLockableExt([]NodeID{n1.ID}), gql_ext, listener_ext) listener_ext := NewListenerExt(10)
n1, err := NewNode(ctx, nil, "Base", 10, []Policy{user_policy_2, user_policy_1}, NewLockableExt(nil))
fatalErr(t, err) fatalErr(t, err)
query := "subscription { Self { ID, Type ... on Lockable { LockableState } } }" gql, err := NewNode(ctx, gql_key, "Base", 10, []Policy{group_policy_2, group_policy_1},
NewLockableExt([]NodeID{n1.ID}), gql_ext, NewGroupExt(map[string][]NodeID{"test_group": {n1.ID, gql_id}}), listener_ext)
fatalErr(t, err)
ctx.Log.Logf("test", "GQL: %s", gql.ID) ctx.Log.Logf("test", "GQL: %s", gql.ID)
ctx.Log.Logf("test", "Node: %s", n1.ID) ctx.Log.Logf("test", "NODE: %s", n1.ID)
ctx.Log.Logf("test", "Query: %s", query)
sub_1 := GQLPayload{ _, err = WaitForSignal(listener_ext.Chan, 100*time.Millisecond, func(sig *StatusSignal) bool {
Query: query, return sig.Source == gql_id
} })
fatalErr(t, err)
skipVerifyTransport := &http.Transport{
TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
}
client := &http.Client{Transport: skipVerifyTransport}
port := gql_ext.tcp_listener.Addr().(*net.TCPAddr).Port port := gql_ext.tcp_listener.Addr().(*net.TCPAddr).Port
url := fmt.Sprintf("http://localhost:%d/gql", port) url := fmt.Sprintf("http://localhost:%d/gql", port)
ws_url := fmt.Sprintf("ws://127.0.0.1:%d/gqlws", port) ws_url := fmt.Sprintf("ws://127.0.0.1:%d/gqlws", port)
req_1 := GQLPayload{
Query: "query Node($id:String) { Node(id:$id) { ID, TypeHash } }",
Variables: map[string]interface{}{
"id": n1.ID.String(),
},
}
req_2 := GQLPayload{
Query: "query Node($id:String) { Node(id:$id) { ID, TypeHash, ... on GQLServer { SubGroups { Name, Members { ID } } , Listen, Requirements { ID, TypeHash Owner { ID } } } } }",
Variables: map[string]interface{}{
"id": gql.ID.String(),
},
}
auth_header, err := AuthB64(n1.Key, gql.Key.Public().(ed25519.PublicKey))
fatalErr(t, err)
SendGQL := func(payload GQLPayload) []byte {
ser, err := json.MarshalIndent(&payload, "", " ")
fatalErr(t, err)
req_data := bytes.NewBuffer(ser)
req, err := http.NewRequest("GET", url, req_data)
fatalErr(t, err)
req.Header.Add("Authorization", auth_header)
resp, err := client.Do(req)
fatalErr(t, err)
body, err := io.ReadAll(resp.Body)
fatalErr(t, err)
resp.Body.Close()
return body
}
resp_1 := SendGQL(req_1)
ctx.Log.Logf("test", "RESP_1: %s", resp_1)
resp_2 := SendGQL(req_2)
ctx.Log.Logf("test", "RESP_2: %s", resp_2)
sub_1 := GQLPayload{
Query: "subscription { Self { ID, TypeHash, ... on Lockable { Requirements { ID }}}}",
}
SubGQL := func(payload GQLPayload) { SubGQL := func(payload GQLPayload) {
config, err := websocket.NewConfig(ws_url, url) config, err := websocket.NewConfig(ws_url, url)
fatalErr(t, err) fatalErr(t, err)
@@ -61,9 +174,11 @@ func TestGQLSubscribe(t *testing.T) {
init := struct{ init := struct{
ID uuid.UUID `json:"id"` ID uuid.UUID `json:"id"`
Type string `json:"type"` Type string `json:"type"`
Payload payload_struct `json:"payload"`
}{ }{
uuid.New(), uuid.New(),
"connection_init", "connection_init",
payload_struct{ auth_header },
} }
ser, err := json.Marshal(&init) ser, err := json.Marshal(&init)
@@ -96,128 +211,75 @@ func TestGQLSubscribe(t *testing.T) {
n, err = ws.Read(resp) n, err = ws.Read(resp)
fatalErr(t, err) fatalErr(t, err)
ctx.Log.Logf("test", "SUB1: %s", resp[:n]) ctx.Log.Logf("test", "SUB: %s", resp[:n])
lock_id, err := LockLockable(ctx, gql)
fatalErr(t, err)
response, _, err := WaitForResponse(listener_ext.Chan, 100*time.Millisecond, lock_id) msgs := Messages{}
test_changes := Changes{}
AddChange[GQLExt](test_changes, "state")
msgs = msgs.Add(ctx, gql.ID, gql, nil, NewStatusSignal(gql.ID, test_changes))
err = ctx.Send(msgs)
fatalErr(t, err) fatalErr(t, err)
switch response.(type) {
case *SuccessSignal:
ctx.Log.Logf("test", "Locked %s", gql.ID)
default:
t.Errorf("Unexpected lock response: %s", response)
}
n, err = ws.Read(resp)
fatalErr(t, err)
ctx.Log.Logf("test", "SUB2: %s", resp[:n])
n, err = ws.Read(resp) n, err = ws.Read(resp)
fatalErr(t, err) fatalErr(t, err)
ctx.Log.Logf("test", "SUB3: %s", resp[:n]) ctx.Log.Logf("test", "SUB: %s", resp[:n])
// TODO: check that there are no more messages sent to ws within a timeout // TODO: check that there are no more messages sent to ws within a timeout
} }
SubGQL(sub_1) SubGQL(sub_1)
}
func TestGQLQuery(t *testing.T) {
ctx := logTestContext(t, []string{"test", "lockable"})
n1_listener := NewListenerExt(10) msgs := Messages{}
n1, err := ctx.NewNode(nil, "LockableNode", NewLockableExt(nil), n1_listener) msgs = msgs.Add(ctx, gql.ID, gql, nil, NewStopSignal())
err = ctx.Send(msgs)
fatalErr(t, err) fatalErr(t, err)
_, err = WaitForSignal(listener_ext.Chan, 100*time.Millisecond, func(sig *StoppedSignal) bool {
gql_listener := NewListenerExt(10) return sig.Source == gql_id
gql_ext, err := NewGQLExt(ctx, ":0", nil, nil) })
fatalErr(t, err) fatalErr(t, err)
}
gql, err := ctx.NewNode(nil, "LockableNode", NewLockableExt([]NodeID{n1.ID}), gql_ext, gql_listener) func TestGQLDB(t *testing.T) {
fatalErr(t, err) ctx := logTestContext(t, []string{"test", "db", "node"})
ctx.Log.Logf("test", "GQL: %s", gql.ID)
ctx.Log.Logf("test", "NODE: %s", n1.ID)
skipVerifyTransport := &http.Transport{
TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
}
client := &http.Client{Transport: skipVerifyTransport}
port := gql_ext.tcp_listener.Addr().(*net.TCPAddr).Port
url := fmt.Sprintf("http://localhost:%d/gql", port)
req_1 := GQLPayload{
Query: "query Node($id:graphvent_NodeID) { Node(id:$id) { ID, Type, ... on Lockable { LockableState } } }",
Variables: map[string]interface{}{
"id": n1.ID.String(),
},
}
req_2 := GQLPayload{
Query: "query Self { Self { ID, Type, ... on Lockable { LockableState, Requirements { Key { ID ... on Lockable { LockableState } } } } } }",
}
SendGQL := func(payload GQLPayload) []byte {
ser, err := json.MarshalIndent(&payload, "", " ")
fatalErr(t, err)
req_data := bytes.NewBuffer(ser)
req, err := http.NewRequest("GET", url, req_data)
fatalErr(t, err)
resp, err := client.Do(req)
fatalErr(t, err)
body, err := io.ReadAll(resp.Body)
fatalErr(t, err)
resp.Body.Close()
return body
}
resp_1 := SendGQL(req_1)
ctx.Log.Logf("test", "RESP_1: %s", resp_1)
resp_2 := SendGQL(req_2)
ctx.Log.Logf("test", "RESP_2: %s", resp_2)
lock_id, err := LockLockable(ctx, n1)
fatalErr(t, err)
response, _, err := WaitForResponse(n1_listener.Chan, 100*time.Millisecond, lock_id) u1, err := NewNode(ctx, nil, "Base", 10, nil)
fatalErr(t, err) fatalErr(t, err)
switch response := response.(type) {
case *SuccessSignal:
default:
t.Fatalf("Wrong response: %s", reflect.TypeOf(response))
}
resp_3 := SendGQL(req_1)
ctx.Log.Logf("test", "RESP_3: %s", resp_3)
resp_4 := SendGQL(req_2)
ctx.Log.Logf("test", "RESP_4: %s", resp_4)
}
func TestGQLDB(t *testing.T) { ctx.Log.Logf("test", "U1_ID: %s", u1.ID)
ctx := logTestContext(t, []string{"test", "db", "node", "serialize"})
gql_ext, err := NewGQLExt(ctx, ":0", nil, nil) gql_ext, err := NewGQLExt(ctx, ":0", nil, nil)
fatalErr(t, err) fatalErr(t, err)
listener_ext := NewListenerExt(10) listener_ext := NewListenerExt(10)
gql, err := NewNode(ctx, nil, "Base", 10, nil,
gql, err := ctx.NewNode(nil, "Node", gql_ext, listener_ext) gql_ext,
listener_ext,
NewGroupExt(nil))
fatalErr(t, err) fatalErr(t, err)
ctx.Log.Logf("test", "GQL_ID: %s", gql.ID) ctx.Log.Logf("test", "GQL_ID: %s", gql.ID)
err = ctx.Stop() msgs := Messages{}
msgs = msgs.Add(ctx, gql.ID, gql, nil, NewStopSignal())
err = ctx.Send(msgs)
fatalErr(t, err)
_, err = WaitForSignal(listener_ext.Chan, 100*time.Millisecond, func(sig *StoppedSignal) bool {
return sig.Source == gql.ID
})
fatalErr(t, err) fatalErr(t, err)
gql_loaded, err := ctx.GetNode(gql.ID) // Clear all loaded nodes from the context so it loads them from the database
ctx.nodeMap = map[NodeID]*Node{}
gql_loaded, err := LoadNode(ctx, gql.ID)
fatalErr(t, err) fatalErr(t, err)
listener_ext, err = GetExt[ListenerExt](gql_loaded) listener_ext, err = GetExt[ListenerExt](gql_loaded)
fatalErr(t, err) fatalErr(t, err)
msgs = Messages{}
msgs = msgs.Add(ctx, gql_loaded.ID, gql_loaded, nil, NewStopSignal())
err = ctx.Send(msgs)
fatalErr(t, err)
_, err = WaitForSignal(listener_ext.Chan, 100*time.Millisecond, func(sig *StoppedSignal) bool {
return sig.Source == gql_loaded.ID
})
fatalErr(t, err)
} }
*/

@@ -9,20 +9,27 @@ import (
 func NewSimpleListener(ctx *Context, buffer int) (*Node, *ListenerExt, error) {
   listener_extension := NewListenerExt(buffer)
-  listener, err := ctx.NewNode(nil, "LockableNode", nil, listener_extension, NewLockableExt(nil))
+  listener, err := NewNode(ctx,
+                           nil,
+                           "LockableListener",
+                           10,
+                           nil,
+                           listener_extension,
+                           NewLockableExt(nil))
   return listener, listener_extension, err
 }
 func logTestContext(t * testing.T, components []string) *Context {
-  db, err := badger.Open(badger.DefaultOptions("").WithInMemory(true).WithSyncWrites(true))
+  db, err := badger.Open(badger.DefaultOptions("").WithInMemory(true))
   if err != nil {
     t.Fatal(err)
   }
-  ctx, err := NewContext(&BadgerDB{
-    DB: db,
-  }, NewConsoleLogger(components))
+  ctx, err := NewContext(db, NewConsoleLogger(components))
+  fatalErr(t, err)
+  err = RegisterNodeType(ctx, "LockableListener", []ExtType{ExtTypeFor[ListenerExt](), ExtTypeFor[LockableExt]()})
   fatalErr(t, err)
   return ctx
@@ -43,7 +50,7 @@ func testSend(t *testing.T, ctx *Context, signal Signal, source, destination *No
   source_listener, err := GetExt[ListenerExt](source)
   fatalErr(t, err)
-  messages := []Message{{destination.ID, signal}}
+  messages := []SendMsg{{destination.ID, signal}}
   fatalErr(t, ctx.Send(source, messages))
   response, signals, err := WaitForResponse(source_listener.Chan, time.Millisecond*10, signal.ID())

@@ -10,35 +10,12 @@ type ListenerExt struct {
   Chan chan Signal
 }
-type LoadedSignal struct {
-  SignalHeader
-}
-func NewLoadedSignal() *LoadedSignal {
-  return &LoadedSignal{
-    SignalHeader: NewSignalHeader(),
-  }
-}
-type UnloadedSignal struct {
-  SignalHeader
-}
-func NewUnloadedSignal() *UnloadedSignal {
-  return &UnloadedSignal{
-    SignalHeader: NewSignalHeader(),
-  }
-}
 func (ext *ListenerExt) Load(ctx *Context, node *Node) error {
   ext.Chan = make(chan Signal, ext.Buffer)
-  ext.Chan <- NewLoadedSignal()
   return nil
 }
 func (ext *ListenerExt) Unload(ctx *Context, node *Node) {
-  ext.Chan <- NewUnloadedSignal()
-  close(ext.Chan)
 }
 // Create a new listener extension with a given buffer size
@@ -50,7 +27,7 @@ func NewListenerExt(buffer int) *ListenerExt {
 }
 // Send the signal to the channel, logging an overflow if it occurs
-func (ext *ListenerExt) Process(ctx *Context, node *Node, source NodeID, signal Signal) ([]Message, Changes) {
+func (ext *ListenerExt) Process(ctx *Context, node *Node, source NodeID, signal Signal) ([]SendMsg, Changes) {
   ctx.Log.Logf("listener", "%s - %+v", node.ID, reflect.TypeOf(signal))
   ctx.Log.Logf("listener_debug", "%s->%s - %+v", source, node.ID, signal)
   select {
@@ -60,7 +37,7 @@ func (ext *ListenerExt) Process(ctx *Context, node *Node, source NodeID, signal
   }
   switch sig := signal.(type) {
   case *StatusSignal:
-    ctx.Log.Logf("listener_status", "%s - %+v", sig.Source, sig.Fields)
+    ctx.Log.Logf("listener_status", "%s - %+v", sig.Source, sig.Changes)
   }
   return nil, nil
 }

@@ -1,7 +1,8 @@
package graphvent package graphvent
import ( import (
"github.com/google/uuid" "github.com/google/uuid"
"time"
) )
type ReqState byte type ReqState byte
@@ -21,389 +22,399 @@ var ReqStateStrings = map[ReqState]string {
AbortingLock: "AbortingLock", AbortingLock: "AbortingLock",
} }
func (state ReqState) String() string {
str, mapped := ReqStateStrings[state]
if mapped == false {
return "UNKNOWN_REQSTATE"
} else {
return str
}
}
type LockableExt struct{ type LockableExt struct{
State ReqState `gv:"state"` State ReqState `gv:"state"`
ReqID *uuid.UUID `gv:"req_id"` ReqID *uuid.UUID `gv:"req_id"`
Owner *NodeID `gv:"owner"` Owner *NodeID `gv:"owner" node:"Base"`
PendingOwner *NodeID `gv:"pending_owner"` PendingOwner *NodeID `gv:"pending_owner" node:"Base"`
PendingID uuid.UUID `gv:"pending_id"`
Requirements map[NodeID]ReqState `gv:"requirements" node:"Lockable:"` Requirements map[NodeID]ReqState `gv:"requirements" node:"Lockable:"`
WaitInfos WaitMap `gv:"wait_infos" node:":Base"`
Locked map[NodeID]any
Unlocked map[NodeID]any
Waiting WaitMap `gv:"waiting_locks" node:":Lockable"`
} }
func NewLockableExt(requirements []NodeID) *LockableExt { func NewLockableExt(requirements []NodeID) *LockableExt {
var reqs map[NodeID]ReqState = nil var reqs map[NodeID]ReqState = nil
var unlocked map[NodeID]any = map[NodeID]any{} if requirements != nil {
if len(requirements) != 0 {
reqs = map[NodeID]ReqState{} reqs = map[NodeID]ReqState{}
for _, req := range(requirements) { for _, id := range(requirements) {
reqs[req] = Unlocked reqs[id] = Unlocked
unlocked[req] = nil
} }
} }
return &LockableExt{ return &LockableExt{
State: Unlocked, State: Unlocked,
Owner: nil, Owner: nil,
PendingOwner: nil, PendingOwner: nil,
Requirements: reqs, Requirements: reqs,
Waiting: WaitMap{}, WaitInfos: WaitMap{},
Locked: map[NodeID]any{},
Unlocked: unlocked,
} }
} }
func UnlockLockable(ctx *Context, node *Node) (uuid.UUID, error) { func UnlockLockable(ctx *Context, node *Node) (uuid.UUID, error) {
signal := NewUnlockSignal() signal := NewLockSignal("unlock")
messages := []Message{{node.ID, signal}} messages := []SendMsg{{node.ID, signal}}
return signal.ID(), ctx.Send(node, messages) return signal.ID(), ctx.Send(node, messages)
} }
func LockLockable(ctx *Context, node *Node) (uuid.UUID, error) { func LockLockable(ctx *Context, node *Node) (uuid.UUID, error) {
signal := NewLockSignal() signal := NewLockSignal("lock")
messages := []Message{{node.ID, signal}} messages := []SendMsg{{node.ID, signal}}
return signal.ID(), ctx.Send(node, messages) return signal.ID(), ctx.Send(node, messages)
} }
func (ext *LockableExt) Load(ctx *Context, node *Node) error { func (ext *LockableExt) Load(ctx *Context, node *Node) error {
ext.Locked = map[NodeID]any{}
ext.Unlocked = map[NodeID]any{}
for id, state := range(ext.Requirements) {
if state == Unlocked {
ext.Unlocked[id] = nil
} else if state == Locked {
ext.Locked[id] = nil
}
}
return nil return nil
} }
func (ext *LockableExt) Unload(ctx *Context, node *Node) { func (ext *LockableExt) Unload(ctx *Context, node *Node) {
return
} }
// Handle link signal by adding/removing the requested NodeID func (ext *LockableExt) HandleErrorSignal(ctx *Context, node *Node, source NodeID, signal *ErrorSignal) ([]SendMsg, Changes) {
// returns an error if the node is not unlocked var messages []SendMsg = nil
func (ext *LockableExt) HandleLinkSignal(ctx *Context, node *Node, source NodeID, signal *LinkSignal) ([]Message, Changes) {
var messages []Message = nil
var changes Changes = nil var changes Changes = nil
switch ext.State { info, info_found := node.ProcessResponse(ext.WaitInfos, signal)
case Unlocked: if info_found {
state, found := ext.Requirements[info.Destination]
if found == true {
changes.Add("wait_infos")
ctx.Log.Logf("lockable", "got mapped response %+v for %+v in state %s while in %s", signal, info, ReqStateStrings[state], ReqStateStrings[ext.State])
switch ext.State {
case AbortingLock:
ext.Requirements[info.Destination] = Unlocked
all_unlocked := true
for _, state := range(ext.Requirements) {
if state != Unlocked {
all_unlocked = false
break
}
}
if all_unlocked == true {
changes.Add("state")
ext.State = Unlocked
}
case Locking:
changes.Add("state")
ext.Requirements[info.Destination] = Unlocked
unlocked := 0
for _, state := range(ext.Requirements) {
if state == Unlocked {
unlocked += 1
}
}
if unlocked == len(ext.Requirements) {
ctx.Log.Logf("lockable", "%s unlocked from error %s from %s", node.ID, signal.Error, source)
ext.State = Unlocked
} else {
ext.State = AbortingLock
for id, state := range(ext.Requirements) {
if state == Locked {
ext.Requirements[id] = Unlocking
lock_signal := NewLockSignal("unlock")
ext.WaitInfos[lock_signal.Id] = node.QueueTimeout("unlock", id, lock_signal, 100*time.Millisecond)
messages = append(messages, SendMsg{id, lock_signal})
ctx.Log.Logf("lockable", "sent abort unlock to %s from %s", id, node.ID)
}
}
}
case Unlocking:
ext.Requirements[info.Destination] = Locked
all_returned := true
for _, state := range(ext.Requirements) {
if state == Unlocking {
all_returned = false
break
}
}
if all_returned == true {
ext.State = Locked
}
}
} else {
ctx.Log.Logf("lockable", "Got mapped error %s, but %s isn't a requirement", signal, info.Destination)
}
}
return messages, changes
}
func (ext *LockableExt) HandleLinkSignal(ctx *Context, node *Node, source NodeID, signal *LinkSignal) ([]SendMsg, Changes) {
var messages []SendMsg = nil
var changes = Changes{}
if ext.State == Unlocked {
switch signal.Action { switch signal.Action {
case "add": case "add":
_, exists := ext.Requirements[signal.NodeID] _, exists := ext.Requirements[signal.NodeID]
if exists == true { if exists == true {
messages = append(messages, Message{source, NewErrorSignal(signal.ID(), "already_requirement")}) messages = append(messages, SendMsg{source, NewErrorSignal(signal.ID(), "already_requirement")})
} else { } else {
if ext.Requirements == nil { if ext.Requirements == nil {
ext.Requirements = map[NodeID]ReqState{} ext.Requirements = map[NodeID]ReqState{}
} }
ext.Requirements[signal.NodeID] = Unlocked ext.Requirements[signal.NodeID] = Unlocked
changes = append(changes, "requirements") changes.Add("requirements")
messages = append(messages, Message{source, NewSuccessSignal(signal.ID())}) messages = append(messages, SendMsg{source, NewSuccessSignal(signal.ID())})
} }
case "remove": case "remove":
_, exists := ext.Requirements[signal.NodeID] _, exists := ext.Requirements[signal.NodeID]
if exists == false { if exists == false {
messages = append(messages, Message{source, NewErrorSignal(signal.ID(), "not_requirement")}) messages = append(messages, SendMsg{source, NewErrorSignal(signal.ID(), "can't link: not_requirement")})
} else { } else {
delete(ext.Requirements, signal.NodeID) delete(ext.Requirements, signal.NodeID)
changes = append(changes, "requirements") changes.Add("requirements")
messages = append(messages, Message{source, NewSuccessSignal(signal.ID())}) messages = append(messages, SendMsg{source, NewSuccessSignal(signal.ID())})
} }
default: default:
messages = append(messages, Message{source, NewErrorSignal(signal.ID(), "unknown_action")}) messages = append(messages, SendMsg{source, NewErrorSignal(signal.ID(), "unknown_action")})
} }
default: } else {
messages = append(messages, Message{source, NewErrorSignal(signal.ID(), "not_unlocked: %s", ext.State)}) messages = append(messages, SendMsg{source, NewErrorSignal(signal.ID(), "not_unlocked")})
} }
return messages, changes return messages, changes
} }
// Handle an UnlockSignal by either transitioning to Unlocked state, func (ext *LockableExt) HandleSuccessSignal(ctx *Context, node *Node, source NodeID, signal *SuccessSignal) ([]SendMsg, Changes) {
// sending unlock signals to requirements, or returning an error signal var messages []SendMsg = nil
func (ext *LockableExt) HandleUnlockSignal(ctx *Context, node *Node, source NodeID, signal *UnlockSignal) ([]Message, Changes) { var changes = Changes{}
var messages []Message = nil if source == node.ID {
var changes Changes = nil return messages, changes
switch ext.State {
case Locked:
if source != *ext.Owner {
messages = append(messages, Message{source, NewErrorSignal(signal.Id, "not_owner")})
} else {
if len(ext.Requirements) == 0 {
changes = append(changes, "state", "owner", "pending_owner")
ext.Owner = nil
ext.PendingOwner = nil
ext.State = Unlocked
messages = append(messages, Message{source, NewSuccessSignal(signal.Id)})
} else {
changes = append(changes, "state", "waiting", "requirements", "pending_owner")
ext.PendingOwner = nil
ext.ReqID = &signal.Id
ext.State = Unlocking
for id := range(ext.Requirements) {
unlock_signal := NewUnlockSignal()
ext.Waiting[unlock_signal.Id] = id
ext.Requirements[id] = Unlocking
messages = append(messages, Message{id, unlock_signal})
}
}
}
default:
messages = append(messages, Message{source, NewErrorSignal(signal.Id, "not_locked")})
} }
return messages, changes info, info_found := node.ProcessResponse(ext.WaitInfos, signal)
} if info_found == true {
state, found := ext.Requirements[info.Destination]
// Handle a LockSignal by either transitioning to a locked state, if found == false {
// sending lock signals to requirements, or returning an error signal ctx.Log.Logf("lockable", "Got success signal for requirement that is no longer in the map(%s), ignoring...", info.Destination)
func (ext *LockableExt) HandleLockSignal(ctx *Context, node *Node, source NodeID, signal *LockSignal) ([]Message, Changes) {
var messages []Message = nil
var changes Changes = nil
switch ext.State {
case Unlocked:
if len(ext.Requirements) == 0 {
changes = append(changes, "state", "owner", "pending_owner")
ext.Owner = &source
ext.PendingOwner = &source
ext.State = Locked
messages = append(messages, Message{source, NewSuccessSignal(signal.Id)})
} else { } else {
changes = append(changes, "state", "requirements", "waiting", "pending_owner") ctx.Log.Logf("lockable", "got mapped response %+v for %+v in state %s", signal, info, ReqStateStrings[state])
switch state {
ext.PendingOwner = &source case Locking:
switch ext.State {
case Locking:
ext.Requirements[info.Destination] = Locked
locked := 0
for _, s := range(ext.Requirements) {
if s == Locked {
locked += 1
}
}
if locked == len(ext.Requirements) {
ctx.Log.Logf("lockable", "WHOLE LOCK: %s - %s - %+v", node.ID, ext.PendingID, ext.PendingOwner)
ext.State = Locked
ext.Owner = ext.PendingOwner
changes.Add("state", "owner", "requirements")
messages = append(messages, SendMsg{*ext.Owner, NewSuccessSignal(ext.PendingID)})
} else {
changes.Add("requirements")
ctx.Log.Logf("lockable", "PARTIAL LOCK: %s - %d/%d", node.ID, locked, len(ext.Requirements))
}
case AbortingLock:
ext.Requirements[info.Destination] = Unlocking
ext.ReqID = &signal.Id lock_signal := NewLockSignal("unlock")
ext.WaitInfos[lock_signal.Id] = node.QueueTimeout("unlock", info.Destination, lock_signal, 100*time.Millisecond)
messages = append(messages, SendMsg{info.Destination, lock_signal})
ext.State = Locking ctx.Log.Logf("lockable", "sending abort_lock to %s for %s", info.Destination, node.ID)
for id := range(ext.Requirements) { }
lock_signal := NewLockSignal() case AbortingLock:
ctx.Log.Logf("lockable", "Got success signal in AbortingLock %s", node.ID)
fallthrough
case Unlocking:
ext.Requirements[source] = Unlocked
ext.Waiting[lock_signal.Id] = id unlocked := 0
ext.Requirements[id] = Locking for _, s := range(ext.Requirements) {
if s == Unlocked {
unlocked += 1
}
}
messages = append(messages, Message{id, lock_signal}) if unlocked == len(ext.Requirements) {
old_state := ext.State
ext.State = Unlocked
ctx.Log.Logf("lockable", "WHOLE UNLOCK: %s - %s - %+v", node.ID, ext.PendingID, ext.PendingOwner)
if old_state == Unlocking {
previous_owner := *ext.Owner
ext.Owner = ext.PendingOwner
ext.ReqID = nil
changes.Add("state", "owner", "req_id")
messages = append(messages, SendMsg{previous_owner, NewSuccessSignal(ext.PendingID)})
} else if old_state == AbortingLock {
changes.Add("state", "pending_owner")
messages = append(messages, SendMsg{*ext.PendingOwner, NewErrorSignal(*ext.ReqID, "not_unlocked")})
ext.PendingOwner = ext.Owner
}
} else {
changes.Add("state")
ctx.Log.Logf("lockable", "PARTIAL UNLOCK: %s - %d/%d", node.ID, unlocked, len(ext.Requirements))
}
} }
} }
default:
messages = append(messages, Message{source, NewErrorSignal(signal.Id, "not_unlocked: %s", ext.State)})
} }
return messages, changes return messages, changes
} }
// Handle an error signal by aborting the lock, or retrying the unlock // Handle a LockSignal and update the extensions owner/requirement states
func (ext *LockableExt) HandleErrorSignal(ctx *Context, node *Node, source NodeID, signal *ErrorSignal) ([]Message, Changes) { func (ext *LockableExt) HandleLockSignal(ctx *Context, node *Node, source NodeID, signal *LockSignal) ([]SendMsg, Changes) {
var messages []Message = nil var messages []SendMsg = nil
var changes Changes = nil var changes = Changes{}
id, waiting := ext.Waiting[signal.ReqID]
if waiting == true {
delete(ext.Waiting, signal.ReqID)
changes = append(changes, "waiting")
switch signal.State {
case "lock":
switch ext.State { switch ext.State {
case Locking: case Unlocked:
changes = append(changes, "state", "requirements") if len(ext.Requirements) == 0 {
ext.State = Locked
ext.Requirements[id] = Unlocked new_owner := source
ext.PendingOwner = &new_owner
unlocked := 0 ext.Owner = &new_owner
for req_id, req_state := range(ext.Requirements) { changes.Add("state", "pending_owner", "owner")
// Unlock locked requirements, and count unlocked requirements messages = append(messages, SendMsg{new_owner, NewSuccessSignal(signal.ID())})
switch req_state { } else {
case Locked: ext.State = Locking
unlock_signal := NewUnlockSignal() id := signal.ID()
ext.ReqID = &id
new_owner := source
ext.PendingOwner = &new_owner
ext.PendingID = signal.ID()
changes.Add("state", "req_id", "pending_owner", "pending_id")
for id, state := range(ext.Requirements) {
if state != Unlocked {
ctx.Log.Logf("lockable", "REQ_NOT_UNLOCKED_WHEN_LOCKING")
}
ext.Waiting[unlock_signal.Id] = req_id lock_signal := NewLockSignal("lock")
ext.Requirements[req_id] = Unlocking ext.WaitInfos[lock_signal.Id] = node.QueueTimeout("lock", id, lock_signal, 500*time.Millisecond)
ext.Requirements[id] = Locking
messages = append(messages, Message{req_id, unlock_signal}) messages = append(messages, SendMsg{id, lock_signal})
case Unlocked:
unlocked += 1
} }
} }
default:
if unlocked == len(ext.Requirements) { messages = append(messages, SendMsg{source, NewErrorSignal(signal.ID(), "not_unlocked")})
changes = append(changes, "owner", "state") ctx.Log.Logf("lockable", "Tried to lock %s while %s", node.ID, ext.State)
}
case "unlock":
if ext.State == Locked {
if len(ext.Requirements) == 0 {
ext.State = Unlocked ext.State = Unlocked
new_owner := source
ext.PendingOwner = nil
ext.Owner = nil ext.Owner = nil
} else { changes.Add("state", "pending_owner", "owner")
changes = append(changes, "state") messages = append(messages, SendMsg{new_owner, NewSuccessSignal(signal.ID())})
ext.State = AbortingLock } else if source == *ext.Owner {
} ext.State = Unlocking
id := signal.ID()
case Unlocking: ext.ReqID = &id
unlock_signal := NewUnlockSignal() ext.PendingOwner = nil
ext.Waiting[unlock_signal.Id] = id ext.PendingID = signal.ID()
messages = append(messages, Message{id, unlock_signal}) changes.Add("state", "pending_owner", "pending_id", "req_id")
for id, state := range(ext.Requirements) {
case AbortingLock: if state != Locked {
req_state := ext.Requirements[id] ctx.Log.Logf("lockable", "REQ_NOT_LOCKED_WHEN_UNLOCKING")
// Mark failed lock as Unlocked, or retry unlock
switch req_state {
case Locking:
ext.Requirements[id] = Unlocked
// Check if all requirements unlocked now
unlocked := 0
for _, req_state := range(ext.Requirements) {
if req_state == Unlocked {
unlocked += 1
} }
}
if unlocked == len(ext.Requirements) { lock_signal := NewLockSignal("unlock")
changes = append(changes, "owner", "state") ext.WaitInfos[lock_signal.Id] = node.QueueTimeout("unlock", id, lock_signal, 100*time.Millisecond)
ext.State = Unlocked ext.Requirements[id] = Unlocking
ext.Owner = nil
messages = append(messages, SendMsg{id, lock_signal})
} }
case Unlocking:
// Handle error for unlocking requirement while unlocking by retrying unlock
unlock_signal := NewUnlockSignal()
ext.Waiting[unlock_signal.Id] = id
messages = append(messages, Message{id, unlock_signal})
} }
} else {
messages = append(messages, SendMsg{source, NewErrorSignal(signal.ID(), "not_locked")})
} }
default:
ctx.Log.Logf("lockable", "LOCK_ERR: unkown state %s", signal.State)
} }
return messages, changes return messages, changes
} }
// Handle a success signal by checking if all requirements have been locked/unlocked func (ext *LockableExt) HandleTimeoutSignal(ctx *Context, node *Node, source NodeID, signal *TimeoutSignal) ([]SendMsg, Changes) {
func (ext *LockableExt) HandleSuccessSignal(ctx *Context, node *Node, source NodeID, signal *SuccessSignal) ([]Message, Changes) { var messages []SendMsg = nil
var messages []Message = nil var changes = Changes{}
var changes Changes = nil
wait_info, found := node.ProcessResponse(ext.WaitInfos, signal)
id, waiting := ext.Waiting[signal.ReqID] if found == true {
if waiting == true { changes.Add("wait_infos")
delete(ext.Waiting, signal.ReqID) state, found := ext.Requirements[wait_info.Destination]
changes = append(changes, "waiting") if found == true {
ctx.Log.Logf("lockable", "%s timed out %s while %s was %s", wait_info.Destination, ReqStateStrings[state], node.ID, ReqStateStrings[state])
switch ext.State { switch ext.State {
case Locking: case AbortingLock:
ext.Requirements[id] = Locked ext.Requirements[wait_info.Destination] = Unlocked
ext.Locked[id] = nil all_unlocked := true
delete(ext.Unlocked, id) for _, state := range(ext.Requirements) {
if state != Unlocked {
if len(ext.Locked) == len(ext.Requirements) { all_unlocked = false
ctx.Log.Logf("lockable", "%s FULL_LOCK: %d", node.ID, len(ext.Locked)) break
changes = append(changes, "state", "owner", "req_id") }
ext.State = Locked }
if all_unlocked == true {
ext.Owner = ext.PendingOwner changes.Add("state")
ext.State = Unlocked
messages = append(messages, Message{*ext.Owner, NewSuccessSignal(*ext.ReqID)}) }
ext.ReqID = nil
} else {
ctx.Log.Logf("lockable", "%s PARTIAL_LOCK: %d/%d", node.ID, len(ext.Locked), len(ext.Requirements))
}
case AbortingLock:
req_state := ext.Requirements[id]
switch req_state {
case Locking: case Locking:
ext.Requirements[id] = Unlocking ext.State = AbortingLock
unlock_signal := NewUnlockSignal() ext.Requirements[wait_info.Destination] = Unlocked
ext.Waiting[unlock_signal.Id] = id for id, state := range(ext.Requirements) {
messages = append(messages, Message{id, unlock_signal}) if state == Locked {
ext.Requirements[id] = Unlocking
lock_signal := NewLockSignal("unlock")
ext.WaitInfos[lock_signal.Id] = node.QueueTimeout("unlock", id, lock_signal, 100*time.Millisecond)
messages = append(messages, SendMsg{id, lock_signal})
ctx.Log.Logf("lockable", "sent abort unlock to %s from %s", id, node.ID)
}
}
case Unlocking: case Unlocking:
ext.Requirements[id] = Unlocked ext.Requirements[wait_info.Destination] = Locked
ext.Unlocked[id] = nil all_returned := true
delete(ext.Locked, id) for _, state := range(ext.Requirements) {
if state == Unlocking {
unlocked := 0 all_returned = false
for _, req_state := range(ext.Requirements) { break
switch req_state {
case Unlocked:
unlocked += 1
} }
} }
if all_returned == true {
if unlocked == len(ext.Requirements) { ext.State = Locked
changes = append(changes, "state", "pending_owner", "req_id")
messages = append(messages, Message{*ext.PendingOwner, NewErrorSignal(*ext.ReqID, "not_unlocked: %s", ext.State)})
ext.State = Unlocked
ext.ReqID = nil
ext.PendingOwner = nil
} }
} }
} else {
ctx.Log.Logf("lockable", "%s timed out", wait_info.Destination)
case Unlocking:
ext.Requirements[id] = Unlocked
ext.Unlocked[id] = Unlocked
delete(ext.Locked, id)
if len(ext.Unlocked) == len(ext.Requirements) {
changes = append(changes, "state", "owner", "req_id")
messages = append(messages, Message{*ext.Owner, NewSuccessSignal(*ext.ReqID)})
ext.State = Unlocked
ext.ReqID = nil
ext.Owner = nil
}
} }
} }
return messages, changes return messages, changes
} }
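
Both the success and timeout handlers above walk ext.Requirements and count how many entries have reached a target state before flipping the overall lock state. A small hedged helper that captures that recurring check (a sketch, not part of the diff):

// Count requirements currently in the given state; with this, the
// "all unlocked" checks above reduce to
// countReqState(ext.Requirements, Unlocked) == len(ext.Requirements).
func countReqState(reqs map[NodeID]ReqState, target ReqState) int {
	count := 0
	for _, state := range reqs {
		if state == target {
			count += 1
		}
	}
	return count
}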
func (ext *LockableExt) Process(ctx *Context, node *Node, source NodeID, signal Signal) ([]Message, Changes) { // LockableExts process status signals by forwarding them to its owner
var messages []Message = nil // LockSignal and LinkSignal Direct signals are processed to update the requirement/dependency/lock state
var changes Changes = nil func (ext *LockableExt) Process(ctx *Context, node *Node, source NodeID, signal Signal) ([]SendMsg, Changes) {
var messages []SendMsg = nil
var changes = Changes{}
switch sig := signal.(type) { switch sig := signal.(type) {
case *StatusSignal: case *StatusSignal:
// Forward StatusSignals up to the owner (unless that would be a cycle)
if ext.Owner != nil { if ext.Owner != nil {
if *ext.Owner != node.ID { if *ext.Owner != node.ID {
messages = append(messages, Message{*ext.Owner, signal}) messages = append(messages, SendMsg{*ext.Owner, signal})
} }
} }
case *LinkSignal: case *LinkSignal:
messages, changes = ext.HandleLinkSignal(ctx, node, source, sig) messages, changes = ext.HandleLinkSignal(ctx, node, source, sig)
case *LockSignal: case *LockSignal:
messages, changes = ext.HandleLockSignal(ctx, node, source, sig) messages, changes = ext.HandleLockSignal(ctx, node, source, sig)
case *UnlockSignal:
messages, changes = ext.HandleUnlockSignal(ctx, node, source, sig)
case *ErrorSignal: case *ErrorSignal:
messages, changes = ext.HandleErrorSignal(ctx, node, source, sig) messages, changes = ext.HandleErrorSignal(ctx, node, source, sig)
case *SuccessSignal: case *SuccessSignal:
messages, changes = ext.HandleSuccessSignal(ctx, node, source, sig) messages, changes = ext.HandleSuccessSignal(ctx, node, source, sig)
case *TimeoutSignal:
messages, changes = ext.HandleTimeoutSignal(ctx, node, source, sig)
default:
} }
return messages, changes return messages, changes

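Process is the only entry point an extension implements: it switches on the concrete signal type, and anything it does not recognize falls through with no messages and no changes. A minimal hedged sketch of a custom extension, assuming the Extension interface is just the Load/Unload/Process trio exercised in this diff:

// Sketch: an extension that counts StatusSignals reaching its node.
type CounterExt struct {
	Count int `gv:"count"`
}

func (ext *CounterExt) Load(ctx *Context, node *Node) error { return nil }
func (ext *CounterExt) Unload(ctx *Context, node *Node)     {}

func (ext *CounterExt) Process(ctx *Context, node *Node, source NodeID, signal Signal) ([]SendMsg, Changes) {
	changes := Changes{}
	if _, ok := signal.(*StatusSignal); ok {
		ext.Count += 1
		changes.Add("count")
	}
	return nil, changes
}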
@ -10,16 +10,16 @@ func TestLink(t *testing.T) {
l2_listener := NewListenerExt(10) l2_listener := NewListenerExt(10)
l2, err := ctx.NewNode(nil, "LockableNode", l2_listener, NewLockableExt(nil)) l2, err := NewNode(ctx, nil, "Lockable", 10, l2_listener, NewLockableExt(nil))
fatalErr(t, err) fatalErr(t, err)
l1_lockable := NewLockableExt(nil) l1_lockable := NewLockableExt(nil)
l1_listener := NewListenerExt(10) l1_listener := NewListenerExt(10)
l1, err := ctx.NewNode(nil, "LockableNode", l1_listener, l1_lockable) l1, err := NewNode(ctx, nil, "Lockable", 10, l1_listener, l1_lockable)
fatalErr(t, err) fatalErr(t, err)
link_signal := NewLinkSignal("add", l2.ID) link_signal := NewLinkSignal("add", l2.ID)
msgs := []Message{{l1.ID, link_signal}} msgs := []SendMsg{{l1.ID, link_signal}}
err = ctx.Send(l1, msgs) err = ctx.Send(l1, msgs)
fatalErr(t, err) fatalErr(t, err)
@ -34,7 +34,7 @@ func TestLink(t *testing.T) {
} }
unlink_signal := NewLinkSignal("remove", l2.ID) unlink_signal := NewLinkSignal("remove", l2.ID)
msgs = []Message{{l1.ID, unlink_signal}} msgs = []SendMsg{{l1.ID, unlink_signal}}
err = ctx.Send(l1, msgs) err = ctx.Send(l1, msgs)
fatalErr(t, err) fatalErr(t, err)
@ -42,40 +42,24 @@ func TestLink(t *testing.T) {
fatalErr(t, err) fatalErr(t, err)
} }
func Test10Lock(t *testing.T) {
testLockN(t, 10)
}
func Test100Lock(t *testing.T) {
testLockN(t, 100)
}
func Test1000Lock(t *testing.T) { func Test1000Lock(t *testing.T) {
testLockN(t, 1000)
}
func Test10000Lock(t *testing.T) {
testLockN(t, 10000)
}
func testLockN(t *testing.T, n int) {
ctx := logTestContext(t, []string{"test"}) ctx := logTestContext(t, []string{"test"})
NewLockable := func()(*Node) { NewLockable := func()(*Node) {
l, err := ctx.NewNode(nil, "LockableNode", NewLockableExt(nil)) l, err := NewNode(ctx, nil, "Lockable", 10, NewLockableExt(nil))
fatalErr(t, err) fatalErr(t, err)
return l return l
} }
reqs := make([]NodeID, n) reqs := make([]NodeID, 1000)
for i := range(reqs) { for i := range(reqs) {
new_lockable := NewLockable() new_lockable := NewLockable()
reqs[i] = new_lockable.ID reqs[i] = new_lockable.ID
} }
ctx.Log.Logf("test", "CREATED_%d", n) ctx.Log.Logf("test", "CREATED_1000")
listener := NewListenerExt(50000) listener := NewListenerExt(5000)
node, err := ctx.NewNode(nil, "LockableNode", listener, NewLockableExt(reqs)) node, err := NewNode(ctx, nil, "Lockable", 5000, listener, NewLockableExt(reqs))
fatalErr(t, err) fatalErr(t, err)
ctx.Log.Logf("test", "CREATED_LISTENER") ctx.Log.Logf("test", "CREATED_LISTENER")
@ -91,15 +75,15 @@ func testLockN(t *testing.T, n int) {
t.Fatalf("Unexpected response to lock - %s", resp) t.Fatalf("Unexpected response to lock - %s", resp)
} }
ctx.Log.Logf("test", "LOCKED_%d", n) ctx.Log.Logf("test", "LOCKED_1000")
} }
func TestLock(t *testing.T) { func TestLock(t *testing.T) {
ctx := logTestContext(t, []string{"test", "lockable"}) ctx := logTestContext(t, []string{"test", "lockable"})
NewLockable := func(reqs []NodeID)(*Node, *ListenerExt) { NewLockable := func(reqs []NodeID)(*Node, *ListenerExt) {
listener := NewListenerExt(10000) listener := NewListenerExt(1000)
l, err := ctx.NewNode(nil, "LockableNode", listener, NewLockableExt(reqs)) l, err := NewNode(ctx, nil, "Lockable", 10, listener, NewLockableExt(reqs))
fatalErr(t, err) fatalErr(t, err)
return l, listener return l, listener
} }
@ -118,31 +102,25 @@ func TestLock(t *testing.T) {
ctx.Log.Logf("test", "l4: %s", l4.ID) ctx.Log.Logf("test", "l4: %s", l4.ID)
ctx.Log.Logf("test", "l5: %s", l5.ID) ctx.Log.Logf("test", "l5: %s", l5.ID)
ctx.Log.Logf("test", "locking l0")
id_1, err := LockLockable(ctx, l0) id_1, err := LockLockable(ctx, l0)
ctx.Log.Logf("test", "ID_1: %s", id_1)
fatalErr(t, err) fatalErr(t, err)
response, _, err := WaitForResponse(l0_listener.Chan, time.Millisecond*10, id_1) _, _, err = WaitForResponse(l0_listener.Chan, time.Millisecond*10, id_1)
fatalErr(t, err) fatalErr(t, err)
ctx.Log.Logf("test", "l0 lock: %+v", response)
ctx.Log.Logf("test", "locking l1")
id_2, err := LockLockable(ctx, l1) id_2, err := LockLockable(ctx, l1)
fatalErr(t, err) fatalErr(t, err)
response, _, err = WaitForResponse(l1_listener.Chan, time.Millisecond*10000, id_2) _, _, err = WaitForResponse(l1_listener.Chan, time.Millisecond*100, id_2)
fatalErr(t, err) fatalErr(t, err)
ctx.Log.Logf("test", "l1 lock: %+v", response)
ctx.Log.Logf("test", "unlocking l0")
id_3, err := UnlockLockable(ctx, l0) id_3, err := UnlockLockable(ctx, l0)
fatalErr(t, err) fatalErr(t, err)
response, _, err = WaitForResponse(l0_listener.Chan, time.Millisecond*10, id_3) _, _, err = WaitForResponse(l0_listener.Chan, time.Millisecond*10, id_3)
fatalErr(t, err) fatalErr(t, err)
ctx.Log.Logf("test", "l0 unlock: %+v", response)
ctx.Log.Logf("test", "locking l1")
id_4, err := LockLockable(ctx, l1) id_4, err := LockLockable(ctx, l1)
fatalErr(t, err) fatalErr(t, err)
response, _, err = WaitForResponse(l1_listener.Chan, time.Millisecond*10, id_4)
_, _, err = WaitForResponse(l1_listener.Chan, time.Millisecond*10, id_4)
fatalErr(t, err) fatalErr(t, err)
ctx.Log.Logf("test", "l1 lock: %+v", response)
} }

@ -50,7 +50,7 @@ func (logger * ConsoleLogger) SetComponents(components []string) error {
return false return false
} }
for c := range(logger.loggers) { for c, _ := range(logger.loggers) {
if component_enabled(c) == false { if component_enabled(c) == false {
delete(logger.loggers, c) delete(logger.loggers, c)
} }

@ -1,68 +1,11 @@
package graphvent package graphvent
type Message struct { type SendMsg struct {
Node NodeID Dest NodeID
Signal Signal Signal Signal
} }
type MessageQueue struct { type RecvMsg struct {
out chan<- Message Source NodeID
in <-chan Message Signal Signal
buffer []Message
write_cursor int
read_cursor int
}
func (queue *MessageQueue) ProcessIncoming(message Message) {
if (queue.write_cursor + 1) == queue.read_cursor || ((queue.write_cursor + 1) == len(queue.buffer) && queue.read_cursor == 0) {
new_buffer := make([]Message, len(queue.buffer) * 2)
copy(new_buffer, queue.buffer[queue.read_cursor:])
first_chunk := len(queue.buffer) - queue.read_cursor
copy(new_buffer[first_chunk:], queue.buffer[0:queue.write_cursor])
queue.write_cursor = len(queue.buffer) - 1
queue.read_cursor = 0
queue.buffer = new_buffer
}
queue.buffer[queue.write_cursor] = message
queue.write_cursor += 1
if queue.write_cursor >= len(queue.buffer) {
queue.write_cursor = 0
}
}
func NewMessageQueue(initial int) (chan<- Message, <-chan Message) {
in := make(chan Message, 0)
out := make(chan Message, 0)
queue := MessageQueue{
out: out,
in: in,
buffer: make([]Message, initial),
write_cursor: 0,
read_cursor: 0,
}
go func(queue *MessageQueue) {
for true {
if queue.write_cursor != queue.read_cursor {
select {
case incoming := <-queue.in:
queue.ProcessIncoming(incoming)
case queue.out <- queue.buffer[queue.read_cursor]:
queue.read_cursor += 1
if queue.read_cursor >= len(queue.buffer) {
queue.read_cursor = 0
}
}
} else {
message := <-queue.in
queue.ProcessIncoming(message)
}
}
}(&queue)
return in, out
} }

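With the buffered MessageQueue gone, delivery is a plain channel send: callers address SendMsg values by destination, and the receiving node sees a RecvMsg stamped with the source instead. ctx.Send itself is not shown in this diff, so the routing sketch below is an assumption about its behaviour, not the real implementation:

// Hypothetical routing step: translate SendMsg (addressed by Dest) into
// RecvMsg (stamped with Source) on the destination node's channel.
func routeMessages(nodes map[NodeID]*Node, source *Node, messages []SendMsg) error {
	for _, msg := range messages {
		dest, ok := nodes[msg.Dest]
		if ok == false {
			return fmt.Errorf("unknown destination %s", msg.Dest)
		}
		dest.MsgChan <- RecvMsg{Source: source.ID, Signal: msg.Signal}
	}
	return nil
}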
@ -1,35 +0,0 @@
package graphvent
import (
"encoding/binary"
"testing"
)
func sendBatch(start, end uint64, in chan<- Message) {
for i := start; i <= end; i++ {
var id NodeID
binary.BigEndian.PutUint64(id[:], i)
in <- Message{id, nil}
}
}
func TestMessageQueue(t *testing.T) {
in, out := NewMessageQueue(10)
for i := uint64(0); i < 1000; i++ {
go sendBatch(1000*i, (1000*(i+1))-1, in)
}
seen := map[NodeID]any{}
for i := uint64(0); i < 1000*1000; i++ {
read := <-out
_, already_seen := seen[read.Node]
if already_seen {
t.Fatalf("Signal %d had duplicate NodeID %s", i, read.Node)
} else {
seen[read.Node] = nil
}
}
t.Logf("Processed 1M signals through queue")
}

@ -1,15 +1,17 @@
package graphvent package graphvent
import ( import (
"crypto/ed25519" "crypto/ed25519"
"crypto/sha512" "crypto/rand"
"fmt" "crypto/sha512"
"reflect" "encoding/binary"
"sync/atomic" "fmt"
"time" "reflect"
"sync/atomic"
_ "github.com/dgraph-io/badger/v3" "time"
"github.com/google/uuid"
_ "github.com/dgraph-io/badger/v3"
"github.com/google/uuid"
) )
var ( var (
@ -23,10 +25,6 @@ type NodeID uuid.UUID
func (id NodeID) MarshalBinary() ([]byte, error) { func (id NodeID) MarshalBinary() ([]byte, error) {
return (uuid.UUID)(id).MarshalBinary() return (uuid.UUID)(id).MarshalBinary()
} }
func (id *NodeID) UnmarshalBinary(data []byte) error {
return (*uuid.UUID)(id).UnmarshalBinary(data)
}
func (id NodeID) String() string { func (id NodeID) String() string {
return (uuid.UUID)(id).String() return (uuid.UUID)(id).String()
} }
@ -69,27 +67,25 @@ func (q QueuedSignal) String() string {
return fmt.Sprintf("%+v@%s", reflect.TypeOf(q.Signal), q.Time) return fmt.Sprintf("%+v@%s", reflect.TypeOf(q.Signal), q.Time)
} }
type WaitMap map[uuid.UUID]NodeID // Default message channel size for nodes
const NODE_INITIAL_QUEUE_SIZE = 2
// Nodes represent a group of extensions that can be collectively addressed // Nodes represent a group of extensions that can be collectively addressed
type Node struct { type Node struct {
Key ed25519.PrivateKey `gv:"key"` Key ed25519.PrivateKey `gv:"key"`
ID NodeID ID NodeID
Type NodeType `gv:"type"` Type NodeType `gv:"type"`
// TODO: move each extension to its own db key, and extend changes to notify which extension was changed
Extensions map[ExtType]Extension Extensions map[ExtType]Extension
// Channel for this node to receive messages from the Context // Channel for this node to receive messages from the Context
SendChan chan<- Message MsgChan chan RecvMsg
RecvChan <-chan Message // Size of MsgChan
BufferSize uint32 `gv:"buffer_size"`
// Channel for this node to process delayed signals // Channel for this node to process delayed signals
TimeoutChan <-chan time.Time TimeoutChan <-chan time.Time
Active atomic.Bool Active atomic.Bool
// TODO: enhance WriteNode to write SignalQueue to a different key, and use writeSignalQueue to decide whether or not to update it
writeSignalQueue bool writeSignalQueue bool
SignalQueue []QueuedSignal SignalQueue []QueuedSignal
NextSignal *QueuedSignal NextSignal *QueuedSignal
@ -101,11 +97,59 @@ func (node *Node) PostDeserialize(ctx *Context) error {
public := node.Key.Public().(ed25519.PublicKey) public := node.Key.Public().(ed25519.PublicKey)
node.ID = KeyID(public) node.ID = KeyID(public)
node.SendChan, node.RecvChan = NewMessageQueue(NODE_INITIAL_QUEUE_SIZE) node.MsgChan = make(chan RecvMsg, node.BufferSize)
return nil return nil
} }
type WaitReason string
type WaitInfo struct {
Destination NodeID `gv:"destination" node:"Base"`
Timeout uuid.UUID `gv:"timeout"`
Reason WaitReason `gv:"reason"`
}
type WaitMap map[uuid.UUID]WaitInfo
// Removes a signal from the wait_map and dequeues the associated timeout signal
// Returns the data, and whether or not the ID was found in the wait_map
func (node *Node) ProcessResponse(wait_map WaitMap, response ResponseSignal) (WaitInfo, bool) {
wait_info, is_processed := wait_map[response.ResponseID()]
if is_processed == true {
delete(wait_map, response.ResponseID())
if response.ID() != wait_info.Timeout {
node.DequeueSignal(wait_info.Timeout)
}
return wait_info, true
}
return WaitInfo{}, false
}
func (node *Node) NewTimeout(reason WaitReason, dest NodeID, timeout time.Duration) (WaitInfo, uuid.UUID) {
id := uuid.New()
timeout_signal := NewTimeoutSignal(id)
node.QueueSignal(time.Now().Add(timeout), timeout_signal)
return WaitInfo{
Destination: dest,
Timeout: timeout_signal.Id,
Reason: reason,
}, id
}
// Creates a timeout signal for signal, queues it for the node at the timeout, and returns the WaitInfo
func (node *Node) QueueTimeout(reason WaitReason, dest NodeID, signal Signal, timeout time.Duration) WaitInfo {
timeout_signal := NewTimeoutSignal(signal.ID())
node.QueueSignal(time.Now().Add(timeout), timeout_signal)
return WaitInfo{
Destination: dest,
Timeout: timeout_signal.Id,
Reason: reason,
}
}
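
NewTimeout and QueueTimeout pair an outgoing request with a TimeoutSignal queued locally, and ProcessResponse clears the WaitMap entry and dequeues that timeout when the real answer arrives first. A hedged sketch of that bookkeeping from an extension's point of view (the "request" reason string is arbitrary):

// Send a tracked request: remember where it went and arm a timeout.
func sendTracked(node *Node, waits WaitMap, dest NodeID, sig Signal) []SendMsg {
	waits[sig.ID()] = node.QueueTimeout("request", dest, sig, 100*time.Millisecond)
	return []SendMsg{{dest, sig}}
}

// Settle a tracked request from its response (or its TimeoutSignal);
// returns the destination the original request was sent to.
func settleTracked(node *Node, waits WaitMap, resp ResponseSignal) (NodeID, bool) {
	info, found := node.ProcessResponse(waits, resp)
	if found == false {
		return NodeID{}, false
	}
	return info.Destination, true
}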
func (node *Node) QueueSignal(time time.Time, signal Signal) { func (node *Node) QueueSignal(time time.Time, signal Signal) {
node.SignalQueue = append(node.SignalQueue, QueuedSignal{signal, time}) node.SignalQueue = append(node.SignalQueue, QueuedSignal{signal, time})
node.NextSignal, node.TimeoutChan = SoonestSignal(node.SignalQueue) node.NextSignal, node.TimeoutChan = SoonestSignal(node.SignalQueue)
@ -143,23 +187,18 @@ func SoonestSignal(signals []QueuedSignal) (*QueuedSignal, <-chan time.Time) {
} }
if soonest_signal != nil { if soonest_signal != nil {
if time.Now().Compare(soonest_time) == -1 { return soonest_signal, time.After(time.Until(soonest_signal.Time))
return soonest_signal, time.After(time.Until(soonest_signal.Time))
} else {
c := make(chan time.Time, 1)
c <- soonest_time
return soonest_signal, c
}
} else { } else {
return nil, nil return nil, nil
} }
} }
func runNode(ctx *Context, node *Node, status chan string, control chan string) { func runNode(ctx *Context, node *Node) {
ctx.Log.Logf("node", "RUN_START: %s", node.ID) ctx.Log.Logf("node", "RUN_START: %s", node.ID)
err := nodeLoop(ctx, node, status, control) err := nodeLoop(ctx, node)
if err != nil { if err != nil {
ctx.Log.Logf("node", "%s runNode err %s", node.ID, err) ctx.Log.Logf("node", "%s runNode err %s", node.ID, err)
panic(err)
} }
ctx.Log.Logf("node", "RUN_STOP: %s", node.ID) ctx.Log.Logf("node", "RUN_STOP: %s", node.ID)
} }
@ -175,74 +214,43 @@ func (err StringError) MarshalBinary() ([]byte, error) {
return []byte(string(err)), nil return []byte(string(err)), nil
} }
func (node *Node) ReadFields(ctx *Context, fields []string)map[string]any { func (node *Node) ReadFields(ctx *Context, reqs map[ExtType][]string)map[ExtType]map[string]any {
ctx.Log.Logf("read_field", "Reading %+v on %+v", fields, node.ID) ctx.Log.Logf("read_field", "Reading %+v on %+v", reqs, node.ID)
values := map[string]any{} exts := map[ExtType]map[string]any{}
for ext_type, field_reqs := range(reqs) {
node_info := ctx.NodeTypes[node.Type] ext_info, ext_known := ctx.Extensions[ext_type]
if ext_known {
for _, field_name := range(fields) { fields := map[string]any{}
field_info, mapped := node_info.Fields[field_name] for _, req := range(field_reqs) {
if mapped { ext, exists := node.Extensions[ext_type]
ext := node.Extensions[field_info.Extension] if exists == false {
values[field_name] = reflect.ValueOf(ext).Elem().FieldByIndex(field_info.Index).Interface() fields[req] = fmt.Errorf("%+v does not have %+v extension", node.ID, ext_type)
} else { } else {
values[field_name] = fmt.Errorf("NodeType %s has no field %s", node.Type, field_name) fields[req] = reflect.ValueOf(ext).Elem().FieldByIndex(ext_info.Fields[req].Index).Interface()
}
}
exts[ext_type] = fields
} }
} }
return exts
return values
} }
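
ReadFields now takes its requests grouped by extension type and returns the values grouped the same way, with per-field errors for extensions the node does not carry. A hedged sketch of building such a request ("buffer" is confirmed by the node test below; "requirements" is assumed to match LockableExt's gv tag):

// Ask one node for a ListenerExt field and a LockableExt field.
func requestFields(ctx *Context, sender *Node, target NodeID) error {
	read := NewReadSignal(map[ExtType][]string{
		ExtTypeFor[ListenerExt](): {"buffer"},
		ExtTypeFor[LockableExt](): {"requirements"},
	})
	return ctx.Send(sender, []SendMsg{{target, read}})
}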
// Main Loop for nodes // Main Loop for nodes
func nodeLoop(ctx *Context, node *Node, status chan string, control chan string) error { func nodeLoop(ctx *Context, node *Node) error {
is_started := node.Active.CompareAndSwap(false, true) started := node.Active.CompareAndSwap(false, true)
if is_started == false { if started == false {
return fmt.Errorf("%s is already started, will not start again", node.ID) return fmt.Errorf("%s is already started, will not start again", node.ID)
} else {
ctx.Log.Logf("node", "Set %s active", node.ID)
}
ctx.Log.Logf("node_ext", "Loading extensions for %s", node.ID)
for _, extension := range(node.Extensions) {
ctx.Log.Logf("node_ext", "Loading extension %s for %s", reflect.TypeOf(extension), node.ID)
err := extension.Load(ctx, node)
if err != nil {
ctx.Log.Logf("node_ext", "Failed to load extension %s on node %s", reflect.TypeOf(extension), node.ID)
node.Active.Store(false)
return err
} else {
ctx.Log.Logf("node_ext", "Loaded extension %s on node %s", reflect.TypeOf(extension), node.ID)
}
} }
ctx.Log.Logf("node_ext", "Loaded extensions for %s", node.ID) run := true
for run == true {
status <- "active"
running := true
for running {
var signal Signal var signal Signal
var source NodeID var source NodeID
select { select {
case command := <-control: case msg := <- node.MsgChan:
switch command { signal = msg.Signal
case "stop": source = msg.Source
running = false
case "pause":
status <- "paused"
command := <- control
switch command {
case "resume":
status <- "resumed"
case "stop":
running = false
}
default:
ctx.Log.Logf("node", "Unknown control command %s", command)
}
case <-node.TimeoutChan: case <-node.TimeoutChan:
signal = node.NextSignal.Signal signal = node.NextSignal.Signal
source = node.ID source = node.ID
@ -271,17 +279,15 @@ func nodeLoop(ctx *Context, node *Node, status chan string, control chan string)
} else { } else {
ctx.Log.Logf("node", "NODE_TIMEOUT(%s) - PROCESSING %+v@%s - NEXT_SIGNAL: %s@%s", node.ID, signal, t, node.NextSignal, node.NextSignal.Time) ctx.Log.Logf("node", "NODE_TIMEOUT(%s) - PROCESSING %+v@%s - NEXT_SIGNAL: %s@%s", node.ID, signal, t, node.NextSignal, node.NextSignal.Time)
} }
case msg := <- node.RecvChan:
signal = msg.Signal
source = msg.Node
} }
ctx.Log.Logf("node", "NODE_SIGNAL_QUEUE[%s]: %+v", node.ID, node.SignalQueue)
switch sig := signal.(type) { switch sig := signal.(type) {
case *ReadSignal: case *ReadSignal:
result := node.ReadFields(ctx, sig.Fields) result := node.ReadFields(ctx, sig.Extensions)
msgs := []Message{} msgs := []SendMsg{}
msgs = append(msgs, Message{source, NewReadResultSignal(sig.ID(), node.ID, node.Type, result)}) msgs = append(msgs, SendMsg{source, NewReadResultSignal(sig.ID(), node.ID, node.Type, result)})
ctx.Send(node, msgs) ctx.Send(node, msgs)
default: default:
@ -297,45 +303,31 @@ func nodeLoop(ctx *Context, node *Node, status chan string, control chan string)
if stopped == false { if stopped == false {
panic("BAD_STATE: stopping already stopped node") panic("BAD_STATE: stopping already stopped node")
} }
for _, extension := range(node.Extensions) {
extension.Unload(ctx, node)
}
status <- "stopped"
return nil return nil
} }
func (node *Node) QueueChanges(ctx *Context, changes map[ExtType]Changes) error { func (node *Node) Unload(ctx *Context) error {
node_info, exists := ctx.NodeTypes[node.Type] if node.Active.Load() {
if exists == false { for _, extension := range(node.Extensions) {
return fmt.Errorf("Node type not in context, can't map changes to field names") extension.Unload(ctx, node)
} else {
fields := []string{}
for ext_type, ext_changes := range(changes) {
ext_map, ext_mapped := node_info.ReverseFields[ext_type]
if ext_mapped {
for _, ext_tag := range(ext_changes) {
field_name, tag_mapped := ext_map[ext_tag]
if tag_mapped {
fields = append(fields, field_name)
}
}
}
}
ctx.Log.Logf("changes", "Changes to queue from %+v: %+v", node_info.ReverseFields, fields)
if len(fields) > 0 {
node.QueueSignal(time.Time{}, NewStatusSignal(node.ID, fields))
} }
return nil return nil
} else {
return fmt.Errorf("Node not active")
} }
} }
func (node *Node) QueueChanges(ctx *Context, changes map[ExtType]Changes) error {
node.QueueSignal(time.Now(), NewStatusSignal(node.ID, changes))
return nil
}
func (node *Node) Process(ctx *Context, source NodeID, signal Signal) error { func (node *Node) Process(ctx *Context, source NodeID, signal Signal) error {
messages := []Message{} ctx.Log.Logf("node_process", "PROCESSING MESSAGE: %s - %+v", node.ID, signal)
messages := []SendMsg{}
changes := map[ExtType]Changes{} changes := map[ExtType]Changes{}
for ext_type, ext := range(node.Extensions) { for ext_type, ext := range(node.Extensions) {
ctx.Log.Logf("node_process", "PROCESSING_EXTENSION: %s/%s", node.ID, ext_type)
ext_messages, ext_changes := ext.Process(ctx, node, source, signal) ext_messages, ext_changes := ext.Process(ctx, node, source, signal)
if len(ext_messages) != 0 { if len(ext_messages) != 0 {
messages = append(messages, ext_messages...) messages = append(messages, ext_messages...)
@ -344,6 +336,7 @@ func (node *Node) Process(ctx *Context, source NodeID, signal Signal) error {
changes[ext_type] = ext_changes changes[ext_type] = ext_changes
} }
} }
ctx.Log.Logf("changes", "Changes for %s after %+v - %+v", node.ID, reflect.TypeOf(signal), changes)
if len(messages) != 0 { if len(messages) != 0 {
send_err := ctx.Send(node, messages) send_err := ctx.Send(node, messages)
@ -353,7 +346,11 @@ func (node *Node) Process(ctx *Context, source NodeID, signal Signal) error {
} }
if len(changes) != 0 { if len(changes) != 0 {
ctx.Log.Logf("changes", "Changes to %s from %+v: %+v", node.ID, signal, changes) write_err := WriteNodeChanges(ctx, node, changes)
if write_err != nil {
return write_err
}
status_err := node.QueueChanges(ctx, changes) status_err := node.QueueChanges(ctx, changes)
if status_err != nil { if status_err != nil {
return status_err return status_err
@ -399,3 +396,85 @@ func KeyID(pub ed25519.PublicKey) NodeID {
id := uuid.NewHash(sha512.New(), ZeroUUID, pub, 3) id := uuid.NewHash(sha512.New(), ZeroUUID, pub, 3)
return NodeID(id) return NodeID(id)
} }
// Create a new node in memory and start its event loop
func NewNode(ctx *Context, key ed25519.PrivateKey, type_name string, buffer_size uint32, extensions ...Extension) (*Node, error) {
node_type, known_type := ctx.NodeTypes[type_name]
if known_type == false {
return nil, fmt.Errorf("%s is not a known node type", type_name)
}
var err error
var public ed25519.PublicKey
if key == nil {
public, key, err = ed25519.GenerateKey(rand.Reader)
if err != nil {
return nil, err
}
} else {
public = key.Public().(ed25519.PublicKey)
}
id := KeyID(public)
_, exists := ctx.Node(id)
if exists == true {
return nil, fmt.Errorf("Attempted to create an existing node")
}
ext_map := map[ExtType]Extension{}
for _, ext := range(extensions) {
ext_type, exists := ctx.ExtensionTypes[reflect.TypeOf(ext).Elem()]
if exists == false {
return nil, fmt.Errorf(fmt.Sprintf("%+v is not a known Extension", reflect.TypeOf(ext)))
}
_, exists = ext_map[ext_type.ExtType]
if exists == true {
return nil, fmt.Errorf("Cannot add the same extension to a node twice")
}
ext_map[ext_type.ExtType] = ext
}
for _, required_ext := range(node_type.Extensions) {
_, exists := ext_map[required_ext]
if exists == false {
return nil, fmt.Errorf(fmt.Sprintf("%+v requires %+v", node_type, required_ext))
}
}
node := &Node{
Key: key,
ID: id,
Type: node_type.NodeType,
Extensions: ext_map,
MsgChan: make(chan RecvMsg, buffer_size),
BufferSize: buffer_size,
SignalQueue: []QueuedSignal{},
writeSignalQueue: false,
}
err = WriteNodeInit(ctx, node)
if err != nil {
return nil, err
}
// Load each extension before starting the main loop
for _, extension := range(node.Extensions) {
err := extension.Load(ctx, node)
if err != nil {
return nil, err
}
}
ctx.AddNode(id, node)
go runNode(ctx, node)
return node, nil
}
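
Because the ID is KeyID of the node's ed25519 public key, passing an explicit key makes the resulting NodeID reproducible, and NewNode rejects a second node with the same key. A hedged test-style sketch (mirrors the "Base"/listener setup used in the node tests below):

func TestDeterministicID(t *testing.T) {
	ctx := logTestContext(t, []string{"test"})

	pub, key, err := ed25519.GenerateKey(rand.Reader)
	fatalErr(t, err)

	node, err := NewNode(ctx, key, "Base", 10, NewListenerExt(10))
	fatalErr(t, err)

	// The ID can be computed up front from the public key alone.
	if node.ID != KeyID(pub) {
		t.Fatalf("expected %s, got %s", KeyID(pub), node.ID)
	}

	// Re-using the key must fail the existing-node check in NewNode.
	_, err = NewNode(ctx, key, "Base", 10, NewListenerExt(10))
	if err == nil {
		t.Fatal("expected an error when recreating an existing node")
	}
}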
var extension_suffix = []byte{0xEE, 0xFF, 0xEE, 0xFF}
var signal_queue_suffix = []byte{0xAB, 0xBA, 0xAB, 0xBA}
func ExtTypeSuffix(ext_type ExtType) []byte {
ret := make([]byte, 12)
copy(ret[0:4], extension_suffix)
binary.BigEndian.PutUint64(ret[4:], uint64(ext_type))
return ret
}
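
ExtTypeSuffix produces a fixed 12-byte tail: the 4-byte extension magic followed by the big-endian ExtType. Presumably it is appended to a node's serialized ID to address per-extension records; the write path is not shown in this section, so the key layout below is an assumption:

// Hypothetical key composition for one extension of one node.
func extensionKey(node NodeID, ext ExtType) ([]byte, error) {
	id_ser, err := node.MarshalBinary()
	if err != nil {
		return nil, err
	}
	return append(id_ser, ExtTypeSuffix(ext)...), nil
}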

@ -5,19 +5,29 @@ import (
"time" "time"
"crypto/rand" "crypto/rand"
"crypto/ed25519" "crypto/ed25519"
"slices"
) )
func TestNodeDB(t *testing.T) { func TestNodeDB(t *testing.T) {
ctx := logTestContext(t, []string{"test", "node", "db"}) ctx := logTestContext(t, []string{"node", "db"})
node_listener := NewListenerExt(10) node_listener := NewListenerExt(10)
node, err := ctx.NewNode(nil, "Node", NewLockableExt(nil), node_listener) node, err := NewNode(ctx, nil, "Base", 10, NewLockableExt(nil), node_listener)
fatalErr(t, err) fatalErr(t, err)
err = ctx.Stop() _, err = WaitForSignal(node_listener.Chan, 10*time.Millisecond, func(sig *StatusSignal) bool {
gql_changes, has_gql := sig.Changes[ExtTypeFor[GQLExt]()]
if has_gql == true {
return slices.Contains(gql_changes, "state") && sig.Source == node.ID
}
return false
})
err = ctx.Unload(node.ID)
fatalErr(t, err) fatalErr(t, err)
_, err = ctx.GetNode(node.ID) ctx.nodeMap = map[NodeID]*Node{}
_, err = ctx.getNode(node.ID)
fatalErr(t, err) fatalErr(t, err)
} }
@ -36,14 +46,16 @@ func TestNodeRead(t *testing.T) {
ctx.Log.Logf("test", "N2: %s", n2_id) ctx.Log.Logf("test", "N2: %s", n2_id)
n2_listener := NewListenerExt(10) n2_listener := NewListenerExt(10)
n2, err := ctx.NewNode(n2_key, "Node", n2_listener) n2, err := NewNode(ctx, n2_key, "Base", 10, n2_listener)
fatalErr(t, err) fatalErr(t, err)
n1, err := ctx.NewNode(n1_key, "Node", NewListenerExt(10)) n1, err := NewNode(ctx, n1_key, "Base", 10, NewListenerExt(10))
fatalErr(t, err) fatalErr(t, err)
read_sig := NewReadSignal([]string{"buffer"}) read_sig := NewReadSignal(map[ExtType][]string{
msgs := []Message{{n1.ID, read_sig}} ExtTypeFor[ListenerExt](): {"buffer"},
})
msgs := []SendMsg{{n1.ID, read_sig}}
err = ctx.Send(n2, msgs) err = ctx.Send(n2, msgs)
fatalErr(t, err) fatalErr(t, err)

@ -6,6 +6,7 @@ import (
"fmt" "fmt"
"reflect" "reflect"
"math" "math"
"slices"
) )
type SerializedType uint64 type SerializedType uint64
@ -38,8 +39,14 @@ func (t FieldTag) String() string {
return fmt.Sprintf("0x%x", uint64(t)) return fmt.Sprintf("0x%x", uint64(t))
} }
func NodeTypeFor(name string) NodeType { func NodeTypeFor(extensions []ExtType) NodeType {
digest := []byte("GRAPHVENT_NODE - " + name) digest := []byte("GRAPHVENT_NODE - ")
slices.Sort(extensions)
for _, ext := range(extensions) {
digest = binary.BigEndian.AppendUint64(digest, uint64(ext))
}
hash := sha512.Sum512(digest) hash := sha512.Sum512(digest)
return NodeType(binary.BigEndian.Uint64(hash[0:8])) return NodeType(binary.BigEndian.Uint64(hash[0:8]))
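
NodeTypeFor now derives the type id from the sorted extension list rather than a name, so two calls with the same extensions in any order agree. A short test-style sketch:

func TestNodeTypeOrderIndependent(t *testing.T) {
	a := NodeTypeFor([]ExtType{ExtTypeFor[LockableExt](), ExtTypeFor[ListenerExt]()})
	b := NodeTypeFor([]ExtType{ExtTypeFor[ListenerExt](), ExtTypeFor[LockableExt]()})
	if a != b {
		t.Fatal("NodeTypeFor should not depend on extension order")
	}
}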
@ -59,10 +66,6 @@ func ExtTypeFor[E any, T interface { *E; Extension}]() ExtType {
return ExtType(SerializedTypeFor[E]()) return ExtType(SerializedTypeFor[E]())
} }
func ExtTypeOf(t reflect.Type) ExtType {
return ExtType(SerializeType(t.Elem()))
}
func SignalTypeFor[S Signal]() SignalType { func SignalTypeFor[S Signal]() SignalType {
return SignalType(SerializedTypeFor[S]()) return SignalType(SerializedTypeFor[S]())
} }
@ -77,59 +80,49 @@ func GetFieldTag(tag string) FieldTag {
return FieldTag(Hash("GRAPHVENT_FIELD_TAG", tag)) return FieldTag(Hash("GRAPHVENT_FIELD_TAG", tag))
} }
func TypeStack(ctx *Context, t reflect.Type, data []byte) (int, error) { func TypeStack(ctx *Context, t reflect.Type) ([]byte, error) {
info, registered := ctx.Types[t] info, registered := ctx.TypeTypes[t]
if registered { if registered {
binary.BigEndian.PutUint64(data, uint64(info.Serialized)) return binary.BigEndian.AppendUint64(nil, uint64(info.Serialized)), nil
return 8, nil
} else { } else {
switch t.Kind() { switch t.Kind() {
case reflect.Map: case reflect.Map:
binary.BigEndian.PutUint64(data, uint64(SerializeType(reflect.Map))) key_stack, err := TypeStack(ctx, t.Key())
key_written, err := TypeStack(ctx, t.Key(), data[8:])
if err != nil { if err != nil {
return 0, err return nil, err
} }
elem_written, err := TypeStack(ctx, t.Elem(), data[8 + key_written:]) elem_stack, err := TypeStack(ctx, t.Elem())
if err != nil { if err != nil {
return 0, err return nil, err
} }
return 8 + key_written + elem_written, nil return append(binary.BigEndian.AppendUint64(nil, uint64(SerializeType(reflect.Map))), append(key_stack, elem_stack...)...), nil
case reflect.Pointer: case reflect.Pointer:
binary.BigEndian.PutUint64(data, uint64(SerializeType(reflect.Pointer))) elem_stack, err := TypeStack(ctx, t.Elem())
elem_written, err := TypeStack(ctx, t.Elem(), data[8:])
if err != nil { if err != nil {
return 0, err return nil, err
} }
return 8 + elem_written, nil return append(binary.BigEndian.AppendUint64(nil, uint64(SerializeType(reflect.Pointer))), elem_stack...), nil
case reflect.Slice: case reflect.Slice:
binary.BigEndian.PutUint64(data, uint64(SerializeType(reflect.Slice))) elem_stack, err := TypeStack(ctx, t.Elem())
elem_written, err := TypeStack(ctx, t.Elem(), data[8:])
if err != nil { if err != nil {
return 0, err return nil, err
} }
return 8 + elem_written, nil return append(binary.BigEndian.AppendUint64(nil, uint64(SerializeType(reflect.Slice))), elem_stack...), nil
case reflect.Array: case reflect.Array:
binary.BigEndian.PutUint64(data, uint64(SerializeType(reflect.Array))) elem_stack, err := TypeStack(ctx, t.Elem())
binary.BigEndian.PutUint64(data[8:], uint64(t.Len()))
elem_written, err := TypeStack(ctx, t.Elem(), data[16:])
if err != nil { if err != nil {
return 0, err return nil, err
} }
return 16 + elem_written, nil stack := binary.BigEndian.AppendUint64(nil, uint64(SerializeType(reflect.Array)))
stack = binary.BigEndian.AppendUint64(stack, uint64(t.Len()))
return append(stack, elem_stack...), nil
default: default:
return 0, fmt.Errorf("Hit %s, which is not a registered type", t.String()) return nil, fmt.Errorf("Hit %s, which is not a registered type", t.String())
} }
} }
} }
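
TypeStack now returns a freshly allocated slice: registered types collapse to their 8-byte id, and composite kinds emit their kind marker followed by the nested stacks (arrays also append their length), which UnwrapStack consumes in the same order. A hedged round-trip helper, usable for any type whose leaves are registered in the Context:

// Sketch: encode a type to a stack and recover it; mirrors the
// testTypeStack helper further down in this diff.
func roundTripType(ctx *Context, t reflect.Type) (reflect.Type, error) {
	stack, err := TypeStack(ctx, t)
	if err != nil {
		return nil, err
	}
	out, rest, err := UnwrapStack(ctx, stack)
	if err != nil {
		return nil, err
	}
	if len(rest) != 0 {
		return nil, fmt.Errorf("%d bytes left after unwrapping", len(rest))
	}
	return out, nil
}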
@ -138,7 +131,7 @@ func UnwrapStack(ctx *Context, stack []byte) (reflect.Type, []byte, error) {
first_bytes, left := split(stack, 8) first_bytes, left := split(stack, 8)
first := SerializedType(binary.BigEndian.Uint64(first_bytes)) first := SerializedType(binary.BigEndian.Uint64(first_bytes))
info, registered := ctx.TypesReverse[first] info, registered := ctx.TypeMap[first]
if registered { if registered {
return info.Reflect, left, nil return info.Reflect, left, nil
} else { } else {
@ -183,18 +176,18 @@ func UnwrapStack(ctx *Context, stack []byte) (reflect.Type, []byte, error) {
} }
} }
func Serialize[T any](ctx *Context, value T, data []byte) (int, error) { func Serialize[T any](ctx *Context, value T) ([]byte, error) {
return SerializeValue(ctx, reflect.ValueOf(&value).Elem(), data) return serializeValue(ctx, reflect.ValueOf(&value).Elem())
} }
func Deserialize[T any](ctx *Context, data []byte) (T, error) { func Deserialize[T any](ctx *Context, data []byte) (T, error) {
reflect_type := reflect.TypeFor[T]() reflect_type := reflect.TypeFor[T]()
var zero T var zero T
value, left, err := DeserializeValue(ctx, data, reflect_type) value, left, err := deserializeValue(ctx, data, reflect_type)
if err != nil { if err != nil {
return zero, err return zero, err
} else if len(left) != 0 { } else if len(left) != 0 {
return zero, fmt.Errorf("%d/%d bytes left after deserializing %+v", len(left), len(data), value) return zero, fmt.Errorf("%d bytes left after deserializing %+v", len(left), value)
} else if value.Type() != reflect_type { } else if value.Type() != reflect_type {
return zero, fmt.Errorf("Deserialized type %s does not match %s", value.Type(), reflect_type) return zero, fmt.Errorf("Deserialized type %s does not match %s", value.Type(), reflect_type)
} }
@ -202,156 +195,10 @@ func Deserialize[T any](ctx *Context, data []byte) (T, error) {
return value.Interface().(T), nil return value.Interface().(T), nil
} }
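
Serialize and Deserialize now return and consume byte slices directly instead of writing into caller-provided buffers, so a round trip is two calls with no size bookkeeping. A hedged sketch:

// Sketch: round-trip any registered value through the slice-based API.
func roundTrip[T any](ctx *Context, value T) (T, error) {
	data, err := Serialize(ctx, value)
	if err != nil {
		var zero T
		return zero, err
	}
	return Deserialize[T](ctx, data)
}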
func SerializedSize(ctx *Context, value reflect.Value) (int, error) { func serializeValue(ctx *Context, value reflect.Value) ([]byte, error) {
var sizefn SerializedSizeFn = nil
info, registered := ctx.Types[value.Type()]
if registered {
sizefn = info.SerializedSize
}
if sizefn == nil {
switch value.Type().Kind() {
case reflect.Bool:
return 1, nil
case reflect.Int8:
return 1, nil
case reflect.Int16:
return 2, nil
case reflect.Int32:
return 4, nil
case reflect.Int64:
fallthrough
case reflect.Int:
return 8, nil
case reflect.Uint8:
return 1, nil
case reflect.Uint16:
return 2, nil
case reflect.Uint32:
return 4, nil
case reflect.Uint64:
fallthrough
case reflect.Uint:
return 8, nil
case reflect.Float32:
return 4, nil
case reflect.Float64:
return 8, nil
case reflect.String:
return 8 + value.Len(), nil
case reflect.Pointer:
if value.IsNil() {
return 1, nil
} else {
elem_len, err := SerializedSize(ctx, value.Elem())
if err != nil {
return 0, err
} else {
return 1 + elem_len, nil
}
}
case reflect.Slice:
if value.IsNil() {
return 1, nil
} else {
elem_total := 0
for i := 0; i < value.Len(); i++ {
elem_len, err := SerializedSize(ctx, value.Index(i))
if err != nil {
return 0, err
}
elem_total += elem_len
}
return 9 + elem_total, nil
}
case reflect.Array:
total := 0
for i := 0; i < value.Len(); i++ {
elem_len, err := SerializedSize(ctx, value.Index(i))
if err != nil {
return 0, err
}
total += elem_len
}
return total, nil
case reflect.Map:
if value.IsNil() {
return 1, nil
} else {
key := reflect.New(value.Type().Key()).Elem()
val := reflect.New(value.Type().Elem()).Elem()
iter := value.MapRange()
total := 0
for iter.Next() {
key.SetIterKey(iter)
k, err := SerializedSize(ctx, key)
if err != nil {
return 0, err
}
total += k
val.SetIterValue(iter)
v, err := SerializedSize(ctx, val)
if err != nil {
return 0, err
}
total += v
}
return 9 + total, nil
}
case reflect.Struct:
if registered == false {
return 0, fmt.Errorf("Can't serialize unregistered struct %s", value.Type())
} else {
field_total := 0
for _, field_info := range(info.Fields) {
field_size, err := SerializedSize(ctx, value.FieldByIndex(field_info.Index))
if err != nil {
return 0, err
}
field_total += 8
field_total += field_size
}
return 8 + field_total, nil
}
case reflect.Interface:
// TODO get size of TypeStack instead of just using 128
elem_size, err := SerializedSize(ctx, value.Elem())
if err != nil {
return 0, err
}
return 128 + elem_size, nil
default:
return 0, fmt.Errorf("Don't know how to serialize %s", value.Type())
}
} else {
return sizefn(ctx, value)
}
}
func SerializeValue(ctx *Context, value reflect.Value, data []byte) (int, error) {
var serialize SerializeFn = nil var serialize SerializeFn = nil
info, registered := ctx.Types[value.Type()] info, registered := ctx.TypeTypes[value.Type()]
if registered { if registered {
serialize = info.Serialize serialize = info.Serialize
} }
@ -360,160 +207,144 @@ func SerializeValue(ctx *Context, value reflect.Value, data []byte) (int, error)
switch value.Type().Kind() { switch value.Type().Kind() {
case reflect.Bool: case reflect.Bool:
if value.Bool() { if value.Bool() {
data[0] = 0xFF return []byte{0xFF}, nil
} else { } else {
data[0] = 0x00 return []byte{0x00}, nil
} }
return 1, nil
case reflect.Int8: case reflect.Int8:
data[0] = byte(value.Int()) return []byte{byte(value.Int())}, nil
return 1, nil
case reflect.Int16: case reflect.Int16:
binary.BigEndian.PutUint16(data, uint16(value.Int())) return binary.BigEndian.AppendUint16(nil, uint16(value.Int())), nil
return 2, nil
case reflect.Int32: case reflect.Int32:
binary.BigEndian.PutUint32(data, uint32(value.Int())) return binary.BigEndian.AppendUint32(nil, uint32(value.Int())), nil
return 4, nil
case reflect.Int64: case reflect.Int64:
fallthrough fallthrough
case reflect.Int: case reflect.Int:
binary.BigEndian.PutUint64(data, uint64(value.Int())) return binary.BigEndian.AppendUint64(nil, uint64(value.Int())), nil
return 8, nil
case reflect.Uint8: case reflect.Uint8:
data[0] = byte(value.Uint()) return []byte{byte(value.Uint())}, nil
return 1, nil
case reflect.Uint16: case reflect.Uint16:
binary.BigEndian.PutUint16(data, uint16(value.Uint())) return binary.BigEndian.AppendUint16(nil, uint16(value.Uint())), nil
return 2, nil
case reflect.Uint32: case reflect.Uint32:
binary.BigEndian.PutUint32(data, uint32(value.Uint())) return binary.BigEndian.AppendUint32(nil, uint32(value.Uint())), nil
return 4, nil
case reflect.Uint64: case reflect.Uint64:
fallthrough fallthrough
case reflect.Uint: case reflect.Uint:
binary.BigEndian.PutUint64(data, value.Uint()) return binary.BigEndian.AppendUint64(nil, value.Uint()), nil
return 8, nil
case reflect.Float32: case reflect.Float32:
binary.BigEndian.PutUint32(data, math.Float32bits(float32(value.Float()))) return binary.BigEndian.AppendUint32(nil, math.Float32bits(float32(value.Float()))), nil
return 4, nil
case reflect.Float64: case reflect.Float64:
binary.BigEndian.PutUint64(data, math.Float64bits(value.Float())) return binary.BigEndian.AppendUint64(nil, math.Float64bits(value.Float())), nil
return 8, nil
case reflect.String: case reflect.String:
binary.BigEndian.PutUint64(data, uint64(value.Len())) len_bytes := make([]byte, 8)
copy(data[8:], []byte(value.String())) binary.BigEndian.PutUint64(len_bytes, uint64(value.Len()))
return 8 + value.Len(), nil return append(len_bytes, []byte(value.String())...), nil
case reflect.Pointer: case reflect.Pointer:
if value.IsNil() { if value.IsNil() {
data[0] = 0x00 return []byte{0x00}, nil
return 1, nil
} else { } else {
data[0] = 0x01 elem, err := serializeValue(ctx, value.Elem())
written, err := SerializeValue(ctx, value.Elem(), data[1:])
if err != nil { if err != nil {
return 0, err return nil, err
} }
return 1 + written, nil
return append([]byte{0x01}, elem...), nil
} }
case reflect.Slice: case reflect.Slice:
if value.IsNil() { if value.IsNil() {
data[0] = 0x00 return []byte{0x00}, nil
return 8, nil
} else { } else {
data[0] = 0x01 len_bytes := make([]byte, 8)
binary.BigEndian.PutUint64(data[1:], uint64(value.Len())) binary.BigEndian.PutUint64(len_bytes, uint64(value.Len()))
total_written := 0
data := []byte{}
for i := 0; i < value.Len(); i++ { for i := 0; i < value.Len(); i++ {
written, err := SerializeValue(ctx, value.Index(i), data[9+total_written:]) elem, err := serializeValue(ctx, value.Index(i))
if err != nil { if err != nil {
return 0, err return nil, err
} }
total_written += written
data = append(data, elem...)
} }
return 9 + total_written, nil
return append(len_bytes, data...), nil
} }
case reflect.Array: case reflect.Array:
total_written := 0 data := []byte{}
for i := 0; i < value.Len(); i++ { for i := 0; i < value.Len(); i++ {
written, err := SerializeValue(ctx, value.Index(i), data[total_written:]) elem, err := serializeValue(ctx, value.Index(i))
if err != nil { if err != nil {
return 0, err return nil, err
} }
total_written += written
data = append(data, elem...)
} }
return total_written, nil return data, nil
case reflect.Map: case reflect.Map:
if value.IsNil() { len_bytes := make([]byte, 8)
data[0] = 0x00 binary.BigEndian.PutUint64(len_bytes, uint64(value.Len()))
return 1, nil
} else {
data[0] = 0x01
binary.BigEndian.PutUint64(data[1:], uint64(value.Len()))
key := reflect.New(value.Type().Key()).Elem()
val := reflect.New(value.Type().Elem()).Elem()
iter := value.MapRange()
total_written := 0
for iter.Next() {
key.SetIterKey(iter)
val.SetIterValue(iter)
k, err := SerializeValue(ctx, key, data[9+total_written:])
if err != nil {
return 0, err
}
total_written += k
v, err := SerializeValue(ctx, val, data[9+total_written:]) data := []byte{}
if err != nil { iter := value.MapRange()
return 0, err for iter.Next() {
} k, err := serializeValue(ctx, iter.Key())
total_written += v if err != nil {
return nil, err
} }
return 9 + total_written, nil
data = append(data, k...)
v, err := serializeValue(ctx, iter.Value())
if err != nil {
return nil, err
}
data = append(data, v...)
} }
return append(len_bytes, data...), nil
case reflect.Struct: case reflect.Struct:
if registered == false { if registered == false {
return 0, fmt.Errorf("Cannot serialize unregistered struct %s", value.Type()) return nil, fmt.Errorf("Cannot serialize unregistered struct %s", value.Type())
} else { } else {
binary.BigEndian.PutUint64(data, uint64(len(info.Fields))) data := binary.BigEndian.AppendUint64(nil, uint64(len(info.Fields)))
total_written := 0
for field_tag, field_info := range(info.Fields) { for field_tag, field_info := range(info.Fields) {
binary.BigEndian.PutUint64(data[8+total_written:], uint64(field_tag)) data = append(data, binary.BigEndian.AppendUint64(nil, uint64(field_tag))...)
total_written += 8 field_bytes, err := serializeValue(ctx, value.FieldByIndex(field_info.Index))
written, err := SerializeValue(ctx, value.FieldByIndex(field_info.Index), data[8+total_written:])
if err != nil { if err != nil {
return 0, err return nil, err
} }
total_written += written
data = append(data, field_bytes...)
} }
return 8 + total_written, nil return data, nil
} }
case reflect.Interface: case reflect.Interface:
type_written, err := TypeStack(ctx, value.Elem().Type(), data) data, err := TypeStack(ctx, value.Elem().Type())
elem_written, err := SerializeValue(ctx, value.Elem(), data[type_written:]) val_data, err := serializeValue(ctx, value.Elem())
if err != nil { if err != nil {
return 0, err return nil, err
} }
return type_written + elem_written, nil data = append(data, val_data...)
return data, nil
default: default:
return 0, fmt.Errorf("Don't know how to serialize %s", value.Type()) return nil, fmt.Errorf("Don't know how to serialize %s", value.Type())
} }
} else { } else {
return serialize(ctx, value, data) return serialize(ctx, value)
} }
} }
@ -521,10 +352,10 @@ func split(data []byte, n int) ([]byte, []byte) {
return data[:n], data[n:] return data[:n], data[n:]
} }
func DeserializeValue(ctx *Context, data []byte, t reflect.Type) (reflect.Value, []byte, error) { func deserializeValue(ctx *Context, data []byte, t reflect.Type) (reflect.Value, []byte, error) {
var deserialize DeserializeFn = nil var deserialize DeserializeFn = nil
info, registered := ctx.Types[t] info, registered := ctx.TypeTypes[t]
if registered { if registered {
deserialize = info.Deserialize deserialize = info.Deserialize
} }
@ -608,7 +439,7 @@ func DeserializeValue(ctx *Context, data []byte, t reflect.Type) (reflect.Value,
value.SetZero() value.SetZero()
return value, after_flags, nil return value, after_flags, nil
} else { } else {
elem_value, after_elem, err := DeserializeValue(ctx, after_flags, t.Elem()) elem_value, after_elem, err := deserializeValue(ctx, after_flags, t.Elem())
if err != nil { if err != nil {
return reflect.Value{}, nil, err return reflect.Value{}, nil, err
} }
@ -617,25 +448,19 @@ func DeserializeValue(ctx *Context, data []byte, t reflect.Type) (reflect.Value,
} }
case reflect.Slice: case reflect.Slice:
nil_byte := data[0] len_bytes, left := split(data, 8)
data = data[1:] length := int(binary.BigEndian.Uint64(len_bytes))
if nil_byte == 0x00 { value := reflect.MakeSlice(t, length, length)
return reflect.New(t).Elem(), data, nil for i := 0; i < length; i++ {
} else { var elem_value reflect.Value
len_bytes, left := split(data, 8) var err error
length := int(binary.BigEndian.Uint64(len_bytes)) elem_value, left, err = deserializeValue(ctx, left, t.Elem())
value := reflect.MakeSlice(t, length, length) if err != nil {
for i := 0; i < length; i++ { return reflect.Value{}, nil, err
var elem_value reflect.Value
var err error
elem_value, left, err = DeserializeValue(ctx, left, t.Elem())
if err != nil {
return reflect.Value{}, nil, err
}
value.Index(i).Set(elem_value)
} }
return value, left, nil value.Index(i).Set(elem_value)
} }
return value, left, nil
case reflect.Array: case reflect.Array:
value := reflect.New(t).Elem() value := reflect.New(t).Elem()
@ -643,7 +468,7 @@ func DeserializeValue(ctx *Context, data []byte, t reflect.Type) (reflect.Value,
for i := 0; i < t.Len(); i++ { for i := 0; i < t.Len(); i++ {
var elem_value reflect.Value var elem_value reflect.Value
var err error var err error
elem_value, left, err = DeserializeValue(ctx, left, t.Elem()) elem_value, left, err = deserializeValue(ctx, left, t.Elem())
if err != nil { if err != nil {
return reflect.Value{}, nil, err return reflect.Value{}, nil, err
} }
@ -652,38 +477,33 @@ func DeserializeValue(ctx *Context, data []byte, t reflect.Type) (reflect.Value,
return value, left, nil return value, left, nil
case reflect.Map: case reflect.Map:
flags, after_flags := split(data, 1) len_bytes, left := split(data, 8)
if flags[0] == 0x00 { length := int(binary.BigEndian.Uint64(len_bytes))
return reflect.New(t).Elem(), after_flags, nil
} else {
len_bytes, left := split(after_flags, 8)
length := int(binary.BigEndian.Uint64(len_bytes))
value := reflect.MakeMapWithSize(t, length) value := reflect.MakeMapWithSize(t, length)
for i := 0; i < length; i++ { for i := 0; i < length; i++ {
var key_value reflect.Value var key_value reflect.Value
var val_value reflect.Value var val_value reflect.Value
var err error var err error
key_value, left, err = DeserializeValue(ctx, left, t.Key())
if err != nil {
return reflect.Value{}, nil, err
}
val_value, left, err = DeserializeValue(ctx, left, t.Elem()) key_value, left, err = deserializeValue(ctx, left, t.Key())
if err != nil { if err != nil {
return reflect.Value{}, nil, err return reflect.Value{}, nil, err
} }
value.SetMapIndex(key_value, val_value) val_value, left, err = deserializeValue(ctx, left, t.Elem())
if err != nil {
return reflect.Value{}, nil, err
} }
return value, left, nil value.SetMapIndex(key_value, val_value)
} }
return value, left, nil
case reflect.Struct: case reflect.Struct:
info, mapped := ctx.Types[t] info, mapped := ctx.TypeTypes[t]
if mapped { if mapped {
value := reflect.New(t).Elem() value := reflect.New(t).Elem()
@ -700,7 +520,7 @@ func DeserializeValue(ctx *Context, data []byte, t reflect.Type) (reflect.Value,
if mapped { if mapped {
var field_val reflect.Value var field_val reflect.Value
var err error var err error
field_val, left, err = DeserializeValue(ctx, left, field_info.Type) field_val, left, err = deserializeValue(ctx, left, field_info.Type)
if err != nil { if err != nil {
return reflect.Value{}, nil, err return reflect.Value{}, nil, err
} }
@ -724,7 +544,7 @@ func DeserializeValue(ctx *Context, data []byte, t reflect.Type) (reflect.Value,
return reflect.Value{}, nil, err return reflect.Value{}, nil, err
} }
elem_val, left, err := DeserializeValue(ctx, rest, elem_type) elem_val, left, err := deserializeValue(ctx, rest, elem_type)
if err != nil { if err != nil {
return reflect.Value{}, nil, err return reflect.Value{}, nil, err
} }
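
For orientation, a minimal sketch of the allocation-based API on the gql_cataclysm side, written as if it lived inside the graphvent package and assuming an already-initialized ctx *Context; on master, Serialize instead filled a caller-supplied buffer and returned the number of bytes written.

func roundTripExample(ctx *Context) ([]int, error) {
	original := []int{1, 2, 3}

	// gql_cataclysm: Serialize allocates and returns the encoded bytes.
	serialized, err := Serialize(ctx, original)
	if err != nil {
		return nil, err
	}

	// Deserialize rebuilds the value from those bytes via the generic helper.
	return Deserialize[[]int](ctx, serialized)
}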

@ -7,13 +7,10 @@ import (
) )
func testTypeStack[T any](t *testing.T, ctx *Context) { func testTypeStack[T any](t *testing.T, ctx *Context) {
buffer := [1024]byte{}
reflect_type := reflect.TypeFor[T]() reflect_type := reflect.TypeFor[T]()
written, err := TypeStack(ctx, reflect_type, buffer[:]) stack, err := TypeStack(ctx, reflect_type)
fatalErr(t, err) fatalErr(t, err)
stack := buffer[:written]
ctx.Log.Logf("test", "TypeStack[%s]: %+v", reflect_type, stack) ctx.Log.Logf("test", "TypeStack[%s]: %+v", reflect_type, stack)
unwrapped_type, rest, err := UnwrapStack(ctx, stack) unwrapped_type, rest, err := UnwrapStack(ctx, stack)
@ -44,12 +41,9 @@ func TestSerializeTypes(t *testing.T) {
} }
func testSerializeCompare[T comparable](t *testing.T, ctx *Context, value T) { func testSerializeCompare[T comparable](t *testing.T, ctx *Context, value T) {
buffer := [1024]byte{} serialized, err := Serialize(ctx, value)
written, err := Serialize(ctx, value, buffer[:])
fatalErr(t, err) fatalErr(t, err)
serialized := buffer[:written]
ctx.Log.Logf("test", "Serialized Value[%s : %+v]: %+v", reflect.TypeFor[T](), value, serialized) ctx.Log.Logf("test", "Serialized Value[%s : %+v]: %+v", reflect.TypeFor[T](), value, serialized)
deserialized, err := Deserialize[T](ctx, serialized) deserialized, err := Deserialize[T](ctx, serialized)
@ -63,12 +57,9 @@ func testSerializeCompare[T comparable](t *testing.T, ctx *Context, value T) {
} }
func testSerializeList[L []T, T comparable](t *testing.T, ctx *Context, value L) { func testSerializeList[L []T, T comparable](t *testing.T, ctx *Context, value L) {
buffer := [1024]byte{} serialized, err := Serialize(ctx, value)
written, err := Serialize(ctx, value, buffer[:])
fatalErr(t, err) fatalErr(t, err)
serialized := buffer[:written]
ctx.Log.Logf("test", "Serialized Value[%s : %+v]: %+v", reflect.TypeFor[L](), value, serialized) ctx.Log.Logf("test", "Serialized Value[%s : %+v]: %+v", reflect.TypeFor[L](), value, serialized)
deserialized, err := Deserialize[L](ctx, serialized) deserialized, err := Deserialize[L](ctx, serialized)
@ -84,13 +75,9 @@ func testSerializeList[L []T, T comparable](t *testing.T, ctx *Context, value L)
} }
func testSerializePointer[P interface {*T}, T comparable](t *testing.T, ctx *Context, value P) { func testSerializePointer[P interface {*T}, T comparable](t *testing.T, ctx *Context, value P) {
buffer := [1024]byte{} serialized, err := Serialize(ctx, value)
written, err := Serialize(ctx, value, buffer[:])
fatalErr(t, err) fatalErr(t, err)
serialized := buffer[:written]
ctx.Log.Logf("test", "Serialized Value[%s : %+v]: %+v", reflect.TypeFor[P](), value, serialized) ctx.Log.Logf("test", "Serialized Value[%s : %+v]: %+v", reflect.TypeFor[P](), value, serialized)
deserialized, err := Deserialize[P](ctx, serialized) deserialized, err := Deserialize[P](ctx, serialized)
@ -110,12 +97,9 @@ func testSerializePointer[P interface {*T}, T comparable](t *testing.T, ctx *Con
} }
func testSerialize[T any](t *testing.T, ctx *Context, value T) { func testSerialize[T any](t *testing.T, ctx *Context, value T) {
buffer := [1024]byte{} serialized, err := Serialize(ctx, value)
written, err := Serialize(ctx, value, buffer[:])
fatalErr(t, err) fatalErr(t, err)
serialized := buffer[:written]
ctx.Log.Logf("test", "Serialized Value[%s : %+v]: %+v", reflect.TypeFor[T](), value, serialized) ctx.Log.Logf("test", "Serialized Value[%s : %+v]: %+v", reflect.TypeFor[T](), value, serialized)
deserialized, err := Deserialize[T](ctx, serialized) deserialized, err := Deserialize[T](ctx, serialized)
@ -160,17 +144,7 @@ func TestSerializeValues(t *testing.T) {
testSerializeCompare[*int](t, ctx, nil) testSerializeCompare[*int](t, ctx, nil)
testSerializeCompare(t, ctx, "string") testSerializeCompare(t, ctx, "string")
testSerialize(t, ctx, map[string]string{ node, err := NewNode(ctx, nil, "Base", 100)
"Test": "Test",
"key": "String",
"": "",
})
testSerialize[map[string]string](t, ctx, nil)
testSerialize(t, ctx, NewListenerExt(10))
node, err := ctx.NewNode(nil, "Node")
fatalErr(t, err) fatalErr(t, err)
testSerialize(t, ctx, node) testSerialize(t, ctx, node)
} }
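
The node round-trip test above also reflects the changed node constructor. A sketch of that path on the gql_cataclysm side, assuming an in-package caller with a ready ctx; the nil and 100 arguments are copied verbatim from the test and their exact meaning (key material and buffer sizing) is not spelled out in this hunk, while master used ctx.NewNode(nil, "Node") instead.

func nodeSerializeExample(ctx *Context) ([]byte, error) {
	// Create a bare "Base" node with no extensions, as the test does.
	node, err := NewNode(ctx, nil, "Base", 100)
	if err != nil {
		return nil, err
	}

	// Nodes go through the same Serialize entry point as plain values.
	return Serialize(ctx, node)
}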

@ -21,6 +21,13 @@ func (signal TimeoutSignal) String() string {
return fmt.Sprintf("TimeoutSignal(%s)", &signal.ResponseHeader) return fmt.Sprintf("TimeoutSignal(%s)", &signal.ResponseHeader)
} }
type SignalDirection int
const (
Up SignalDirection = iota
Down
Direct
)
type SignalHeader struct { type SignalHeader struct {
Id uuid.UUID `gv:"id"` Id uuid.UUID `gv:"id"`
} }
@ -30,7 +37,7 @@ func (signal SignalHeader) ID() uuid.UUID {
} }
func (header SignalHeader) String() string { func (header SignalHeader) String() string {
return fmt.Sprintf("%s", header.Id) return fmt.Sprintf("SignalHeader(%s)", header.Id)
} }
type ResponseSignal interface { type ResponseSignal interface {
@ -48,7 +55,7 @@ func (header ResponseHeader) ResponseID() uuid.UUID {
} }
func (header ResponseHeader) String() string { func (header ResponseHeader) String() string {
return fmt.Sprintf("%s for %s", header.Id, header.ReqID) return fmt.Sprintf("ResponseHeader(%s, %s)", header.Id, header.ReqID)
} }
type Signal interface { type Signal interface {
@ -164,16 +171,16 @@ func NewACLTimeoutSignal(req_id uuid.UUID) *ACLTimeoutSignal {
type StatusSignal struct { type StatusSignal struct {
SignalHeader SignalHeader
Source NodeID `gv:"source"` Source NodeID `gv:"source"`
Fields []string `gv:"fields"` Changes map[ExtType]Changes `gv:"changes"`
} }
func (signal StatusSignal) String() string { func (signal StatusSignal) String() string {
return fmt.Sprintf("StatusSignal(%s: %+v)", signal.Source, signal.Fields) return fmt.Sprintf("StatusSignal(%s, %+v)", signal.SignalHeader, signal.Changes)
} }
func NewStatusSignal(source NodeID, fields []string) *StatusSignal { func NewStatusSignal(source NodeID, changes map[ExtType]Changes) *StatusSignal {
return &StatusSignal{ return &StatusSignal{
NewSignalHeader(), NewSignalHeader(),
source, source,
fields, changes,
} }
} }
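
StatusSignal construction differs accordingly between the two branches. A hedged sketch of the gql_cataclysm form; the Changes value is left as an empty map because its internal shape is not visible in this hunk, and on master the second argument would be a []string of field names instead.

func statusExample(source NodeID) *StatusSignal {
	// gql_cataclysm: status updates are grouped per extension type.
	changes := map[ExtType]Changes{}
	return NewStatusSignal(source, changes)
}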
@ -198,44 +205,32 @@ func NewLinkSignal(action string, id NodeID) Signal {
type LockSignal struct { type LockSignal struct {
SignalHeader SignalHeader
State string
} }
func (signal LockSignal) String() string { func (signal LockSignal) String() string {
return fmt.Sprintf("LockSignal(%s)", signal.SignalHeader) return fmt.Sprintf("LockSignal(%s, %s)", signal.SignalHeader, signal.State)
} }
func NewLockSignal() *LockSignal { func NewLockSignal(state string) *LockSignal {
return &LockSignal{ return &LockSignal{
NewSignalHeader(), NewSignalHeader(),
state,
} }
} }
type UnlockSignal struct {
SignalHeader
}
func (signal UnlockSignal) String() string {
return fmt.Sprintf("UnlockSignal(%s)", signal.SignalHeader)
}
func NewUnlockSignal() *UnlockSignal {
return &UnlockSignal{
NewSignalHeader(),
}
}
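
The two branches model lock release differently: master sends a dedicated UnlockSignal, while gql_cataclysm reuses LockSignal with a State string. A minimal sketch of the gql_cataclysm side; the "lock" and "unlock" state values are assumptions, since the accepted strings are not shown in this hunk.

func lockUnlockExample() (*LockSignal, *LockSignal) {
	lock := NewLockSignal("lock")     // request the lock
	unlock := NewLockSignal("unlock") // release it with the same signal type
	return lock, unlock
}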
type ReadSignal struct { type ReadSignal struct {
SignalHeader SignalHeader
Fields []string `json:"extensions"` Extensions map[ExtType][]string `json:"extensions"`
} }
func (signal ReadSignal) String() string { func (signal ReadSignal) String() string {
return fmt.Sprintf("ReadSignal(%s, %+v)", signal.SignalHeader, signal.Fields) return fmt.Sprintf("ReadSignal(%s, %+v)", signal.SignalHeader, signal.Extensions)
} }
func NewReadSignal(fields []string) *ReadSignal { func NewReadSignal(exts map[ExtType][]string) *ReadSignal {
return &ReadSignal{ return &ReadSignal{
NewSignalHeader(), NewSignalHeader(),
fields, exts,
} }
} }
@ -243,19 +238,19 @@ type ReadResultSignal struct {
ResponseHeader ResponseHeader
NodeID NodeID NodeID NodeID
NodeType NodeType NodeType NodeType
Fields map[string]any Extensions map[ExtType]map[string]any
} }
func (signal ReadResultSignal) String() string { func (signal ReadResultSignal) String() string {
return fmt.Sprintf("ReadResultSignal(%s, %s, %+v)", signal.ResponseHeader, signal.NodeID, signal.Fields) return fmt.Sprintf("ReadResultSignal(%s, %s, %+v)", signal.ResponseHeader, signal.NodeID, signal.Extensions)
} }
func NewReadResultSignal(req_id uuid.UUID, node_id NodeID, node_type NodeType, fields map[string]any) *ReadResultSignal { func NewReadResultSignal(req_id uuid.UUID, node_id NodeID, node_type NodeType, exts map[ExtType]map[string]any) *ReadResultSignal {
return &ReadResultSignal{ return &ReadResultSignal{
NewResponseHeader(req_id), NewResponseHeader(req_id),
node_id, node_id,
node_type, node_type,
fields, exts,
} }
} }
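
Reads follow the same per-extension reshaping: the gql_cataclysm request maps each ExtType to the field names wanted from it, and the result mirrors that as field-name-to-value maps per extension, where master used flat field lists. A sketch assuming an in-package caller; the ExtType value, the "requirements" field name, and the nil placeholder value are illustrative only.

func readExample(ext ExtType, target NodeID, target_type NodeType) (*ReadSignal, *ReadResultSignal) {
	// Request one field from a single extension on the target node.
	req := NewReadSignal(map[ExtType][]string{
		ext: {"requirements"},
	})

	// A matching result echoes the request ID and fills in the values read.
	res := NewReadResultSignal(req.ID(), target, target_type, map[ExtType]map[string]any{
		ext: {"requirements": nil},
	})
	return req, res
}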