2023-07-21 15:16:35 -06:00
|
|
|
package graphvent
|
|
|
|
import (
  "fmt"
  "reflect"
  "time"

  "github.com/google/uuid"
  "github.com/graphql-go/graphql"
  "github.com/graphql-go/graphql/language/ast"
)
|
|
|
|
|
2023-09-18 11:15:58 -06:00
|
|
|
func ResolveNodeID(p graphql.ResolveParams) (interface{}, error) {
|
|
|
|
node, ok := p.Source.(NodeResult)
|
|
|
|
if ok == false {
|
|
|
|
return nil, fmt.Errorf("Can't get NodeID from %+v", reflect.TypeOf(p.Source))
|
|
|
|
}
|
|
|
|
|
2023-09-18 12:02:30 -06:00
|
|
|
return node.NodeID, nil
|
2023-09-18 11:15:58 -06:00
|
|
|
}
|
|
|
|
|
|
|
|
func ResolveNodeTypeHash(p graphql.ResolveParams) (interface{}, error) {
|
|
|
|
node, ok := p.Source.(NodeResult)
|
|
|
|
if ok == false {
|
|
|
|
return nil, fmt.Errorf("Can't get TypeHash from %+v", reflect.TypeOf(p.Source))
|
|
|
|
}
|
|
|
|
|
2023-09-18 12:02:30 -06:00
|
|
|
return uint64(node.NodeType), nil
|
2023-09-18 11:15:58 -06:00
|
|
|
}
|
|
|
|
|
2023-07-29 16:34:21 -06:00
|
|
|
func GetFieldNames(ctx *Context, selection_set *ast.SelectionSet) []string {
|
2023-07-29 11:03:41 -06:00
|
|
|
names := []string{}
|
2023-07-29 16:34:21 -06:00
|
|
|
if selection_set == nil {
|
|
|
|
return names
|
|
|
|
}
|
2023-07-29 11:03:41 -06:00
|
|
|
|
2023-07-29 16:34:21 -06:00
|
|
|
for _, sel := range(selection_set.Selections) {
|
|
|
|
switch field := sel.(type) {
|
|
|
|
case *ast.Field:
|
|
|
|
names = append(names, field.Name.Value)
|
|
|
|
case *ast.InlineFragment:
|
|
|
|
names = append(names, GetFieldNames(ctx, field.SelectionSet)...)
|
|
|
|
default:
|
|
|
|
ctx.Log.Logf("gql", "Unknown selection type: %s", reflect.TypeOf(field))
|
2023-07-29 11:03:41 -06:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return names
|
|
|
|
}
|
|
|
|
|
2023-07-29 16:34:21 -06:00
|
|
|
func GetResolveFields(ctx *Context, p graphql.ResolveParams) []string {
|
|
|
|
names := []string{}
|
|
|
|
for _, field := range(p.Info.FieldASTs) {
|
|
|
|
names = append(names, GetFieldNames(ctx, field.SelectionSet)...)
|
|
|
|
}
|
|
|
|
|
|
|
|
return names
|
|
|
|
}
|
|
|
|
|
2023-07-29 19:16:33 -06:00
|
|
|
func ResolveNodes(ctx *ResolveContext, p graphql.ResolveParams, ids []NodeID) ([]NodeResult, error) {
|
2023-07-29 18:27:52 -06:00
|
|
|
fields := GetResolveFields(ctx.Context, p)
|
2023-10-15 22:43:11 -06:00
|
|
|
ctx.Context.Log.Logf("gql_resolve_node", "RESOLVE_NODES(%+v): %+v", ids, fields)
|
2023-07-29 18:27:52 -06:00
|
|
|
|
2023-07-31 20:53:56 -06:00
|
|
|
resp_channels := map[uuid.UUID]chan Signal{}
|
2023-09-18 12:02:30 -06:00
|
|
|
indices := map[uuid.UUID]int{}
|
|
|
|
|
|
|
|
// Get a list of fields that will be written
|
|
|
|
ext_fields, err := ctx.GQLContext.GetACLFields(p.Info.FieldName, fields)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
2023-10-15 22:43:11 -06:00
|
|
|
ctx.Context.Log.Logf("gql_resolve_node", "ACL Fields from request: %+v", ext_fields)
|
2023-09-18 12:02:30 -06:00
|
|
|
|
|
|
|
responses := make([]NodeResult, len(ids))
|
|
|
|
|
|
|
|
for i, id := range(ids) {
|
|
|
|
var read_signal *ReadSignal = nil
|
|
|
|
|
|
|
|
node, cached := ctx.NodeCache[id]
|
|
|
|
if cached == true {
|
|
|
|
resolve := false
|
|
|
|
missing_exts := map[ExtType][]string{}
|
|
|
|
for ext_type, fields := range(ext_fields) {
|
|
|
|
cached_ext, exists := node.Data[ext_type]
|
|
|
|
if exists == true {
|
|
|
|
missing_fields := []string{}
|
|
|
|
for _, field_name := range(fields) {
|
|
|
|
_, found := cached_ext[field_name]
|
|
|
|
if found == false {
|
|
|
|
missing_fields = append(missing_fields, field_name)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if len(missing_fields) > 0 {
|
|
|
|
missing_exts[ext_type] = missing_fields
|
|
|
|
resolve = true
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
missing_exts[ext_type] = fields
|
|
|
|
resolve = true
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if resolve == true {
|
|
|
|
read_signal = NewReadSignal(missing_exts)
|
2023-10-15 22:43:11 -06:00
|
|
|
ctx.Context.Log.Logf("gql_resolve_node", "sending read for %+v because of missing fields %+v", id, missing_exts)
|
2023-09-18 12:02:30 -06:00
|
|
|
} else {
|
2023-10-15 22:43:11 -06:00
|
|
|
ctx.Context.Log.Logf("gql_resolve_node", "Using cached response for %+v(%d)", id, i)
|
2023-09-18 12:02:30 -06:00
|
|
|
responses[i] = node
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
} else {
|
2023-10-15 22:43:11 -06:00
|
|
|
ctx.Context.Log.Logf("gql_resolve_node", "sending read for %+v", id)
|
2023-09-18 12:02:30 -06:00
|
|
|
read_signal = NewReadSignal(ext_fields)
|
2023-07-29 19:16:33 -06:00
|
|
|
}
|
|
|
|
// Create a read signal, send it to the specified node, and add the wait to the response map if the send returns no error
|
2023-08-08 14:00:17 -06:00
|
|
|
msgs := Messages{}
|
2023-10-14 15:05:23 -06:00
|
|
|
msgs = msgs.Add(ctx.Context, id, ctx.Server, ctx.Authorization, read_signal)
|
2023-07-29 19:16:33 -06:00
|
|
|
|
2023-10-01 16:45:03 -06:00
|
|
|
response_chan := ctx.Ext.GetResponseChannel(read_signal.ID())
|
|
|
|
resp_channels[read_signal.ID()] = response_chan
|
|
|
|
indices[read_signal.ID()] = i
|
2023-07-31 20:53:56 -06:00
|
|
|
|
2023-09-14 15:50:08 -06:00
|
|
|
// TODO: Send all at once instead of creating Messages for each
|
2023-08-08 14:00:17 -06:00
|
|
|
err = ctx.Context.Send(msgs)
|
2023-07-29 19:16:33 -06:00
|
|
|
if err != nil {
|
2023-10-01 16:45:03 -06:00
|
|
|
ctx.Ext.FreeResponseChannel(read_signal.ID())
|
2023-07-29 19:16:33 -06:00
|
|
|
return nil, err
|
|
|
|
}
|
2023-07-29 18:27:52 -06:00
|
|
|
}
|
2023-07-29 16:00:01 -06:00
|
|
|
|
2023-10-14 15:53:20 -06:00
|
|
|
errors := ""
|
2023-07-31 20:53:56 -06:00
|
|
|
for sig_id, response_chan := range(resp_channels) {
|
2023-07-29 19:16:33 -06:00
|
|
|
// Wait for the response, returning an error on timeout
|
2023-11-01 19:32:36 -06:00
|
|
|
response, other, err := WaitForResponse(response_chan, time.Millisecond*100, sig_id)
|
2023-07-29 19:16:33 -06:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
2023-10-15 22:43:11 -06:00
|
|
|
ctx.Context.Log.Logf("gql_resolve_node", "GQL node response: %+v", response)
|
2023-11-01 19:32:36 -06:00
|
|
|
ctx.Context.Log.Logf("gql_resolve_node", "GQL node other messages: %+v", other)
|
|
|
|
|
|
|
|
// for now, just put signals we didn't want back into the 'queue'
|
|
|
|
for _, other_signal := range(other) {
|
|
|
|
response_chan <- other_signal
|
|
|
|
}
|
2023-10-14 15:53:20 -06:00
|
|
|
|
|
|
|
error_signal, is_error := response.(*ErrorSignal)
|
|
|
|
if is_error {
|
|
|
|
errors = fmt.Sprintf("%s, %s", errors, error_signal.Error)
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
|
|
|
read_response, is_read_response := response.(*ReadResultSignal)
|
|
|
|
if is_read_response == false {
|
|
|
|
errors = fmt.Sprintf("%s, wrong response type %+v", errors, reflect.TypeOf(response))
|
|
|
|
continue
|
|
|
|
}
|
2023-09-18 12:02:30 -06:00
|
|
|
|
|
|
|
idx := indices[sig_id]
|
|
|
|
responses[idx] = NodeResult{
|
2023-10-14 15:53:20 -06:00
|
|
|
read_response.NodeID,
|
|
|
|
read_response.NodeType,
|
|
|
|
read_response.Extensions,
|
2023-09-18 12:02:30 -06:00
|
|
|
}
|
|
|
|
|
2023-10-14 15:53:20 -06:00
|
|
|
cache, exists := ctx.NodeCache[read_response.NodeID]
|
2023-09-18 12:02:30 -06:00
|
|
|
if exists == true {
|
2023-10-15 22:43:11 -06:00
|
|
|
ctx.Context.Log.Logf("gql_resolve_node", "Merging new response with cached: %s, %+v - %+v", read_response.NodeID, cache, read_response.Extensions)
|
2023-10-14 15:53:20 -06:00
|
|
|
for ext_type, fields := range(read_response.Extensions) {
|
2023-09-18 12:02:30 -06:00
|
|
|
cached_fields, exists := cache.Data[ext_type]
|
2023-11-01 19:32:36 -06:00
|
|
|
if exists == false {
|
|
|
|
cached_fields = map[string]SerializedValue{}
|
|
|
|
cache.Data[ext_type] = cached_fields
|
|
|
|
}
|
|
|
|
for field_name, field_value := range(fields) {
|
|
|
|
cached_fields[field_name] = field_value
|
2023-09-18 12:02:30 -06:00
|
|
|
}
|
|
|
|
}
|
2023-10-15 22:43:11 -06:00
|
|
|
responses[idx] = cache
|
2023-09-18 12:02:30 -06:00
|
|
|
} else {
|
2023-10-15 22:43:11 -06:00
|
|
|
ctx.Context.Log.Logf("gql_resolve_node", "Adding new response to node cache: %s, %+v", read_response.NodeID, read_response.Extensions)
|
2023-10-14 15:53:20 -06:00
|
|
|
ctx.NodeCache[read_response.NodeID] = responses[idx]
|
2023-09-18 12:02:30 -06:00
|
|
|
}
|
2023-07-29 18:27:52 -06:00
|
|
|
}
|
2023-07-29 11:03:41 -06:00
|
|
|
|
2023-10-14 15:53:20 -06:00
|
|
|
if errors != "" {
|
|
|
|
return nil, fmt.Errorf(errors)
|
|
|
|
}
|
2023-10-15 22:43:11 -06:00
|
|
|
|
|
|
|
ctx.Context.Log.Logf("gql_resolve_node", "RESOLVED_NODES %+v - %+v", ids, responses)
|
2023-07-29 19:16:33 -06:00
|
|
|
return responses, nil
|
2023-07-21 15:16:35 -06:00
|
|
|
}
|