diff --git a/lib/graphql/dataloader.rb b/lib/graphql/dataloader.rb
index 92ddac4f6c..ca8c77725d 100644
--- a/lib/graphql/dataloader.rb
+++ b/lib/graphql/dataloader.rb
@@ -64,6 +64,7 @@ def initialize(nonblocking: self.class.default_nonblocking, fiber_limit: self.cl
         @nonblocking = nonblocking
       end
       @fiber_limit = fiber_limit
+      @lazies_at_depth = Hash.new { |h, k| h[k] = [] }
     end
 
     # @return [Integer, nil]
@@ -140,10 +141,10 @@ def yield(source = Fiber[:__graphql_current_dataloader_source])
     end
 
     # @api private
-    def append_job(&job)
+    def append_job(callable = nil, &job)
       # Given a block, queue it up to be worked through when `#run` is called.
-      # (If the dataloader is already running, than a Fiber will pick this up later.)
-      @pending_jobs.push(job)
+      # (If the dataloader is already running, then a Fiber will pick this up later.)
+      @pending_jobs.push(callable || job)
       nil
     end
 
@@ -160,6 +161,10 @@ def clear_cache
     def run_isolated
       prev_queue = @pending_jobs
       prev_pending_keys = {}
+      prev_lazies_at_depth = @lazies_at_depth
+      @lazies_at_depth = @lazies_at_depth.dup.clear
+      # Clear pending loads but keep already-cached records
+      # in case they are useful to the given block.
       @source_cache.each do |source_class, batched_sources|
         batched_sources.each do |batch_args, batched_source_instance|
           if batched_source_instance.pending?
@@ -179,6 +184,7 @@ def run_isolated
       res
     ensure
       @pending_jobs = prev_queue
+      @lazies_at_depth = prev_lazies_at_depth
       prev_pending_keys.each do |source_instance, pending|
         pending.each do |key, value|
           if !source_instance.results.key?(key)
@@ -188,7 +194,8 @@ def run_isolated
       end
     end
 
-    def run
+    # @param trace_query_lazy [nil, Execution::Multiplex]
+    def run(trace_query_lazy: nil)
       trace = Fiber[:__graphql_current_multiplex]&.current_trace
       jobs_fiber_limit, total_fiber_limit = calculate_fiber_limit
       job_fibers = []
@@ -201,26 +208,13 @@ def run
       while first_pass || !job_fibers.empty?
         first_pass = false
 
-        while (f = (job_fibers.shift || (((next_job_fibers.size + job_fibers.size) < jobs_fiber_limit) && spawn_job_fiber(trace))))
-          if f.alive?
-            finished = run_fiber(f)
-            if !finished
-              next_job_fibers << f
-            end
-          end
-        end
-        join_queues(job_fibers, next_job_fibers)
-
-        while (!source_fibers.empty? || @source_cache.each_value.any? { |group_sources| group_sources.each_value.any?(&:pending?) })
-          while (f = source_fibers.shift || (((job_fibers.size + source_fibers.size + next_source_fibers.size + next_job_fibers.size) < total_fiber_limit) && spawn_source_fiber(trace)))
-            if f.alive?
-              finished = run_fiber(f)
-              if !finished
-                next_source_fibers << f
-              end
-            end
+        run_pending_steps(trace, job_fibers, next_job_fibers, jobs_fiber_limit, source_fibers, next_source_fibers, total_fiber_limit)
+
+        if !@lazies_at_depth.empty?
+          with_trace_query_lazy(trace_query_lazy) do
+            run_next_pending_lazies(job_fibers, trace)
+            run_pending_steps(trace, job_fibers, next_job_fibers, jobs_fiber_limit, source_fibers, next_source_fibers, total_fiber_limit)
           end
-          join_queues(source_fibers, next_source_fibers)
         end
       end
@@ -248,6 +242,11 @@ def run_fiber(f)
       f.resume
     end
 
+    # @api private
+    def lazy_at_depth(depth, lazy)
+      @lazies_at_depth[depth] << lazy
+    end
+
     def spawn_fiber
       fiber_vars = get_fiber_variables
       Fiber.new(blocking: !@nonblocking) {
@@ -275,6 +274,59 @@ def merge_records(records, index_by: :id)
 
     private
 
+    def run_next_pending_lazies(job_fibers, trace)
+      smallest_depth = nil
+      @lazies_at_depth.each_key do |depth_key|
+        smallest_depth ||= depth_key
+        if depth_key < smallest_depth
+          smallest_depth = depth_key
+        end
+      end
+
+      if smallest_depth
+        lazies = @lazies_at_depth.delete(smallest_depth)
+        if !lazies.empty?
+          lazies.each do |l|
+            append_job { l.value }
+          end
+          job_fibers.unshift(spawn_job_fiber(trace))
+        end
+      end
+    end
+
+    def run_pending_steps(trace, job_fibers, next_job_fibers, jobs_fiber_limit, source_fibers, next_source_fibers, total_fiber_limit)
+      while (f = (job_fibers.shift || (((next_job_fibers.size + job_fibers.size) < jobs_fiber_limit) && spawn_job_fiber(trace))))
+        if f.alive?
+          finished = run_fiber(f)
+          if !finished
+            next_job_fibers << f
+          end
+        end
+      end
+      join_queues(job_fibers, next_job_fibers)
+
+      while (!source_fibers.empty? || @source_cache.each_value.any? { |group_sources| group_sources.each_value.any?(&:pending?) })
+        while (f = source_fibers.shift || (((job_fibers.size + source_fibers.size + next_source_fibers.size + next_job_fibers.size) < total_fiber_limit) && spawn_source_fiber(trace)))
+          if f.alive?
+            finished = run_fiber(f)
+            if !finished
+              next_source_fibers << f
+            end
+          end
+        end
+        join_queues(source_fibers, next_source_fibers)
+      end
+    end
+
+    def with_trace_query_lazy(multiplex_or_nil, &block)
+      if (multiplex = multiplex_or_nil)
+        query = multiplex.queries.length == 1 ? multiplex.queries[0] : nil
+        multiplex.current_trace.execute_query_lazy(query: query, multiplex: multiplex, &block)
+      else
+        yield
+      end
+    end
+
     def calculate_fiber_limit
       total_fiber_limit = @fiber_limit || Float::INFINITY
       if total_fiber_limit < 4
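
Two details of the `Dataloader` changes above are worth noting: `append_job` now accepts any `#call`-able in place of a block (`@pending_jobs.push(callable || job)` prefers the explicit argument), and lazy values are registered on the dataloader itself via `lazy_at_depth`. A minimal sketch of the two `append_job` call styles; `do_work` is a hypothetical stand-in, not part of this patch:

```ruby
require "graphql"

def do_work
  # stand-in for real field-resolution work
end

dataloader = GraphQL::Dataloader.new
dataloader.append_job { do_work }        # block form, as before
dataloader.append_job(method(:do_work))  # new: pass any #call-able directly
dataloader.run
```
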
diff --git a/lib/graphql/dataloader/async_dataloader.rb b/lib/graphql/dataloader/async_dataloader.rb
index cdd77d4e61..9781dda03b 100644
--- a/lib/graphql/dataloader/async_dataloader.rb
+++ b/lib/graphql/dataloader/async_dataloader.rb
@@ -14,7 +14,7 @@ def yield(source = Fiber[:__graphql_current_dataloader_source])
         nil
       end
 
-      def run
+      def run(trace_query_lazy: nil)
        trace = Fiber[:__graphql_current_multiplex]&.current_trace
        jobs_fiber_limit, total_fiber_limit = calculate_fiber_limit
        job_fibers = []
@@ -29,16 +29,7 @@ def run
            first_pass = false
            fiber_vars = get_fiber_variables
 
-            while (f = (job_fibers.shift || (((job_fibers.size + next_job_fibers.size + source_tasks.size) < jobs_fiber_limit) && spawn_job_fiber(trace))))
-              if f.alive?
-                finished = run_fiber(f)
-                if !finished
-                  next_job_fibers << f
-                end
-              end
-            end
-            job_fibers.concat(next_job_fibers)
-            next_job_fibers.clear
+            run_pending_steps(job_fibers, next_job_fibers, source_tasks, jobs_fiber_limit, trace)
 
            Sync do |root_task|
              set_fiber_variables(fiber_vars)
@@ -54,6 +45,13 @@ def run
                next_source_tasks.clear
              end
            end
+
+            if !@lazies_at_depth.empty?
+              with_trace_query_lazy(trace_query_lazy) do
+                run_next_pending_lazies(job_fibers, trace)
+                run_pending_steps(job_fibers, next_job_fibers, source_tasks, jobs_fiber_limit, trace)
+              end
+            end
          end
        end
        trace&.end_dataloader(self)
      end
@@ -69,6 +67,19 @@ def run
 
      private
 
+      def run_pending_steps(job_fibers, next_job_fibers, source_tasks, jobs_fiber_limit, trace)
+        while (f = (job_fibers.shift || (((job_fibers.size + next_job_fibers.size + source_tasks.size) < jobs_fiber_limit) && spawn_job_fiber(trace))))
+          if f.alive?
+            finished = run_fiber(f)
+            if !finished
+              next_job_fibers << f
+            end
+          end
+        end
+        job_fibers.concat(next_job_fibers)
+        next_job_fibers.clear
+      end
+
      def spawn_source_task(parent_task, condition, trace)
        pending_sources = nil
        @source_cache.each_value do |source_by_batch_params|
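
`AsyncDataloader` gets the parallel treatment: its job loop moves into a private `run_pending_steps`, and pending lazies are drained between passes under the same `with_trace_query_lazy` wrapper. For context, a schema opts into this subclass the usual way (a sketch; `MySchema` is a placeholder, and the `async` gem must be available):

```ruby
require "graphql"

class MySchema < GraphQL::Schema
  # Runs Dataloader sources with nonblocking fibers via the async gem
  use GraphQL::Dataloader::AsyncDataloader
end
```
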
diff --git a/lib/graphql/dataloader/null_dataloader.rb b/lib/graphql/dataloader/null_dataloader.rb
index 7fd222d7fe..61b7aa820e 100644
--- a/lib/graphql/dataloader/null_dataloader.rb
+++ b/lib/graphql/dataloader/null_dataloader.rb
@@ -2,24 +2,58 @@
 module GraphQL
   class Dataloader
-    # The default implementation of dataloading -- all no-ops.
+    # GraphQL-Ruby uses this when Dataloader isn't enabled.
     #
-    # The Dataloader interface isn't public, but it enables
-    # simple internal code while adding the option to add Dataloader.
+    # It runs execution code inline and gathers lazy objects (eg. Promises)
+    # and resolves them during {#run}.
     class NullDataloader < Dataloader
-      # These are all no-ops because code was
-      # executed synchronously.
+      def initialize(*)
+        @lazies_at_depth = Hash.new { |h, k| h[k] = [] }
+      end
+
+      def freeze
+        @lazies_at_depth.default_proc = nil
+        @lazies_at_depth.freeze
+        super
+      end
+
+      def run(trace_query_lazy: nil)
+        with_trace_query_lazy(trace_query_lazy) do
+          while !@lazies_at_depth.empty?
+            smallest_depth = nil
+            @lazies_at_depth.each_key do |depth_key|
+              smallest_depth ||= depth_key
+              if depth_key < smallest_depth
+                smallest_depth = depth_key
+              end
+            end
+
+            if smallest_depth
+              lazies = @lazies_at_depth.delete(smallest_depth)
+              lazies.each(&:value) # resolve these Lazy instances
+            end
+          end
+        end
+      end
+
+      def run_isolated
+        new_dl = self.class.new
+        res = nil
+        new_dl.append_job {
+          res = yield
+        }
+        new_dl.run
+        res
+      end
 
-      def initialize(*); end
-      def run; end
-      def run_isolated; yield; end
       def clear_cache; end
+
       def yield(_source)
         raise GraphQL::Error, "GraphQL::Dataloader is not running -- add `use GraphQL::Dataloader` to your schema to use Dataloader sources."
       end
 
-      def append_job
-        yield
+      def append_job(callable = nil)
+        callable ? callable.call : yield
         nil
       end
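
`NullDataloader#run` now resolves gathered lazies shallowest-depth-first, so sibling lazies at the same depth are still resolved as a group. A standalone sketch of that draining order, using plain lambdas as stand-ins for `GraphQL::Execution::Lazy` instances:

```ruby
lazies_at_depth = {
  2 => [-> { puts "depth 2" }],
  1 => [-> { puts "depth 1" }, -> { puts "depth 1, again" }],
}

until lazies_at_depth.empty?
  depth = lazies_at_depth.keys.min            # always drain the shallowest level first
  lazies_at_depth.delete(depth).each(&:call)  # resolving these may enqueue deeper lazies
end
# Prints: depth 1 / depth 1, again / depth 2
```
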
diff --git a/lib/graphql/execution/interpreter.rb b/lib/graphql/execution/interpreter.rb
index ba0b94b2aa..bbc778355d 100644
--- a/lib/graphql/execution/interpreter.rb
+++ b/lib/graphql/execution/interpreter.rb
@@ -42,7 +42,6 @@ def run_all(schema, query_options, context: {}, max_complexity: schema.max_compl
           trace.execute_multiplex(multiplex: multiplex) do
             schema = multiplex.schema
             queries = multiplex.queries
-            lazies_at_depth = Hash.new { |h, k| h[k] = [] }
             multiplex_analyzers = schema.multiplex_analyzers
             if multiplex.max_complexity
               multiplex_analyzers += [GraphQL::Analysis::MaxQueryComplexity]
@@ -73,7 +72,7 @@ def run_all(schema, query_options, context: {}, max_complexity: schema.max_compl
               # Although queries in a multiplex _share_ an Interpreter instance,
               # they also have another item of state, which is private to that query
               # in particular, assign it here:
-              runtime = Runtime.new(query: query, lazies_at_depth: lazies_at_depth)
+              runtime = Runtime.new(query: query)
               query.context.namespace(:interpreter_runtime)[:runtime] = runtime
 
               query.current_trace.execute_query(query: query) do
@@ -88,16 +87,7 @@ def run_all(schema, query_options, context: {}, max_complexity: schema.max_compl
               }
             end
 
-            multiplex.dataloader.run
-
-            # Then, work through lazy results in a breadth-first way
-            multiplex.dataloader.append_job {
-              query = multiplex.queries.length == 1 ? multiplex.queries[0] : nil
-              multiplex.current_trace.execute_query_lazy(multiplex: multiplex, query: query) do
-                Interpreter::Resolve.resolve_each_depth(lazies_at_depth, multiplex.dataloader)
-              end
-            }
-            multiplex.dataloader.run
+            multiplex.dataloader.run(trace_query_lazy: multiplex)
 
             # Then, find all errors and assign the result to the query object
             results.each_with_index do |data_result, idx|
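
The call-site change above is the heart of the patch: what used to be two `run` cycles with a manually appended lazy-resolution job is now a single call, with the `execute_query_lazy` trace event emitted inside the dataloader. Schematically (a paraphrase of the removed and added lines, not literal source):

```ruby
# Before: run jobs, then append and run breadth-first lazy resolution by hand
multiplex.dataloader.run
multiplex.dataloader.append_job {
  Interpreter::Resolve.resolve_each_depth(lazies_at_depth, multiplex.dataloader)
}
multiplex.dataloader.run

# After: one run; passing the multiplex wraps the lazy phase in tracing
multiplex.dataloader.run(trace_query_lazy: multiplex)
```
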
diff --git a/lib/graphql/execution/interpreter/resolve.rb b/lib/graphql/execution/interpreter/resolve.rb
index 570ab48e9d..102ceacb3c 100644
--- a/lib/graphql/execution/interpreter/resolve.rb
+++ b/lib/graphql/execution/interpreter/resolve.rb
@@ -6,12 +6,17 @@ class Interpreter
       module Resolve
         # Continue field results in `results` until there's nothing else to continue.
         # @return [void]
+        # @deprecated Call `dataloader.run` instead
         def self.resolve_all(results, dataloader)
+          warn "#{self}.#{__method__} is deprecated; Use `dataloader.run` instead.#{caller(1, 5).map { |l| "\n #{l}"}.join}"
           dataloader.append_job { resolve(results, dataloader) }
           nil
         end
 
+        # @deprecated Call `dataloader.run` instead
         def self.resolve_each_depth(lazies_at_depth, dataloader)
+          warn "#{self}.#{__method__} is deprecated; Use `dataloader.run` instead.#{caller(1, 5).map { |l| "\n #{l}"}.join}"
+
           smallest_depth = nil
           lazies_at_depth.each_key do |depth_key|
             smallest_depth ||= depth_key
@@ -34,20 +39,9 @@ def self.resolve_each_depth(lazies_at_depth, dataloader)
           nil
         end
 
-        # After getting `results` back from an interpreter evaluation,
-        # continue it until you get a response-ready Ruby value.
-        #
-        # `results` is one level of _depth_ of a query or multiplex.
-        #
-        # Resolve all lazy values in that depth before moving on
-        # to the next level.
-        #
-        # It's assumed that the lazies will
-        # return {Lazy} instances if there's more work to be done,
-        # or return {Hash}/{Array} if the query should be continued.
-        #
-        # @return [void]
+        # @deprecated Call `dataloader.run` instead
         def self.resolve(results, dataloader)
+          warn "#{self}.#{__method__} is deprecated; Use `dataloader.run` instead.#{caller(1, 5).map { |l| "\n #{l}"}.join}"
           # There might be pending jobs here that _will_ write lazies
           # into the result hash. We should run them out, so we
           # can be sure that all lazies will be present in the result hashes.
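
For external callers of these now-deprecated helpers, migration is mechanical, matching the warning text (the directive spec change later in this diff is an example):

```ruby
# Deprecated:
GraphQL::Execution::Interpreter::Resolve.resolve_all([result], dataloader)

# Replacement -- the dataloader now tracks and resolves its own pending lazies:
dataloader.run
```
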
diff --git a/lib/graphql/execution/interpreter/runtime.rb b/lib/graphql/execution/interpreter/runtime.rb
index 3c1635b945..03a3ff976a 100644
--- a/lib/graphql/execution/interpreter/runtime.rb
+++ b/lib/graphql/execution/interpreter/runtime.rb
@@ -35,11 +35,10 @@ def current_object
         # @return [GraphQL::Query::Context]
         attr_reader :context
 
-        def initialize(query:, lazies_at_depth:)
+        def initialize(query:)
           @query = query
           @current_trace = query.current_trace
           @dataloader = query.multiplex.dataloader
-          @lazies_at_depth = lazies_at_depth
           @schema = query.schema
           @context = query.context
           @response = nil
@@ -365,6 +364,10 @@ def evaluate_selection(result_name, field_ast_nodes_or_ast_node, selections_resu
         else
           @query.arguments_cache.dataload_for(ast_node, field_defn, owner_object) do |resolved_arguments|
             runtime_state = get_current_runtime_state # This might be in a different fiber
+            runtime_state.current_field = field_defn
+            runtime_state.current_arguments = resolved_arguments
+            runtime_state.current_result_name = result_name
+            runtime_state.current_result = selections_result
             evaluate_selection_with_args(resolved_arguments, field_defn, ast_node, field_ast_nodes, owner_object, result_name, selections_result, runtime_state)
           end
         end
@@ -446,7 +449,7 @@ def evaluate_selection_with_resolved_keyword_args(kwarg_arguments, resolved_argu
           }
         end
 
-        field_result = call_method_on_directives(:resolve, object, directives) do
+        call_method_on_directives(:resolve, object, directives) do
           if !directives.empty?
             # This might be executed in a different context; reset this info
             runtime_state = get_current_runtime_state
@@ -489,7 +492,7 @@ def evaluate_selection_with_resolved_keyword_args(kwarg_arguments, resolved_argu
         # all of its child fields before moving on to the next root mutation field.
         # (Subselections of this mutation will still be resolved level-by-level.)
         if selection_result.graphql_is_eager
-          Interpreter::Resolve.resolve_all([field_result], @dataloader)
+          @dataloader.run
         end
       end
 
@@ -673,7 +676,11 @@ def continue_field(value, owner_type, field, current_type, ast_node, next_select
       rescue GraphQL::ExecutionError => ex_err
         return continue_value(ex_err, field, is_non_null, ast_node, result_name, selection_result)
       rescue StandardError => err
-        query.handle_or_reraise(err)
+        begin
+          query.handle_or_reraise(err)
+        rescue GraphQL::ExecutionError => ex_err
+          return continue_value(ex_err, field, is_non_null, ast_node, result_name, selection_result)
+        end
       end
       set_result(selection_result, result_name, r, false, is_non_null)
       r
@@ -934,7 +941,7 @@ def after_lazy(lazy_obj, field:, owner_object:, arguments:, ast_node:, result:,
             current_depth += 1
             result = result.graphql_parent
           end
-          @lazies_at_depth[current_depth] << lazy
+          @dataloader.lazy_at_depth(current_depth, lazy)
           lazy
         end
       else
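
One behavioral note on the `continue_field` change above: if a schema's error handler responds to a `StandardError` by raising `GraphQL::ExecutionError`, that re-raise previously escaped this rescue clause; now it is caught and written into the result like any other execution error. A hedged sketch of such a handler (`MySchema` is hypothetical; `rescue_from` is the standard API):

```ruby
class MySchema < GraphQL::Schema
  rescue_from(StandardError) do |err, obj, args, ctx, field|
    # Re-raising as an ExecutionError now becomes a response-level error
    # instead of propagating out of continue_field:
    raise GraphQL::ExecutionError, "Something went wrong: #{err.message}"
  end
end
```
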
"trustedPacketSequenceId": 101010101010, - "trackEvent": { - "type": "TYPE_COUNTER", - "trackUuid": "10101010101010", - "counterValue": "10101010101010" - }, - "sequenceFlags": 101010101010 - }, { "timestamp": "10101010101010", "trustedPacketSequenceId": 101010101010, diff --git a/spec/graphql/dataloader_spec.rb b/spec/graphql/dataloader_spec.rb index d379c143ca..a5020de012 100644 --- a/spec/graphql/dataloader_spec.rb +++ b/spec/graphql/dataloader_spec.rb @@ -940,6 +940,7 @@ def self.included(child_class) query_str = "{ cookbooks { featuredRecipe { name } } }" context = { batched_calls_counter: BatchedCallsCounter.new } result = schema.execute(query_str, context: context) + assert_equal ["Cornbread", "Grits"], result["data"]["cookbooks"].map { |c| c["featuredRecipe"]["name"] } refute result.key?("errors") assert_equal 1, context[:batched_calls_counter].count end @@ -1190,17 +1191,17 @@ def assert_last_max_fiber_count(expected_last_max_fiber_count, message = nil) res = schema.execute(query_str, context: { dataloader: fiber_counting_dataloader_class.new }) assert_nil res.context.dataloader.fiber_limit - assert_equal 12, FiberCounting.last_spawn_fiber_count + assert_equal 10, FiberCounting.last_spawn_fiber_count assert_last_max_fiber_count(9, "No limit works as expected") res = schema.execute(query_str, context: { dataloader: fiber_counting_dataloader_class.new(fiber_limit: 4) }) assert_equal 4, res.context.dataloader.fiber_limit - assert_equal 14, FiberCounting.last_spawn_fiber_count + assert_equal 12, FiberCounting.last_spawn_fiber_count assert_last_max_fiber_count(4, "Limit of 4 works as expected") res = schema.execute(query_str, context: { dataloader: fiber_counting_dataloader_class.new(fiber_limit: 6) }) assert_equal 6, res.context.dataloader.fiber_limit - assert_equal 10, FiberCounting.last_spawn_fiber_count + assert_equal 8, FiberCounting.last_spawn_fiber_count assert_last_max_fiber_count(6, "Limit of 6 works as expected") end diff --git a/spec/graphql/execution/breadth_runtime_spec.rb b/spec/graphql/execution/breadth_runtime_spec.rb index 9fba9561dc..a5d44bbb50 100644 --- a/spec/graphql/execution/breadth_runtime_spec.rb +++ b/spec/graphql/execution/breadth_runtime_spec.rb @@ -18,7 +18,7 @@ def initialize(query:) max_complexity: nil, ) - super(query: query, lazies_at_depth: Hash.new { |h, k| h[k] = [] }) + super(query: query) @breadth_results_by_key = {} end @@ -50,7 +50,6 @@ def evaluate_breadth_selection(objects, parent_type, node) end @dataloader.run - GraphQL::Execution::Interpreter::Resolve.resolve_each_depth(@lazies_at_depth, @dataloader) @breadth_results_by_key[result_key] end diff --git a/spec/graphql/execution/interpreter_spec.rb b/spec/graphql/execution/interpreter_spec.rb index 798cb7dd74..87891f82da 100644 --- a/spec/graphql/execution/interpreter_spec.rb +++ b/spec/graphql/execution/interpreter_spec.rb @@ -239,17 +239,38 @@ def nested_query(query:) class Counter < GraphQL::Schema::Object field :value, Integer, null: false + + def value + counter.value + end + field :lazy_value, Integer, null: false def lazy_value - Box.new { object.value } + Box.new { counter.value } end + field :incremented_value, Integer, hash_key: :incremented_value + field :increment, Counter, null: false def increment - object.value += 1 - object + v = counter.value += 1 + { + counter: counter, + incremented_value: v, + } + end + + + private + + def counter + if object.is_a?(Hash) && object.key?(:counter) + object[:counter] + else + object + end end end @@ -258,8 +279,11 @@ class Mutation < 
diff --git a/spec/graphql/execution/interpreter_spec.rb b/spec/graphql/execution/interpreter_spec.rb
index 798cb7dd74..87891f82da 100644
--- a/spec/graphql/execution/interpreter_spec.rb
+++ b/spec/graphql/execution/interpreter_spec.rb
@@ -239,17 +239,38 @@ def nested_query(query:)
   class Counter < GraphQL::Schema::Object
     field :value, Integer, null: false
+
+    def value
+      counter.value
+    end
+
     field :lazy_value, Integer, null: false
 
     def lazy_value
-      Box.new { object.value }
+      Box.new { counter.value }
     end
 
+    field :incremented_value, Integer, hash_key: :incremented_value
+
     field :increment, Counter, null: false
 
     def increment
-      object.value += 1
-      object
+      v = counter.value += 1
+      {
+        counter: counter,
+        incremented_value: v,
+      }
+    end
+
+    private
+
+    def counter
+      if object.is_a?(Hash) && object.key?(:counter)
+        object[:counter]
+      else
+        object
+      end
     end
   end
@@ -258,8 +279,11 @@ class Mutation < GraphQL::Schema::Object
 
     def increment_counter
       counter = context[:counter]
-      counter.value += 1
-      counter
+      v = counter.value += 1
+      {
+        counter: counter,
+        incremented_value: v
+      }
     end
   end
@@ -374,11 +398,11 @@ def execute_multiplex(multiplex:)
     query_str = <<-GRAPHQL
     mutation {
       i1: incrementCounter { value lazyValue
-        i2: increment { value lazyValue }
-        i3: increment { value lazyValue }
+        i2: increment { value incrementedValue lazyValue }
+        i3: increment { value incrementedValue lazyValue }
       }
-      i4: incrementCounter { value lazyValue }
-      i5: incrementCounter { value lazyValue }
+      i4: incrementCounter { value incrementedValue lazyValue }
+      i5: incrementCounter { value incrementedValue lazyValue }
     }
     GRAPHQL
@@ -389,11 +413,11 @@ def execute_multiplex(multiplex:)
         # All of these get `3` as lazy value. They're resolved together,
         # since they aren't _root_ mutation fields.
         "lazyValue" => 3,
-        "i2" => { "value" => 2, "lazyValue" => 3 },
-        "i3" => { "value" => 3, "lazyValue" => 3 },
+        "i2" => { "value" => 2, "incrementedValue" => 2, "lazyValue" => 3 },
+        "i3" => { "value" => 3, "incrementedValue" => 3, "lazyValue" => 3 },
       },
-      "i4" => { "value" => 4, "lazyValue" => 4},
-      "i5" => { "value" => 5, "lazyValue" => 5},
+      "i4" => { "value" => 4, "incrementedValue" => 4, "lazyValue" => 4},
+      "i5" => { "value" => 5, "incrementedValue" => 5, "lazyValue" => 5},
     }
     assert_graphql_equal expected_data, result["data"]
   end
diff --git a/spec/graphql/execution/lazy_spec.rb b/spec/graphql/execution/lazy_spec.rb
index 6c776a1fa4..eb295d3f5e 100644
--- a/spec/graphql/execution/lazy_spec.rb
+++ b/spec/graphql/execution/lazy_spec.rb
@@ -101,7 +101,7 @@
     end
   end
 
-  it "Handles fields that return nil" do
+  it "Handles fields that return nil and batches lazy resolution across depths when possible" do
     values = [
       LazyHelpers::MAGIC_NUMBER_THAT_RETURNS_NIL,
       LazyHelpers::MAGIC_NUMBER_WITH_LAZY_AUTHORIZED_HOOK,
diff --git a/spec/graphql/schema/directive_spec.rb b/spec/graphql/schema/directive_spec.rb
index 6b1d7b1faf..3dbfcc5027 100644
--- a/spec/graphql/schema/directive_spec.rb
+++ b/spec/graphql/schema/directive_spec.rb
@@ -117,7 +117,7 @@ def self.resolve(obj, args, ctx)
       result = nil
       ctx.dataloader.run_isolated do
         result = yield
-        GraphQL::Execution::Interpreter::Resolve.resolve_all([result], ctx.dataloader)
+        ctx.dataloader.run
       end
 
       ctx[:count_fields] ||= Hash.new { |h, k| h[k] = [] }
"extraCounterTrackUuids": [ - "10101010101010", - "10101010101010" - ] - }, - "sequenceFlags": 101010101010 - }, - { - "trustedPacketSequenceId": 101010101010, - "sequenceFlags": 101010101010, - "trackDescriptor": { - "uuid": "10101010101010", - "name": "Exec Fiber #1010", - "parentUuid": "10101010101010", - "childOrdering": "CHRONOLOGICAL" - } - }, - { - "timestamp": "10101010101010", - "trustedPacketSequenceId": 101010101010, - "trackEvent": { - "categories": [ - "Dataloader" - ], - "categoryIids": [ - "10101010101010" - ], - "type": "TYPE_INSTANT", - "trackUuid": "10101010101010", - "extraCounterValues": [ - "10101010101010" - ], - "name": "Fiber Exit", - "extraCounterTrackUuids": [ - "10101010101010" - ] - }, - "sequenceFlags": 101010101010 - }, - { - "timestamp": "10101010101010", - "trustedPacketSequenceId": 101010101010, - "trackEvent": { - "type": "TYPE_COUNTER", - "trackUuid": "10101010101010", - "counterValue": "10101010101010" - }, - "sequenceFlags": 101010101010 - }, { "timestamp": "10101010101010", "trustedPacketSequenceId": 101010101010, diff --git a/spec/graphql/tracing/snapshots/example-rails-7-1.json b/spec/graphql/tracing/snapshots/example-rails-7-1.json index 5eff7725ec..f54d737abc 100644 --- a/spec/graphql/tracing/snapshots/example-rails-7-1.json +++ b/spec/graphql/tracing/snapshots/example-rails-7-1.json @@ -18782,92 +18782,6 @@ }, "sequenceFlags": 101010101010 }, - { - "timestamp": "10101010101010", - "trustedPacketSequenceId": 101010101010, - "trackEvent": { - "type": "TYPE_COUNTER", - "trackUuid": "10101010101010", - "counterValue": "10101010101010" - }, - "sequenceFlags": 101010101010 - }, - { - "trustedPacketSequenceId": 101010101010, - "sequenceFlags": 101010101010, - "trackDescriptor": { - "uuid": "10101010101010", - "name": "Dataloader Fiber #1010", - "parentUuid": "10101010101010", - "childOrdering": "CHRONOLOGICAL" - } - }, - { - "timestamp": "10101010101010", - "trustedPacketSequenceId": 101010101010, - "trackEvent": { - "categories": [ - "Dataloader" - ], - "categoryIids": [ - "10101010101010" - ], - "type": "TYPE_INSTANT", - "trackUuid": "10101010101010", - "extraCounterValues": [ - "10101010101010", - "10101010101010" - ], - "name": "Create Execution Fiber", - "extraCounterTrackUuids": [ - "10101010101010", - "10101010101010" - ] - }, - "sequenceFlags": 101010101010 - }, - { - "trustedPacketSequenceId": 101010101010, - "sequenceFlags": 101010101010, - "trackDescriptor": { - "uuid": "10101010101010", - "name": "Exec Fiber #1010", - "parentUuid": "10101010101010", - "childOrdering": "CHRONOLOGICAL" - } - }, - { - "timestamp": "10101010101010", - "trustedPacketSequenceId": 101010101010, - "trackEvent": { - "categories": [ - "Dataloader" - ], - "categoryIids": [ - "10101010101010" - ], - "type": "TYPE_INSTANT", - "trackUuid": "10101010101010", - "extraCounterValues": [ - "10101010101010" - ], - "name": "Fiber Exit", - "extraCounterTrackUuids": [ - "10101010101010" - ] - }, - "sequenceFlags": 101010101010 - }, - { - "timestamp": "10101010101010", - "trustedPacketSequenceId": 101010101010, - "trackEvent": { - "type": "TYPE_COUNTER", - "trackUuid": "10101010101010", - "counterValue": "10101010101010" - }, - "sequenceFlags": 101010101010 - }, { "timestamp": "10101010101010", "trustedPacketSequenceId": 101010101010, diff --git a/spec/graphql/tracing/snapshots/example-rails-8-1.json b/spec/graphql/tracing/snapshots/example-rails-8-1.json index 5ed54a38d8..79fa536a3d 100644 --- a/spec/graphql/tracing/snapshots/example-rails-8-1.json +++ 
diff --git a/spec/graphql/tracing/snapshots/example-rails-8-1.json b/spec/graphql/tracing/snapshots/example-rails-8-1.json
index 5ed54a38d8..79fa536a3d 100644
--- a/spec/graphql/tracing/snapshots/example-rails-8-1.json
+++ b/spec/graphql/tracing/snapshots/example-rails-8-1.json
@@ -19214,92 +19214,6 @@
     },
     "sequenceFlags": 101010101010
   },
-  {
-    "timestamp": "10101010101010",
-    "trustedPacketSequenceId": 101010101010,
-    "trackEvent": {
-      "type": "TYPE_COUNTER",
-      "trackUuid": "10101010101010",
-      "counterValue": "10101010101010"
-    },
-    "sequenceFlags": 101010101010
-  },
-  {
-    "trustedPacketSequenceId": 101010101010,
-    "sequenceFlags": 101010101010,
-    "trackDescriptor": {
-      "uuid": "10101010101010",
-      "name": "Dataloader Fiber #1010",
-      "parentUuid": "10101010101010",
-      "childOrdering": "CHRONOLOGICAL"
-    }
-  },
-  {
-    "timestamp": "10101010101010",
-    "trustedPacketSequenceId": 101010101010,
-    "trackEvent": {
-      "categories": [
-        "Dataloader"
-      ],
-      "categoryIids": [
-        "10101010101010"
-      ],
-      "type": "TYPE_INSTANT",
-      "trackUuid": "10101010101010",
-      "extraCounterValues": [
-        "10101010101010",
-        "10101010101010"
-      ],
-      "name": "Create Execution Fiber",
-      "extraCounterTrackUuids": [
-        "10101010101010",
-        "10101010101010"
-      ]
-    },
-    "sequenceFlags": 101010101010
-  },
-  {
-    "trustedPacketSequenceId": 101010101010,
-    "sequenceFlags": 101010101010,
-    "trackDescriptor": {
-      "uuid": "10101010101010",
-      "name": "Exec Fiber #1010",
-      "parentUuid": "10101010101010",
-      "childOrdering": "CHRONOLOGICAL"
-    }
-  },
-  {
-    "timestamp": "10101010101010",
-    "trustedPacketSequenceId": 101010101010,
-    "trackEvent": {
-      "categories": [
-        "Dataloader"
-      ],
-      "categoryIids": [
-        "10101010101010"
-      ],
-      "type": "TYPE_INSTANT",
-      "trackUuid": "10101010101010",
-      "extraCounterValues": [
-        "10101010101010"
-      ],
-      "name": "Fiber Exit",
-      "extraCounterTrackUuids": [
-        "10101010101010"
-      ]
-    },
-    "sequenceFlags": 101010101010
-  },
-  {
-    "timestamp": "10101010101010",
-    "trustedPacketSequenceId": 101010101010,
-    "trackEvent": {
-      "type": "TYPE_COUNTER",
-      "trackUuid": "10101010101010",
-      "counterValue": "10101010101010"
-    },
-    "sequenceFlags": 101010101010
-  },
   {
     "timestamp": "10101010101010",
     "trustedPacketSequenceId": 101010101010,