module NewRelic::Agent::Instrumentation::OpenAI

Constants

CHAT_COMPLETIONS_PATH
CHAT_COMPLETIONS_SEGMENT_NAME
EMBEDDINGS_PATH
EMBEDDINGS_SEGMENT_NAME
INSTRUMENTATION_NAME
VENDOR

Public Instance Methods

json_post_with_new_relic(path:, parameters:) { || ... } click to toggle source
# File lib/new_relic/agent/instrumentation/ruby_openai/instrumentation.rb, line 14
# Wrapper around OpenAI::Client#json_post. Only the embeddings and chat
# completions endpoints are instrumented; any other path falls straight
# through to the original implementation via the yielded block.
def json_post_with_new_relic(path:, parameters:)
  return yield unless [EMBEDDINGS_PATH, CHAT_COMPLETIONS_PATH].include?(path)

  NewRelic::Agent.record_instrumentation_invocation(INSTRUMENTATION_NAME)
  NewRelic::Agent::Llm::LlmEvent.set_llm_agent_attribute_on_transaction
  record_openai_metric

  case path
  when EMBEDDINGS_PATH
    embeddings_instrumentation(parameters) { yield }
  when CHAT_COMPLETIONS_PATH
    chat_completions_instrumentation(parameters) { yield }
  end
end

Private Instance Methods

add_chat_completion_response_params(parameters, response, event) click to toggle source
# File lib/new_relic/agent/instrumentation/ruby_openai/instrumentation.rb, line 87
# Copies response-derived attributes onto the chat completion summary
# event: total message count (request + response), response model, and
# the finish reason of the first choice.
def add_chat_completion_response_params(parameters, response, event)
  request_messages = parameters[:messages] || parameters['messages']
  choices = response['choices']

  event.response_number_of_messages = request_messages.size + choices.size
  # The response hash always returns keys as strings, so we don't need to run an || check here
  event.response_model = response['model']
  event.response_choices_finish_reason = choices[0]['finish_reason']
end
add_content(message, content) click to toggle source
# File lib/new_relic/agent/instrumentation/ruby_openai/instrumentation.rb, line 159
# Attaches the message content to the event, but only when content
# recording is enabled in the agent configuration.
def add_content(message, content)
  return unless record_content_enabled?

  message.content = content
end
add_embeddings_response_params(response, event) click to toggle source
# File lib/new_relic/agent/instrumentation/ruby_openai/instrumentation.rb, line 94
# Copies response-derived attributes onto the embedding event and
# computes its token count from the request model and input.
def add_embeddings_response_params(response, event)
  # The response hash always returns keys as strings
  event.response_model = response['model']
  token_total = calculate_token_count(event.request_model, event.input)
  event.token_count = token_total
end
add_input(event, input) click to toggle source
# File lib/new_relic/agent/instrumentation/ruby_openai/instrumentation.rb, line 163
# Attaches the request input to the event, but only when content
# recording is enabled in the agent configuration.
def add_input(event, input)
  return unless record_content_enabled?

  event.input = input
end
calculate_token_count(model, content) click to toggle source
# File lib/new_relic/agent/instrumentation/ruby_openai/instrumentation.rb, line 143
# Invokes the customer-registered token count callback with the model
# and content, returning the result only when it is a positive Integer.
#
# Returns nil when no callback is registered, when the callback raises
# (the error is logged as a warning), or when the callback returns
# anything other than a positive Integer.
def calculate_token_count(model, content)
  return unless NewRelic::Agent.llm_token_count_callback

  begin
    count = NewRelic::Agent.llm_token_count_callback.call({model: model, content: content})
  rescue StandardError => e
    # Note: the previous message ended with a stray apostrophe after the
    # interpolated error; the typo has been removed.
    NewRelic::Agent.logger.warn("Error calculating token count using the provided proc. Error: #{e}")
  end

  count if count.is_a?(Integer) && count > 0
end
chat_completions_instrumentation(parameters) { || ... } click to toggle source
# File lib/new_relic/agent/instrumentation/ruby_openai/instrumentation.rb, line 45
# Instruments a chat completions request end-to-end.
#
# Builds a ChatCompletionSummary event plus one ChatCompletionMessage
# event per request message, attaches the summary to a freshly started
# segment, then yields to the original OpenAI client call. When the
# call returns a non-error response, the response data is merged back
# into the summary and message events. The ensure clause finishes the
# segment and records the summary (via #finish) and each message event
# even when the yielded call raises.
def chat_completions_instrumentation(parameters)
  segment = NewRelic::Agent::Tracer.start_segment(name: CHAT_COMPLETIONS_SEGMENT_NAME)
  event = create_chat_completion_summary(parameters)
  # Attach the event so error capture on the segment can annotate it
  segment.llm_event = event
  messages = create_chat_completion_messages(parameters, event.id)

  begin
    response = NewRelic::Agent::Tracer.capture_segment_error(segment) { yield }
    # TODO: Remove !response.include?('error') when we drop support for versions below 4.0.0
    if response && !response.include?('error')
      add_chat_completion_response_params(parameters, response, event)
      # update_chat_completion_messages returns request + response messages
      messages = update_chat_completion_messages(messages, response, event)
    end

    response
  ensure
    # Always finish the segment and record the events, even on error
    finish(segment, event)
    messages&.each { |m| m.record }
  end
end
create_chat_completion_messages(parameters, summary_id) click to toggle source
# File lib/new_relic/agent/instrumentation/ruby_openai/instrumentation.rb, line 99
# Builds one ChatCompletionMessage event per request message, numbered
# by position and linked back to the summary event via completion_id.
# Message content is attached only when content recording is enabled.
def create_chat_completion_messages(parameters, summary_id)
  request_messages = parameters[:messages] || parameters['messages']

  request_messages.each_with_index.map do |message, sequence|
    event = NewRelic::Agent::Llm::ChatCompletionMessage.new(
      role: message[:role] || message['role'],
      sequence: sequence,
      completion_id: summary_id,
      vendor: VENDOR
    )
    add_content(event, message[:content] || message['content'])

    event
  end
end
create_chat_completion_response_messages(response, sequence_origin, summary_id) click to toggle source
# File lib/new_relic/agent/instrumentation/ruby_openai/instrumentation.rb, line 113
# Builds one ChatCompletionMessage event per response choice, flagged
# as a response message. Sequencing continues from sequence_origin so
# that response messages are numbered after the request messages.
def create_chat_completion_response_messages(response, sequence_origin, summary_id)
  response['choices'].each_with_index.map do |choice, offset|
    event = NewRelic::Agent::Llm::ChatCompletionMessage.new(
      role: choice['message']['role'],
      sequence: sequence_origin + offset,
      completion_id: summary_id,
      vendor: VENDOR,
      is_response: true
    )
    add_content(event, choice['message']['content'])

    event
  end
end
create_chat_completion_summary(parameters) click to toggle source
# File lib/new_relic/agent/instrumentation/ruby_openai/instrumentation.rb, line 66
# Builds the ChatCompletionSummary event from the request parameters.
# Parameter keys may be symbols or strings; numeric settings are
# coerced only when present (safe navigation preserves nil).
def create_chat_completion_summary(parameters)
  max_tokens = parameters[:max_tokens] || parameters['max_tokens']
  temperature = parameters[:temperature] || parameters['temperature']

  NewRelic::Agent::Llm::ChatCompletionSummary.new(
    vendor: VENDOR,
    request_max_tokens: max_tokens&.to_i,
    request_model: parameters[:model] || parameters['model'],
    request_temperature: temperature&.to_f,
    metadata: llm_custom_attributes
  )
end
create_embeddings_event(parameters) click to toggle source
# File lib/new_relic/agent/instrumentation/ruby_openai/instrumentation.rb, line 76
# Builds the Embedding event from the request parameters. The input is
# attached only when content recording is enabled (see #add_input).
def create_embeddings_event(parameters)
  embedding = NewRelic::Agent::Llm::Embedding.new(
    vendor: VENDOR,
    request_model: parameters[:model] || parameters['model'],
    metadata: llm_custom_attributes
  )
  add_input(embedding, parameters[:input] || parameters['input'])

  embedding
end
embeddings_instrumentation(parameters) { || ... } click to toggle source
# File lib/new_relic/agent/instrumentation/ruby_openai/instrumentation.rb, line 30
# Instruments an embeddings request end-to-end.
#
# Builds an Embedding event, attaches it to a freshly started segment,
# yields to the original OpenAI client call, and merges response data
# back into the event when the call returns a non-error response. The
# ensure clause finishes the segment and records the event (via
# #finish) even when the yielded call raises.
def embeddings_instrumentation(parameters)
  segment = NewRelic::Agent::Tracer.start_segment(name: EMBEDDINGS_SEGMENT_NAME)
  event = create_embeddings_event(parameters)
  # Attach the event so error capture on the segment can annotate it
  segment.llm_event = event
  begin
    response = NewRelic::Agent::Tracer.capture_segment_error(segment) { yield }
    # TODO: Remove !response.include?('error') when we drop support for versions below 4.0.0
    add_embeddings_response_params(response, event) if response && !response.include?('error')

    response
  ensure
    # Always finish the segment and record the event, even on error
    finish(segment, event)
  end
end
finish(segment, event) click to toggle source
# File lib/new_relic/agent/instrumentation/ruby_openai/instrumentation.rb, line 183
# Finalizes a segment/event pair: finishes the segment, then copies the
# segment's error flag and duration onto the event before recording it.
# Either argument may be nil; a nil event is never recorded.
def finish(segment, event)
  segment&.finish
  return unless event

  event.error = true if segment && segment_noticed_error?(segment)
  event.duration = segment.duration if segment
  event.record
end
llm_custom_attributes() click to toggle source
# File lib/new_relic/agent/instrumentation/ruby_openai/instrumentation.rb, line 167
# Returns the current transaction's custom attributes whose keys
# contain 'llm', or nil when there is no current transaction (safe
# navigation short-circuits the whole chain).
# Uses Regexp#match? to avoid allocating a MatchData per key; only the
# boolean result matters inside select.
def llm_custom_attributes
  NewRelic::Agent::Tracer.current_transaction&.attributes&.custom_attributes&.select { |k| k.to_s.match?(/llm.*/) }
end
nr_supportability_metric() click to toggle source
# File lib/new_relic/agent/instrumentation/ruby_openai/instrumentation.rb, line 179
# Memoized supportability metric name including the installed
# ruby-openai gem version.
def nr_supportability_metric
  return @nr_supportability_metric if defined?(@nr_supportability_metric) && @nr_supportability_metric

  @nr_supportability_metric = "Supportability/Ruby/ML/OpenAI/#{::OpenAI::VERSION}"
end
record_content_enabled?() click to toggle source
# File lib/new_relic/agent/instrumentation/ruby_openai/instrumentation.rb, line 155
# True when the agent configuration allows recording LLM request and
# response content on events.
def record_content_enabled?
  setting = :'ai_monitoring.record_content.enabled'
  NewRelic::Agent.config[setting]
end
record_openai_metric() click to toggle source
# File lib/new_relic/agent/instrumentation/ruby_openai/instrumentation.rb, line 171
# Records the versioned OpenAI supportability metric with a zero value
# (presence of the metric is what matters, not its magnitude).
def record_openai_metric
  metric_name = nr_supportability_metric
  NewRelic::Agent.record_metric(metric_name, 0.0)
end
segment_noticed_error?(segment) click to toggle source
# File lib/new_relic/agent/instrumentation/ruby_openai/instrumentation.rb, line 175
# Whether the given segment (possibly nil) has a noticed error attached.
def segment_noticed_error?(segment)
  segment && segment.noticed_error
end
update_chat_completion_messages(messages, response, summary) click to toggle source
# File lib/new_relic/agent/instrumentation/ruby_openai/instrumentation.rb, line 128
# Appends response message events to the request message events, then
# stamps every message with response metadata: a per-message id derived
# from the response id (or a generated GUID), the summary's request id,
# the response model, LLM custom attributes, and a token count computed
# against the appropriate model. Returns the combined array.
def update_chat_completion_messages(messages, response, summary)
  all_messages = messages + create_chat_completion_response_messages(response, messages.size, summary.id)
  response_id = response['id'] || NewRelic::Agent::GuidGenerator.generate_guid

  all_messages.each do |msg|
    msg.id = "#{response_id}-#{msg.sequence}"
    msg.request_id = summary.request_id
    msg.response_model = response['model']
    msg.metadata = llm_custom_attributes

    # Response messages are counted against the response model; request
    # messages against the requested model.
    model = msg.is_response ? msg.response_model : summary.request_model
    msg.token_count = calculate_token_count(model, msg.content)
  end
end