module Elasticsearch::API::Inference::Actions
Public Instance Methods
Source
# File lib/elasticsearch/api/actions/inference/chat_completion_unified.rb, line 34
def chat_completion_unified(arguments = {})
  request_opts = { endpoint: arguments[:endpoint] || 'inference.chat_completion_unified' }

  defined_params = [:inference_id].each_with_object({}) do |variable, set_variables|
    set_variables[variable] = arguments[variable] if arguments.key?(variable)
  end
  request_opts[:defined_params] = defined_params unless defined_params.empty?

  raise ArgumentError, "Required argument 'body' missing" unless arguments[:body]
  raise ArgumentError, "Required argument 'inference_id' missing" unless arguments[:inference_id]

  arguments = arguments.clone
  headers = arguments.delete(:headers) || {}

  body = arguments.delete(:body)

  _inference_id = arguments.delete(:inference_id)

  method = Elasticsearch::API::HTTP_POST
  path   = "_inference/chat_completion/#{Utils.listify(_inference_id)}/_stream"
  params = Utils.process_params(arguments)

  Elasticsearch::API::Response.new(
    perform_request(method, path, params, body, headers, request_opts)
  )
end
Perform chat completion inference
@option arguments [String] :inference_id The inference Id (Required)
@option arguments [Time] :timeout Specifies the amount of time to wait for the inference request to complete. Server default: 30s.
@option arguments [Hash] :headers Custom HTTP headers
@option arguments [Hash] :body chat_completion_request
@see www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-inference-unified-inference
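For example, a minimal sketch assuming an initialized client and an existing chat completion endpoint (the endpoint name and message are illustrative only):

  response = client.inference.chat_completion_unified(
    inference_id: 'my-chat-endpoint',  # hypothetical endpoint name
    body: {
      messages: [
        { role: 'user', content: 'Say hello in one short sentence.' }
      ]
    }
  )

This action streams its response, so the client must be configured to handle streamed output.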
Source
# File lib/elasticsearch/api/actions/inference/completion.rb, line 34
def completion(arguments = {})
  request_opts = { endpoint: arguments[:endpoint] || 'inference.completion' }

  defined_params = [:inference_id].each_with_object({}) do |variable, set_variables|
    set_variables[variable] = arguments[variable] if arguments.key?(variable)
  end
  request_opts[:defined_params] = defined_params unless defined_params.empty?

  raise ArgumentError, "Required argument 'inference_id' missing" unless arguments[:inference_id]

  arguments = arguments.clone
  headers = arguments.delete(:headers) || {}

  body = arguments.delete(:body)

  _inference_id = arguments.delete(:inference_id)

  method = Elasticsearch::API::HTTP_POST
  path   = "_inference/completion/#{Utils.listify(_inference_id)}"
  params = Utils.process_params(arguments)

  Elasticsearch::API::Response.new(
    perform_request(method, path, params, body, headers, request_opts)
  )
end
Perform completion inference on the service
@option arguments [String] :inference_id The inference Id (Required)
@option arguments [Time] :timeout Specifies the amount of time to wait for the inference request to complete. Server default: 30s.
@option arguments [Hash] :headers Custom HTTP headers
@option arguments [Hash] :body request body
@see www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-inference-inference
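For example, a minimal sketch assuming a completion endpoint has already been created (the endpoint name is illustrative only):

  response = client.inference.completion(
    inference_id: 'my-completion-endpoint',  # hypothetical endpoint name
    body: { input: 'What is Elasticsearch?' }
  )
  puts response.body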
Source
# File lib/elasticsearch/api/actions/inference/delete.rb, line 35
def delete(arguments = {})
  request_opts = { endpoint: arguments[:endpoint] || 'inference.delete' }

  defined_params = [:inference_id, :task_type].each_with_object({}) do |variable, set_variables|
    set_variables[variable] = arguments[variable] if arguments.key?(variable)
  end
  request_opts[:defined_params] = defined_params unless defined_params.empty?

  raise ArgumentError, "Required argument 'inference_id' missing" unless arguments[:inference_id]

  arguments = arguments.clone
  headers = arguments.delete(:headers) || {}

  body = nil

  _task_type = arguments.delete(:task_type)

  _inference_id = arguments.delete(:inference_id)

  method = Elasticsearch::API::HTTP_DELETE
  path   = if _task_type && _inference_id
             "_inference/#{Utils.listify(_task_type)}/#{Utils.listify(_inference_id)}"
           else
             "_inference/#{Utils.listify(_inference_id)}"
           end
  params = Utils.process_params(arguments)

  Elasticsearch::API::Response.new(
    perform_request(method, path, params, body, headers, request_opts)
  )
end
Delete an inference endpoint
@option arguments [String] :task_type The task type
@option arguments [String] :inference_id The inference identifier. (Required)
@option arguments [Boolean] :dry_run When true, the endpoint is not deleted and a list of ingest processors which reference this endpoint is returned.
@option arguments [Boolean] :force When true, the inference endpoint is forcefully deleted even if it is still being used by ingest processors or semantic text fields.
@option arguments [Hash] :headers Custom HTTP headers
@see www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-inference-delete
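For example, a sketch of a cautious deletion flow using +dry_run+ (the endpoint name is illustrative only):

  # Preview which ingest processors still reference the endpoint;
  # dry_run: true leaves the endpoint in place.
  client.inference.delete(
    task_type: 'sparse_embedding',
    inference_id: 'my-elser-endpoint',  # hypothetical endpoint name
    dry_run: true
  )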
Source
# File lib/elasticsearch/api/actions/inference/get.rb, line 33
def get(arguments = {})
  request_opts = { endpoint: arguments[:endpoint] || 'inference.get' }

  defined_params = [:inference_id, :task_type].each_with_object({}) do |variable, set_variables|
    set_variables[variable] = arguments[variable] if arguments.key?(variable)
  end
  request_opts[:defined_params] = defined_params unless defined_params.empty?

  arguments = arguments.clone
  headers = arguments.delete(:headers) || {}

  body = nil

  _task_type = arguments.delete(:task_type)

  _inference_id = arguments.delete(:inference_id)

  method = Elasticsearch::API::HTTP_GET
  path   = if _task_type && _inference_id
             "_inference/#{Utils.listify(_task_type)}/#{Utils.listify(_inference_id)}"
           elsif _inference_id
             "_inference/#{Utils.listify(_inference_id)}"
           else
             '_inference'
           end
  params = {}

  Elasticsearch::API::Response.new(
    perform_request(method, path, params, body, headers, request_opts)
  )
end
Get an inference endpoint
@option arguments [String] :task_type The task type
@option arguments [String] :inference_id The inference Id
@option arguments [Hash] :headers Custom HTTP headers
@see www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-inference-get
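For example, both arguments are optional, so the same action can list everything or fetch one endpoint (the endpoint name is illustrative only):

  # List every inference endpoint in the cluster.
  client.inference.get

  # Fetch one endpoint, optionally scoped by task type.
  client.inference.get(
    task_type: 'text_embedding',
    inference_id: 'my-embedding-endpoint'  # hypothetical endpoint name
  )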
Source
# File lib/elasticsearch/api/actions/inference/inference.rb, line 39
def inference(arguments = {})
  request_opts = { endpoint: arguments[:endpoint] || 'inference.inference' }

  defined_params = [:inference_id, :task_type].each_with_object({}) do |variable, set_variables|
    set_variables[variable] = arguments[variable] if arguments.key?(variable)
  end
  request_opts[:defined_params] = defined_params unless defined_params.empty?

  raise ArgumentError, "Required argument 'inference_id' missing" unless arguments[:inference_id]

  arguments = arguments.clone
  headers = arguments.delete(:headers) || {}

  body = arguments.delete(:body)

  _task_type = arguments.delete(:task_type)

  _inference_id = arguments.delete(:inference_id)

  method = Elasticsearch::API::HTTP_POST
  path   = if _task_type && _inference_id
             "_inference/#{Utils.listify(_task_type)}/#{Utils.listify(_inference_id)}"
           else
             "_inference/#{Utils.listify(_inference_id)}"
           end
  params = Utils.process_params(arguments)

  Elasticsearch::API::Response.new(
    perform_request(method, path, params, body, headers, request_opts)
  )
end
Perform inference on the service. This API enables you to use machine learning models to perform specific tasks on data that you provide as an input. It returns a response with the results of the tasks. The inference endpoint you use can perform one specific task that has been defined when the endpoint was created with the create inference API. For details about using this API with a service, such as Amazon Bedrock, Anthropic, or HuggingFace, refer to the service-specific documentation.
@option arguments [String] :task_type The type of inference task that the model performs.
@option arguments [String] :inference_id The unique identifier for the inference endpoint. (Required)
@option arguments [Time] :timeout The amount of time to wait for the inference request to complete. Server default: 30s.
@option arguments [Hash] :headers Custom HTTP headers
@option arguments [Hash] :body request body
@see www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-inference-inference
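For example, a minimal sketch of the generic action (the endpoint name is illustrative only):

  response = client.inference.inference(
    task_type: 'text_embedding',            # must match the endpoint's task type
    inference_id: 'my-embedding-endpoint',  # hypothetical endpoint name
    body: { input: 'The quick brown fox' }
  )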
Source
# File lib/elasticsearch/api/actions/inference/put.rb, line 42
def put(arguments = {})
  request_opts = { endpoint: arguments[:endpoint] || 'inference.put' }

  defined_params = [:inference_id, :task_type].each_with_object({}) do |variable, set_variables|
    set_variables[variable] = arguments[variable] if arguments.key?(variable)
  end
  request_opts[:defined_params] = defined_params unless defined_params.empty?

  raise ArgumentError, "Required argument 'body' missing" unless arguments[:body]
  raise ArgumentError, "Required argument 'inference_id' missing" unless arguments[:inference_id]

  arguments = arguments.clone
  headers = arguments.delete(:headers) || {}

  body = arguments.delete(:body)

  _task_type = arguments.delete(:task_type)

  _inference_id = arguments.delete(:inference_id)

  method = Elasticsearch::API::HTTP_PUT
  path   = if _task_type && _inference_id
             "_inference/#{Utils.listify(_task_type)}/#{Utils.listify(_inference_id)}"
           else
             "_inference/#{Utils.listify(_inference_id)}"
           end
  params = {}

  Elasticsearch::API::Response.new(
    perform_request(method, path, params, body, headers, request_opts)
  )
end
Create an inference endpoint. When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for +"state": "fully_allocated"+ in the response and ensure that the +"allocation_count"+ matches the +"target_allocation_count"+. Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. IMPORTANT: The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Mistral, Azure OpenAI, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs.
@option arguments [String] :task_type The task type
@option arguments [String] :inference_id The inference Id (Required)
@option arguments [Hash] :headers Custom HTTP headers
@option arguments [Hash] :body inference_config
@see www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-inference-put
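For example, a sketch of creating an endpoint through the generic action; the body shape depends on the chosen service, and the endpoint name, key, and model below are placeholders:

  client.inference.put(
    task_type: 'text_embedding',
    inference_id: 'my-cohere-endpoint',  # hypothetical endpoint name
    body: {
      service: 'cohere',
      service_settings: {
        api_key: ENV['COHERE_API_KEY'],  # placeholder credential
        model_id: 'embed-english-v3.0'   # placeholder model
      }
    }
  )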
Source
# File lib/elasticsearch/api/actions/inference/put_alibabacloud.rb, line 40
def put_alibabacloud(arguments = {})
  request_opts = { endpoint: arguments[:endpoint] || 'inference.put_alibabacloud' }

  defined_params = [:task_type, :alibabacloud_inference_id].each_with_object({}) do |variable, set_variables|
    set_variables[variable] = arguments[variable] if arguments.key?(variable)
  end
  request_opts[:defined_params] = defined_params unless defined_params.empty?

  raise ArgumentError, "Required argument 'task_type' missing" unless arguments[:task_type]

  unless arguments[:alibabacloud_inference_id]
    raise ArgumentError, "Required argument 'alibabacloud_inference_id' missing"
  end

  arguments = arguments.clone
  headers = arguments.delete(:headers) || {}

  body = arguments.delete(:body)

  _task_type = arguments.delete(:task_type)

  _alibabacloud_inference_id = arguments.delete(:alibabacloud_inference_id)

  method = Elasticsearch::API::HTTP_PUT
  path   = "_inference/#{Utils.listify(_task_type)}/#{Utils.listify(_alibabacloud_inference_id)}"
  params = {}

  Elasticsearch::API::Response.new(
    perform_request(method, path, params, body, headers, request_opts)
  )
end
Create an AlibabaCloud AI Search inference endpoint. Create an inference endpoint to perform an inference task with the +alibabacloud-ai-search+ service. When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for +"state": "fully_allocated"+ in the response and ensure that the +"allocation_count"+ matches the +"target_allocation_count"+. Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
@option arguments [String] :task_type The type of the inference task that the model will perform. (Required)
@option arguments [String] :alibabacloud_inference_id The unique identifier of the inference endpoint. (Required)
@option arguments [Hash] :headers Custom HTTP headers
@option arguments [Hash] :body request body
@see www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-inference-put-alibabacloud
Source
# File lib/elasticsearch/api/actions/inference/put_amazonbedrock.rb, line 35
def put_amazonbedrock(arguments = {})
  request_opts = { endpoint: arguments[:endpoint] || 'inference.put_amazonbedrock' }

  defined_params = [:task_type, :amazonbedrock_inference_id].each_with_object({}) do |variable, set_variables|
    set_variables[variable] = arguments[variable] if arguments.key?(variable)
  end
  request_opts[:defined_params] = defined_params unless defined_params.empty?

  raise ArgumentError, "Required argument 'task_type' missing" unless arguments[:task_type]

  unless arguments[:amazonbedrock_inference_id]
    raise ArgumentError, "Required argument 'amazonbedrock_inference_id' missing"
  end

  arguments = arguments.clone
  headers = arguments.delete(:headers) || {}

  body = arguments.delete(:body)

  _task_type = arguments.delete(:task_type)

  _amazonbedrock_inference_id = arguments.delete(:amazonbedrock_inference_id)

  method = Elasticsearch::API::HTTP_PUT
  path   = "_inference/#{Utils.listify(_task_type)}/#{Utils.listify(_amazonbedrock_inference_id)}"
  params = {}

  Elasticsearch::API::Response.new(
    perform_request(method, path, params, body, headers, request_opts)
  )
end
Create an Amazon Bedrock inference endpoint. Create an inference endpoint to perform an inference task with the +amazonbedrock+ service.
@option arguments [String] :task_type The type of the inference task that the model will perform. (Required)
@option arguments [String] :amazonbedrock_inference_id The unique identifier of the inference endpoint. (Required)
@option arguments [Hash] :headers Custom HTTP headers
@option arguments [Hash] :body request body
@see www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-inference-put-amazonbedrock
Source
# File lib/elasticsearch/api/actions/inference/put_anthropic.rb, line 41
def put_anthropic(arguments = {})
  request_opts = { endpoint: arguments[:endpoint] || 'inference.put_anthropic' }

  defined_params = [:task_type, :anthropic_inference_id].each_with_object({}) do |variable, set_variables|
    set_variables[variable] = arguments[variable] if arguments.key?(variable)
  end
  request_opts[:defined_params] = defined_params unless defined_params.empty?

  raise ArgumentError, "Required argument 'task_type' missing" unless arguments[:task_type]

  unless arguments[:anthropic_inference_id]
    raise ArgumentError, "Required argument 'anthropic_inference_id' missing"
  end

  arguments = arguments.clone
  headers = arguments.delete(:headers) || {}

  body = arguments.delete(:body)

  _task_type = arguments.delete(:task_type)

  _anthropic_inference_id = arguments.delete(:anthropic_inference_id)

  method = Elasticsearch::API::HTTP_PUT
  path   = "_inference/#{Utils.listify(_task_type)}/#{Utils.listify(_anthropic_inference_id)}"
  params = {}

  Elasticsearch::API::Response.new(
    perform_request(method, path, params, body, headers, request_opts)
  )
end
Create an Anthropic inference endpoint. Create an inference endpoint to perform an inference task with the +anthropic+ service. When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for +"state": "fully_allocated"+ in the response and ensure that the +"allocation_count"+ matches the +"target_allocation_count"+. Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
@option arguments [String] :task_type The task type. The only valid task type for the model to perform is +completion+. (Required)
@option arguments [String] :anthropic_inference_id The unique identifier of the inference endpoint. (Required)
@option arguments [Hash] :headers Custom HTTP headers
@option arguments [Hash] :body request body
@see www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-inference-put-anthropic
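For example, a sketch with placeholder names, key, and model; the +max_tokens+ task setting shown is an assumed illustrative value:

  client.inference.put_anthropic(
    task_type: 'completion',  # the only valid task type
    anthropic_inference_id: 'my-anthropic-endpoint',  # hypothetical name
    body: {
      service: 'anthropic',
      service_settings: {
        api_key: ENV['ANTHROPIC_API_KEY'],   # placeholder credential
        model_id: 'claude-3-5-haiku-latest'  # placeholder model
      },
      task_settings: { max_tokens: 1024 }
    }
  )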
Source
# File lib/elasticsearch/api/actions/inference/put_azureaistudio.rb, line 40
def put_azureaistudio(arguments = {})
  request_opts = { endpoint: arguments[:endpoint] || 'inference.put_azureaistudio' }

  defined_params = [:task_type, :azureaistudio_inference_id].each_with_object({}) do |variable, set_variables|
    set_variables[variable] = arguments[variable] if arguments.key?(variable)
  end
  request_opts[:defined_params] = defined_params unless defined_params.empty?

  raise ArgumentError, "Required argument 'task_type' missing" unless arguments[:task_type]

  unless arguments[:azureaistudio_inference_id]
    raise ArgumentError, "Required argument 'azureaistudio_inference_id' missing"
  end

  arguments = arguments.clone
  headers = arguments.delete(:headers) || {}

  body = arguments.delete(:body)

  _task_type = arguments.delete(:task_type)

  _azureaistudio_inference_id = arguments.delete(:azureaistudio_inference_id)

  method = Elasticsearch::API::HTTP_PUT
  path   = "_inference/#{Utils.listify(_task_type)}/#{Utils.listify(_azureaistudio_inference_id)}"
  params = {}

  Elasticsearch::API::Response.new(
    perform_request(method, path, params, body, headers, request_opts)
  )
end
Create an Azure AI Studio inference endpoint. Create an inference endpoint to perform an inference task with the +azureaistudio+ service. When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for +"state": "fully_allocated"+ in the response and ensure that the +"allocation_count"+ matches the +"target_allocation_count"+. Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
@option arguments [String] :task_type The type of the inference task that the model will perform. (Required)
@option arguments [String] :azureaistudio_inference_id The unique identifier of the inference endpoint. (Required)
@option arguments [Hash] :headers Custom HTTP headers
@option arguments [Hash] :body request body
@see www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-inference-put-azureaistudio
Source
# File lib/elasticsearch/api/actions/inference/put_azureopenai.rb, line 45
def put_azureopenai(arguments = {})
  request_opts = { endpoint: arguments[:endpoint] || 'inference.put_azureopenai' }

  defined_params = [:task_type, :azureopenai_inference_id].each_with_object({}) do |variable, set_variables|
    set_variables[variable] = arguments[variable] if arguments.key?(variable)
  end
  request_opts[:defined_params] = defined_params unless defined_params.empty?

  raise ArgumentError, "Required argument 'task_type' missing" unless arguments[:task_type]

  unless arguments[:azureopenai_inference_id]
    raise ArgumentError, "Required argument 'azureopenai_inference_id' missing"
  end

  arguments = arguments.clone
  headers = arguments.delete(:headers) || {}

  body = arguments.delete(:body)

  _task_type = arguments.delete(:task_type)

  _azureopenai_inference_id = arguments.delete(:azureopenai_inference_id)

  method = Elasticsearch::API::HTTP_PUT
  path   = "_inference/#{Utils.listify(_task_type)}/#{Utils.listify(_azureopenai_inference_id)}"
  params = {}

  Elasticsearch::API::Response.new(
    perform_request(method, path, params, body, headers, request_opts)
  )
end
Create an Azure OpenAI inference endpoint. Create an inference endpoint to perform an inference task with the +azureopenai+ service. The list of chat completion models that you can choose from in your Azure OpenAI deployment includes:
- {learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models?tabs=global-standard%2Cstandard-chat-completions#gpt-4-and-gpt-4-turbo-models GPT-4 and GPT-4 Turbo models}
The list of embeddings models that you can choose from in your deployment can be found in the {learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models?tabs=global-standard%2Cstandard-chat-completions#embeddings Azure models documentation}. When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for +"state": "fully_allocated"+ in the response and ensure that the +"allocation_count"+ matches the +"target_allocation_count"+. Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
@option arguments [String] :task_type The type of the inference task that the model will perform. NOTE: The +chat_completion+ task type only supports streaming and only through the _stream API. (Required)
@option arguments [String] :azureopenai_inference_id The unique identifier of the inference endpoint. (Required)
@option arguments [Hash] :headers Custom HTTP headers
@option arguments [Hash] :body request body
@see www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-inference-put-azureopenai
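For example, a sketch in which the endpoint name, key, Azure resource, deployment, and API version are all placeholders for your own deployment:

  client.inference.put_azureopenai(
    task_type: 'completion',
    azureopenai_inference_id: 'my-azure-openai-endpoint',  # hypothetical name
    body: {
      service: 'azureopenai',
      service_settings: {
        api_key: ENV['AZURE_OPENAI_API_KEY'],  # placeholder credential
        resource_name: 'my-resource',          # placeholder Azure resource
        deployment_id: 'my-deployment',        # placeholder deployment
        api_version: '2024-02-01'              # placeholder API version
      }
    }
  )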
Source
# File lib/elasticsearch/api/actions/inference/put_cohere.rb, line 40
def put_cohere(arguments = {})
  request_opts = { endpoint: arguments[:endpoint] || 'inference.put_cohere' }

  defined_params = [:task_type, :cohere_inference_id].each_with_object({}) do |variable, set_variables|
    set_variables[variable] = arguments[variable] if arguments.key?(variable)
  end
  request_opts[:defined_params] = defined_params unless defined_params.empty?

  raise ArgumentError, "Required argument 'task_type' missing" unless arguments[:task_type]

  unless arguments[:cohere_inference_id]
    raise ArgumentError, "Required argument 'cohere_inference_id' missing"
  end

  arguments = arguments.clone
  headers = arguments.delete(:headers) || {}

  body = arguments.delete(:body)

  _task_type = arguments.delete(:task_type)

  _cohere_inference_id = arguments.delete(:cohere_inference_id)

  method = Elasticsearch::API::HTTP_PUT
  path   = "_inference/#{Utils.listify(_task_type)}/#{Utils.listify(_cohere_inference_id)}"
  params = {}

  Elasticsearch::API::Response.new(
    perform_request(method, path, params, body, headers, request_opts)
  )
end
Create a Cohere inference endpoint. Create an inference endpoint to perform an inference task with the +cohere+ service. When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for +"state": "fully_allocated"+ in the response and ensure that the +"allocation_count"+ matches the +"target_allocation_count"+. Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
@option arguments [String] :task_type The type of the inference task that the model will perform. (Required)
@option arguments [String] :cohere_inference_id The unique identifier of the inference endpoint. (Required)
@option arguments [Hash] :headers Custom HTTP headers
@option arguments [Hash] :body request body
@see www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-inference-put-cohere
Source
# File lib/elasticsearch/api/actions/inference/put_elasticsearch.rb, line 36
def put_elasticsearch(arguments = {})
  request_opts = { endpoint: arguments[:endpoint] || 'inference.put_elasticsearch' }

  defined_params = [:task_type, :elasticsearch_inference_id].each_with_object({}) do |variable, set_variables|
    set_variables[variable] = arguments[variable] if arguments.key?(variable)
  end
  request_opts[:defined_params] = defined_params unless defined_params.empty?

  raise ArgumentError, "Required argument 'task_type' missing" unless arguments[:task_type]

  unless arguments[:elasticsearch_inference_id]
    raise ArgumentError, "Required argument 'elasticsearch_inference_id' missing"
  end

  arguments = arguments.clone
  headers = arguments.delete(:headers) || {}

  body = arguments.delete(:body)

  _task_type = arguments.delete(:task_type)

  _elasticsearch_inference_id = arguments.delete(:elasticsearch_inference_id)

  method = Elasticsearch::API::HTTP_PUT
  path   = "_inference/#{Utils.listify(_task_type)}/#{Utils.listify(_elasticsearch_inference_id)}"
  params = {}

  Elasticsearch::API::Response.new(
    perform_request(method, path, params, body, headers, request_opts)
  )
end
Create an Elasticsearch inference endpoint. Create an inference endpoint to perform an inference task with the +elasticsearch+ service.
@option arguments [String] :task_type The type of the inference task that the model will perform. (Required)
@option arguments [String] :elasticsearch_inference_id The unique identifier of the inference endpoint. It must not match the +model_id+. (Required)
@option arguments [Hash] :headers Custom HTTP headers
@option arguments [Hash] :body request body
@see www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-inference-put-elasticsearch
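For example, a sketch deploying a model that already lives in the cluster; the endpoint name and allocation values are illustrative, and the built-in E5 model id is an assumption about your installed models:

  client.inference.put_elasticsearch(
    task_type: 'text_embedding',
    elasticsearch_inference_id: 'my-e5-endpoint',  # must differ from model_id
    body: {
      service: 'elasticsearch',
      service_settings: {
        model_id: '.multilingual-e5-small',  # assumed built-in E5 model
        num_allocations: 1,
        num_threads: 1
      }
    }
  )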
Source
# File lib/elasticsearch/api/actions/inference/put_elser.rb, line 36
def put_elser(arguments = {})
  request_opts = { endpoint: arguments[:endpoint] || 'inference.put_elser' }

  defined_params = [:task_type, :elser_inference_id].each_with_object({}) do |variable, set_variables|
    set_variables[variable] = arguments[variable] if arguments.key?(variable)
  end
  request_opts[:defined_params] = defined_params unless defined_params.empty?

  raise ArgumentError, "Required argument 'task_type' missing" unless arguments[:task_type]
  raise ArgumentError, "Required argument 'elser_inference_id' missing" unless arguments[:elser_inference_id]

  arguments = arguments.clone
  headers = arguments.delete(:headers) || {}

  body = arguments.delete(:body)

  _task_type = arguments.delete(:task_type)

  _elser_inference_id = arguments.delete(:elser_inference_id)

  method = Elasticsearch::API::HTTP_PUT
  path   = "_inference/#{Utils.listify(_task_type)}/#{Utils.listify(_elser_inference_id)}"
  params = {}

  Elasticsearch::API::Response.new(
    perform_request(method, path, params, body, headers, request_opts)
  )
end
Create an ELSER inference endpoint. Create an inference endpoint to perform an inference task with the +elser+ service. You can also deploy ELSER by using the Elasticsearch inference integration.
@option arguments [String] :task_type The type of the inference task that the model will perform. (Required)
@option arguments [String] :elser_inference_id The unique identifier of the inference endpoint. (Required)
@option arguments [Hash] :headers Custom HTTP headers
@option arguments [Hash] :body request body
@see www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-inference-put-elser
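For example, a minimal sketch; the endpoint name is illustrative and the allocation values are starting points, not recommendations:

  client.inference.put_elser(
    task_type: 'sparse_embedding',  # ELSER produces sparse embeddings
    elser_inference_id: 'my-elser-endpoint',  # hypothetical endpoint name
    body: {
      service: 'elser',
      service_settings: { num_allocations: 1, num_threads: 1 }
    }
  )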
Source
# File lib/elasticsearch/api/actions/inference/put_googleaistudio.rb, line 40
def put_googleaistudio(arguments = {})
  request_opts = { endpoint: arguments[:endpoint] || 'inference.put_googleaistudio' }

  defined_params = [:task_type, :googleaistudio_inference_id].each_with_object({}) do |variable, set_variables|
    set_variables[variable] = arguments[variable] if arguments.key?(variable)
  end
  request_opts[:defined_params] = defined_params unless defined_params.empty?

  raise ArgumentError, "Required argument 'task_type' missing" unless arguments[:task_type]

  unless arguments[:googleaistudio_inference_id]
    raise ArgumentError, "Required argument 'googleaistudio_inference_id' missing"
  end

  arguments = arguments.clone
  headers = arguments.delete(:headers) || {}

  body = arguments.delete(:body)

  _task_type = arguments.delete(:task_type)

  _googleaistudio_inference_id = arguments.delete(:googleaistudio_inference_id)

  method = Elasticsearch::API::HTTP_PUT
  path   = "_inference/#{Utils.listify(_task_type)}/#{Utils.listify(_googleaistudio_inference_id)}"
  params = {}

  Elasticsearch::API::Response.new(
    perform_request(method, path, params, body, headers, request_opts)
  )
end
Create a Google AI Studio inference endpoint. Create an inference endpoint to perform an inference task with the +googleaistudio+ service. When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for +"state": "fully_allocated"+ in the response and ensure that the +"allocation_count"+ matches the +"target_allocation_count"+. Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
@option arguments [String] :task_type The type of the inference task that the model will perform. (Required)
@option arguments [String] :googleaistudio_inference_id The unique identifier of the inference endpoint. (Required)
@option arguments [Hash] :headers Custom HTTP headers
@option arguments [Hash] :body request body
@see www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-inference-put-googleaistudio
Source
# File lib/elasticsearch/api/actions/inference/put_googlevertexai.rb, line 40
def put_googlevertexai(arguments = {})
  request_opts = { endpoint: arguments[:endpoint] || 'inference.put_googlevertexai' }

  defined_params = [:task_type, :googlevertexai_inference_id].each_with_object({}) do |variable, set_variables|
    set_variables[variable] = arguments[variable] if arguments.key?(variable)
  end
  request_opts[:defined_params] = defined_params unless defined_params.empty?

  raise ArgumentError, "Required argument 'task_type' missing" unless arguments[:task_type]

  unless arguments[:googlevertexai_inference_id]
    raise ArgumentError, "Required argument 'googlevertexai_inference_id' missing"
  end

  arguments = arguments.clone
  headers = arguments.delete(:headers) || {}

  body = arguments.delete(:body)

  _task_type = arguments.delete(:task_type)

  _googlevertexai_inference_id = arguments.delete(:googlevertexai_inference_id)

  method = Elasticsearch::API::HTTP_PUT
  path   = "_inference/#{Utils.listify(_task_type)}/#{Utils.listify(_googlevertexai_inference_id)}"
  params = {}

  Elasticsearch::API::Response.new(
    perform_request(method, path, params, body, headers, request_opts)
  )
end
Create a Google Vertex AI inference endpoint. Create an inference endpoint to perform an inference task with the +googlevertexai+ service. When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for +"state": "fully_allocated"+ in the response and ensure that the +"allocation_count"+ matches the +"target_allocation_count"+. Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
@option arguments [String] :task_type The type of the inference task that the model will perform. (Required)
@option arguments [String] :googlevertexai_inference_id The unique identifier of the inference endpoint. (Required)
@option arguments [Hash] :headers Custom HTTP headers
@option arguments [Hash] :body request body
@see www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-inference-put-googlevertexai
Source
# File lib/elasticsearch/api/actions/inference/put_hugging_face.rb, line 51
def put_hugging_face(arguments = {})
  request_opts = { endpoint: arguments[:endpoint] || 'inference.put_hugging_face' }

  defined_params = [:task_type, :huggingface_inference_id].each_with_object({}) do |variable, set_variables|
    set_variables[variable] = arguments[variable] if arguments.key?(variable)
  end
  request_opts[:defined_params] = defined_params unless defined_params.empty?

  raise ArgumentError, "Required argument 'task_type' missing" unless arguments[:task_type]

  unless arguments[:huggingface_inference_id]
    raise ArgumentError, "Required argument 'huggingface_inference_id' missing"
  end

  arguments = arguments.clone
  headers = arguments.delete(:headers) || {}

  body = arguments.delete(:body)

  _task_type = arguments.delete(:task_type)

  _huggingface_inference_id = arguments.delete(:huggingface_inference_id)

  method = Elasticsearch::API::HTTP_PUT
  path   = "_inference/#{Utils.listify(_task_type)}/#{Utils.listify(_huggingface_inference_id)}"
  params = {}

  Elasticsearch::API::Response.new(
    perform_request(method, path, params, body, headers, request_opts)
  )
end
Create a Hugging Face inference endpoint. Create an inference endpoint to perform an inference task with the +hugging_face+ service. You must first create an inference endpoint on the Hugging Face endpoint page to get an endpoint URL. Select the model you want to use on the new endpoint creation page (for example +intfloat/e5-small-v2+), then select the sentence embeddings task under the advanced configuration section. Create the endpoint and copy the URL after the endpoint initialization has been finished. The following models are recommended for the Hugging Face service:
- +all-MiniLM-L6-v2+
- +all-MiniLM-L12-v2+
- +all-mpnet-base-v2+
- +e5-base-v2+
- +e5-small-v2+
- +multilingual-e5-base+
- +multilingual-e5-small+
When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for +"state": "fully_allocated"+ in the response and ensure that the +"allocation_count"+ matches the +"target_allocation_count"+. Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
@option arguments [String] :task_type The type of the inference task that the model will perform. (Required)
@option arguments [String] :huggingface_inference_id The unique identifier of the inference endpoint. (Required)
@option arguments [Hash] :headers Custom HTTP headers
@option arguments [Hash] :body request body
@see www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-inference-put-hugging-face
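For example, a sketch in which the URL comes from the Hugging Face endpoint page; the endpoint name, key, and URL below are all placeholders:

  client.inference.put_hugging_face(
    task_type: 'text_embedding',
    huggingface_inference_id: 'my-hugging-face-endpoint',  # hypothetical name
    body: {
      service: 'hugging_face',
      service_settings: {
        api_key: ENV['HUGGING_FACE_API_KEY'],  # placeholder credential
        url: 'https://my-endpoint.example.endpoints.huggingface.cloud'  # placeholder URL
      }
    }
  )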
Source
# File lib/elasticsearch/api/actions/inference/put_jinaai.rb, line 42
def put_jinaai(arguments = {})
  request_opts = { endpoint: arguments[:endpoint] || 'inference.put_jinaai' }

  defined_params = [:task_type, :jinaai_inference_id].each_with_object({}) do |variable, set_variables|
    set_variables[variable] = arguments[variable] if arguments.key?(variable)
  end
  request_opts[:defined_params] = defined_params unless defined_params.empty?

  raise ArgumentError, "Required argument 'task_type' missing" unless arguments[:task_type]

  unless arguments[:jinaai_inference_id]
    raise ArgumentError, "Required argument 'jinaai_inference_id' missing"
  end

  arguments = arguments.clone
  headers = arguments.delete(:headers) || {}

  body = arguments.delete(:body)

  _task_type = arguments.delete(:task_type)

  _jinaai_inference_id = arguments.delete(:jinaai_inference_id)

  method = Elasticsearch::API::HTTP_PUT
  path   = "_inference/#{Utils.listify(_task_type)}/#{Utils.listify(_jinaai_inference_id)}"
  params = {}

  Elasticsearch::API::Response.new(
    perform_request(method, path, params, body, headers, request_opts)
  )
end
Create a JinaAI inference endpoint. Create an inference endpoint to perform an inference task with the +jinaai+ service. To review the available +rerank+ models, refer to <jina.ai/reranker>. To review the available +text_embedding+ models, refer to <jina.ai/embeddings/>. When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for +"state": "fully_allocated"+ in the response and ensure that the +"allocation_count"+ matches the +"target_allocation_count"+. Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
@option arguments [String] :task_type The type of the inference task that the model will perform. (Required)
@option arguments [String] :jinaai_inference_id The unique identifier of the inference endpoint. (Required)
@option arguments [Hash] :headers Custom HTTP headers
@option arguments [Hash] :body request body
@see www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-inference-put-jinaai
Source
# File lib/elasticsearch/api/actions/inference/put_mistral.rb, line 41
def put_mistral(arguments = {})
  request_opts = { endpoint: arguments[:endpoint] || 'inference.put_mistral' }

  defined_params = [:task_type, :mistral_inference_id].each_with_object({}) do |variable, set_variables|
    set_variables[variable] = arguments[variable] if arguments.key?(variable)
  end
  request_opts[:defined_params] = defined_params unless defined_params.empty?

  raise ArgumentError, "Required argument 'task_type' missing" unless arguments[:task_type]

  unless arguments[:mistral_inference_id]
    raise ArgumentError, "Required argument 'mistral_inference_id' missing"
  end

  arguments = arguments.clone
  headers = arguments.delete(:headers) || {}

  body = arguments.delete(:body)

  _task_type = arguments.delete(:task_type)

  _mistral_inference_id = arguments.delete(:mistral_inference_id)

  method = Elasticsearch::API::HTTP_PUT
  path   = "_inference/#{Utils.listify(_task_type)}/#{Utils.listify(_mistral_inference_id)}"
  params = {}

  Elasticsearch::API::Response.new(
    perform_request(method, path, params, body, headers, request_opts)
  )
end
Create a Mistral inference endpoint. Create an inference endpoint to perform an inference task with the +mistral+ service. When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for +"state": "fully_allocated"+ in the response and ensure that the +"allocation_count"+ matches the +"target_allocation_count"+. Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
@option arguments [String] :task_type The task type. The only valid task type for the model to perform is +text_embedding+. (Required)
@option arguments [String] :mistral_inference_id The unique identifier of the inference endpoint. (Required)
@option arguments [Hash] :headers Custom HTTP headers
@option arguments [Hash] :body request body
@see www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-inference-put-mistral
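For example, a sketch with placeholder names and key; the +model+ service setting name and value are assumptions drawn from the Mistral service configuration, not a verified schema:

  client.inference.put_mistral(
    task_type: 'text_embedding',  # the only valid task type
    mistral_inference_id: 'my-mistral-endpoint',  # hypothetical name
    body: {
      service: 'mistral',
      service_settings: {
        api_key: ENV['MISTRAL_API_KEY'],  # placeholder credential
        model: 'mistral-embed'            # assumed model setting
      }
    }
  )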
Source
# File lib/elasticsearch/api/actions/inference/put_openai.rb, line 41
def put_openai(arguments = {})
  request_opts = { endpoint: arguments[:endpoint] || 'inference.put_openai' }

  defined_params = [:task_type, :openai_inference_id].each_with_object({}) do |variable, set_variables|
    set_variables[variable] = arguments[variable] if arguments.key?(variable)
  end
  request_opts[:defined_params] = defined_params unless defined_params.empty?

  raise ArgumentError, "Required argument 'task_type' missing" unless arguments[:task_type]

  unless arguments[:openai_inference_id]
    raise ArgumentError, "Required argument 'openai_inference_id' missing"
  end

  arguments = arguments.clone
  headers = arguments.delete(:headers) || {}

  body = arguments.delete(:body)

  _task_type = arguments.delete(:task_type)

  _openai_inference_id = arguments.delete(:openai_inference_id)

  method = Elasticsearch::API::HTTP_PUT
  path   = "_inference/#{Utils.listify(_task_type)}/#{Utils.listify(_openai_inference_id)}"
  params = {}

  Elasticsearch::API::Response.new(
    perform_request(method, path, params, body, headers, request_opts)
  )
end
Create an OpenAI inference endpoint. Create an inference endpoint to perform an inference task with the +openai+ service. When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for +"state": "fully_allocated"+ in the response and ensure that the +"allocation_count"+ matches the +"target_allocation_count"+. Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
@option arguments [String] :task_type The type of the inference task that the model will perform. NOTE: The +chat_completion+ task type only supports streaming and only through the _stream API. (Required)
@option arguments [String] :openai_inference_id The unique identifier of the inference endpoint. (Required)
@option arguments [Hash] :headers Custom HTTP headers
@option arguments [Hash] :body request body
@see www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-inference-put-openai
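For example, a sketch of an embedding endpoint backed by OpenAI; the endpoint name, key, and model are placeholders:

  client.inference.put_openai(
    task_type: 'text_embedding',
    openai_inference_id: 'my-openai-endpoint',  # hypothetical name
    body: {
      service: 'openai',
      service_settings: {
        api_key: ENV['OPENAI_API_KEY'],     # placeholder credential
        model_id: 'text-embedding-3-small'  # placeholder model
      }
    }
  )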
Source
# File lib/elasticsearch/api/actions/inference/put_voyageai.rb, line 36
def put_voyageai(arguments = {})
  request_opts = { endpoint: arguments[:endpoint] || 'inference.put_voyageai' }

  defined_params = [:task_type, :voyageai_inference_id].each_with_object({}) do |variable, set_variables|
    set_variables[variable] = arguments[variable] if arguments.key?(variable)
  end
  request_opts[:defined_params] = defined_params unless defined_params.empty?

  raise ArgumentError, "Required argument 'task_type' missing" unless arguments[:task_type]

  unless arguments[:voyageai_inference_id]
    raise ArgumentError, "Required argument 'voyageai_inference_id' missing"
  end

  arguments = arguments.clone
  headers = arguments.delete(:headers) || {}

  body = arguments.delete(:body)

  _task_type = arguments.delete(:task_type)

  _voyageai_inference_id = arguments.delete(:voyageai_inference_id)

  method = Elasticsearch::API::HTTP_PUT
  path   = "_inference/#{Utils.listify(_task_type)}/#{Utils.listify(_voyageai_inference_id)}"
  params = {}

  Elasticsearch::API::Response.new(
    perform_request(method, path, params, body, headers, request_opts)
  )
end
Create a VoyageAI inference endpoint. Create an inference endpoint to perform an inference task with the +voyageai+ service. Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
@option arguments [String] :task_type The type of the inference task that the model will perform. (Required)
@option arguments [String] :voyageai_inference_id The unique identifier of the inference endpoint. (Required)
@option arguments [Hash] :headers Custom HTTP headers
@option arguments [Hash] :body request body
@see www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-inference-put-voyageai
Source
# File lib/elasticsearch/api/actions/inference/put_watsonx.rb, line 43
def put_watsonx(arguments = {})
  request_opts = { endpoint: arguments[:endpoint] || 'inference.put_watsonx' }

  defined_params = [:task_type, :watsonx_inference_id].each_with_object({}) do |variable, set_variables|
    set_variables[variable] = arguments[variable] if arguments.key?(variable)
  end
  request_opts[:defined_params] = defined_params unless defined_params.empty?

  raise ArgumentError, "Required argument 'task_type' missing" unless arguments[:task_type]

  unless arguments[:watsonx_inference_id]
    raise ArgumentError, "Required argument 'watsonx_inference_id' missing"
  end

  arguments = arguments.clone
  headers = arguments.delete(:headers) || {}

  body = arguments.delete(:body)

  _task_type = arguments.delete(:task_type)

  _watsonx_inference_id = arguments.delete(:watsonx_inference_id)

  method = Elasticsearch::API::HTTP_PUT
  path   = "_inference/#{Utils.listify(_task_type)}/#{Utils.listify(_watsonx_inference_id)}"
  params = {}

  Elasticsearch::API::Response.new(
    perform_request(method, path, params, body, headers, request_opts)
  )
end
Create a Watsonx inference endpoint. Create an inference endpoint to perform an inference task with the +watsonxai+ service. You need an IBM Cloud Databases for Elasticsearch deployment to use the +watsonxai+ inference service. You can provision one through the IBM catalog, the Cloud Databases CLI plug-in, the Cloud Databases API, or Terraform. When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for +"state": "fully_allocated"+ in the response and ensure that the +"allocation_count"+ matches the +"target_allocation_count"+. Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
@option arguments [String] :task_type The task type. The only valid task type for the model to perform is +text_embedding+. (Required)
@option arguments [String] :watsonx_inference_id The unique identifier of the inference endpoint. (Required)
@option arguments [Hash] :headers Custom HTTP headers
@option arguments [Hash] :body request body
@see www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-inference-put-watsonx
Source
# File lib/elasticsearch/api/actions/inference/rerank.rb, line 34
def rerank(arguments = {})
  request_opts = { endpoint: arguments[:endpoint] || 'inference.rerank' }

  defined_params = [:inference_id].each_with_object({}) do |variable, set_variables|
    set_variables[variable] = arguments[variable] if arguments.key?(variable)
  end
  request_opts[:defined_params] = defined_params unless defined_params.empty?

  raise ArgumentError, "Required argument 'inference_id' missing" unless arguments[:inference_id]

  arguments = arguments.clone
  headers = arguments.delete(:headers) || {}

  body = arguments.delete(:body)

  _inference_id = arguments.delete(:inference_id)

  method = Elasticsearch::API::HTTP_POST
  path   = "_inference/rerank/#{Utils.listify(_inference_id)}"
  params = Utils.process_params(arguments)

  Elasticsearch::API::Response.new(
    perform_request(method, path, params, body, headers, request_opts)
  )
end
Perform reranking inference on the service
@option arguments [String] :inference_id The unique identifier for the inference endpoint. (Required)
@option arguments [Time] :timeout The amount of time to wait for the inference request to complete. Server default: 30s.
@option arguments [Hash] :headers Custom HTTP headers
@option arguments [Hash] :body request body
@see www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-inference-inference
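For example, a sketch ranking candidate documents against a query (the endpoint name is illustrative only):

  response = client.inference.rerank(
    inference_id: 'my-rerank-endpoint',  # hypothetical endpoint name
    body: {
      query: 'What is the capital of France?',
      input: [
        'Paris is the capital of France.',
        'Berlin is the capital of Germany.'
      ]
    }
  )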
Source
# File lib/elasticsearch/api/actions/inference/sparse_embedding.rb, line 34
def sparse_embedding(arguments = {})
  request_opts = { endpoint: arguments[:endpoint] || 'inference.sparse_embedding' }

  defined_params = [:inference_id].each_with_object({}) do |variable, set_variables|
    set_variables[variable] = arguments[variable] if arguments.key?(variable)
  end
  request_opts[:defined_params] = defined_params unless defined_params.empty?

  raise ArgumentError, "Required argument 'inference_id' missing" unless arguments[:inference_id]

  arguments = arguments.clone
  headers = arguments.delete(:headers) || {}

  body = arguments.delete(:body)

  _inference_id = arguments.delete(:inference_id)

  method = Elasticsearch::API::HTTP_POST
  path   = "_inference/sparse_embedding/#{Utils.listify(_inference_id)}"
  params = Utils.process_params(arguments)

  Elasticsearch::API::Response.new(
    perform_request(method, path, params, body, headers, request_opts)
  )
end
Perform sparse embedding inference on the service
@option arguments [String] :inference_id The inference Id (Required)
@option arguments [Time] :timeout Specifies the amount of time to wait for the inference request to complete. Server default: 30s.
@option arguments [Hash] :headers Custom HTTP headers
@option arguments [Hash] :body request body
@see www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-inference-inference
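For example, a minimal sketch producing sparse token-weight vectors, such as from an ELSER-backed endpoint (the endpoint name is illustrative only):

  response = client.inference.sparse_embedding(
    inference_id: 'my-elser-endpoint',  # hypothetical endpoint name
    body: { input: 'These are not the droids you are looking for.' }
  )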
Source
# File lib/elasticsearch/api/actions/inference/stream_completion.rb, line 37
def stream_completion(arguments = {})
  request_opts = { endpoint: arguments[:endpoint] || 'inference.stream_completion' }

  defined_params = [:inference_id].each_with_object({}) do |variable, set_variables|
    set_variables[variable] = arguments[variable] if arguments.key?(variable)
  end
  request_opts[:defined_params] = defined_params unless defined_params.empty?

  raise ArgumentError, "Required argument 'inference_id' missing" unless arguments[:inference_id]

  arguments = arguments.clone
  headers = arguments.delete(:headers) || {}

  body = arguments.delete(:body)

  _inference_id = arguments.delete(:inference_id)

  method = Elasticsearch::API::HTTP_POST
  path   = "_inference/completion/#{Utils.listify(_inference_id)}/_stream"
  params = {}

  Elasticsearch::API::Response.new(
    perform_request(method, path, params, body, headers, request_opts)
  )
end
Perform streaming inference. Get real-time responses for completion tasks by delivering answers incrementally, reducing response times during computation. This API works only with the completion task type. IMPORTANT: The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Azure, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs. This API requires the +monitor_inference+ cluster privilege (the built-in +inference_admin+ and +inference_user+ roles grant this privilege). You must use a client that supports streaming.
@option arguments [String] :inference_id The unique identifier for the inference endpoint. (Required)
@option arguments [Hash] :headers Custom HTTP headers
@option arguments [Hash] :body request body
@see www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-inference-stream-inference
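For example, a sketch assuming a transport configured for streamed responses (the endpoint name is illustrative only):

  # Requires a client/transport that can consume streamed output.
  client.inference.stream_completion(
    inference_id: 'my-completion-endpoint',  # hypothetical endpoint name
    body: { input: 'Explain inverted indexes in one paragraph.' }
  )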
Source
# File lib/elasticsearch/api/actions/inference/text_embedding.rb, line 34
def text_embedding(arguments = {})
  request_opts = { endpoint: arguments[:endpoint] || 'inference.text_embedding' }

  defined_params = [:inference_id].each_with_object({}) do |variable, set_variables|
    set_variables[variable] = arguments[variable] if arguments.key?(variable)
  end
  request_opts[:defined_params] = defined_params unless defined_params.empty?

  raise ArgumentError, "Required argument 'inference_id' missing" unless arguments[:inference_id]

  arguments = arguments.clone
  headers = arguments.delete(:headers) || {}

  body = arguments.delete(:body)

  _inference_id = arguments.delete(:inference_id)

  method = Elasticsearch::API::HTTP_POST
  path   = "_inference/text_embedding/#{Utils.listify(_inference_id)}"
  params = Utils.process_params(arguments)

  Elasticsearch::API::Response.new(
    perform_request(method, path, params, body, headers, request_opts)
  )
end
Perform text embedding inference on the service
@option arguments [String] :inference_id The inference Id (Required)
@option arguments [Time] :timeout Specifies the amount of time to wait for the inference request to complete. Server default: 30s.
@option arguments [Hash] :headers Custom HTTP headers
@option arguments [Hash] :body request body
@see www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-inference-inference
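For example, a minimal sketch embedding a piece of text with an existing endpoint (the endpoint name is illustrative only):

  response = client.inference.text_embedding(
    inference_id: 'my-embedding-endpoint',  # hypothetical endpoint name
    body: { input: 'Elasticsearch is a distributed search engine.' }
  )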
Source
# File lib/elasticsearch/api/actions/inference/update.rb, line 38
def update(arguments = {})
  request_opts = { endpoint: arguments[:endpoint] || 'inference.update' }

  defined_params = [:inference_id, :task_type].each_with_object({}) do |variable, set_variables|
    set_variables[variable] = arguments[variable] if arguments.key?(variable)
  end
  request_opts[:defined_params] = defined_params unless defined_params.empty?

  raise ArgumentError, "Required argument 'body' missing" unless arguments[:body]
  raise ArgumentError, "Required argument 'inference_id' missing" unless arguments[:inference_id]

  arguments = arguments.clone
  headers = arguments.delete(:headers) || {}

  body = arguments.delete(:body)

  _inference_id = arguments.delete(:inference_id)

  _task_type = arguments.delete(:task_type)

  method = Elasticsearch::API::HTTP_PUT
  path   = if _task_type && _inference_id
             "_inference/#{Utils.listify(_task_type)}/#{Utils.listify(_inference_id)}/_update"
           else
             "_inference/#{Utils.listify(_inference_id)}/_update"
           end
  params = {}

  Elasticsearch::API::Response.new(
    perform_request(method, path, params, body, headers, request_opts)
  )
end
Update an inference endpoint. Modify +task_settings+, secrets (within +service_settings+), or +num_allocations+ for an inference endpoint, depending on the specific endpoint service and +task_type+. IMPORTANT: The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Azure, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs.
@option arguments [String] :inference_id The unique identifier of the inference endpoint. (Required)
@option arguments [String] :task_type The type of inference task that the model performs.
@option arguments [Hash] :headers Custom HTTP headers
@option arguments [Hash] :body inference_config
@see www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-inference-update
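For example, a sketch rotating the API key stored in an endpoint's secrets; the endpoint name is illustrative, and the exact updatable fields depend on the service and task type:

  client.inference.update(
    inference_id: 'my-openai-endpoint',  # hypothetical endpoint name
    body: {
      service_settings: { api_key: ENV['OPENAI_API_KEY'] }  # placeholder credential
    }
  )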