AWS SDK for C++  1.9.103
AWS SDK for C++
Public Types | Public Member Functions | List of all members
Aws::MachineLearning::MachineLearningClient Class Reference

#include <MachineLearningClient.h>

+ Inheritance diagram for Aws::MachineLearning::MachineLearningClient:

Public Types

typedef Aws::Client::AWSJsonClient BASECLASS
 
- Public Types inherited from Aws::Client::AWSJsonClient
typedef AWSClient BASECLASS
 

Public Member Functions

 MachineLearningClient (const Aws::Client::ClientConfiguration &clientConfiguration=Aws::Client::ClientConfiguration())
 
 MachineLearningClient (const Aws::Auth::AWSCredentials &credentials, const Aws::Client::ClientConfiguration &clientConfiguration=Aws::Client::ClientConfiguration())
 
 MachineLearningClient (const std::shared_ptr< Aws::Auth::AWSCredentialsProvider > &credentialsProvider, const Aws::Client::ClientConfiguration &clientConfiguration=Aws::Client::ClientConfiguration())
 
virtual ~MachineLearningClient ()
 
virtual Model::AddTagsOutcome AddTags (const Model::AddTagsRequest &request) const
 
virtual Model::AddTagsOutcomeCallable AddTagsCallable (const Model::AddTagsRequest &request) const
 
virtual void AddTagsAsync (const Model::AddTagsRequest &request, const AddTagsResponseReceivedHandler &handler, const std::shared_ptr< const Aws::Client::AsyncCallerContext > &context=nullptr) const
 
virtual Model::CreateBatchPredictionOutcome CreateBatchPrediction (const Model::CreateBatchPredictionRequest &request) const
 
virtual Model::CreateBatchPredictionOutcomeCallable CreateBatchPredictionCallable (const Model::CreateBatchPredictionRequest &request) const
 
virtual void CreateBatchPredictionAsync (const Model::CreateBatchPredictionRequest &request, const CreateBatchPredictionResponseReceivedHandler &handler, const std::shared_ptr< const Aws::Client::AsyncCallerContext > &context=nullptr) const
 
virtual Model::CreateDataSourceFromRDSOutcome CreateDataSourceFromRDS (const Model::CreateDataSourceFromRDSRequest &request) const
 
virtual Model::CreateDataSourceFromRDSOutcomeCallable CreateDataSourceFromRDSCallable (const Model::CreateDataSourceFromRDSRequest &request) const
 
virtual void CreateDataSourceFromRDSAsync (const Model::CreateDataSourceFromRDSRequest &request, const CreateDataSourceFromRDSResponseReceivedHandler &handler, const std::shared_ptr< const Aws::Client::AsyncCallerContext > &context=nullptr) const
 
virtual Model::CreateDataSourceFromRedshiftOutcome CreateDataSourceFromRedshift (const Model::CreateDataSourceFromRedshiftRequest &request) const
 
virtual Model::CreateDataSourceFromRedshiftOutcomeCallable CreateDataSourceFromRedshiftCallable (const Model::CreateDataSourceFromRedshiftRequest &request) const
 
virtual void CreateDataSourceFromRedshiftAsync (const Model::CreateDataSourceFromRedshiftRequest &request, const CreateDataSourceFromRedshiftResponseReceivedHandler &handler, const std::shared_ptr< const Aws::Client::AsyncCallerContext > &context=nullptr) const
 
virtual Model::CreateDataSourceFromS3Outcome CreateDataSourceFromS3 (const Model::CreateDataSourceFromS3Request &request) const
 
virtual Model::CreateDataSourceFromS3OutcomeCallable CreateDataSourceFromS3Callable (const Model::CreateDataSourceFromS3Request &request) const
 
virtual void CreateDataSourceFromS3Async (const Model::CreateDataSourceFromS3Request &request, const CreateDataSourceFromS3ResponseReceivedHandler &handler, const std::shared_ptr< const Aws::Client::AsyncCallerContext > &context=nullptr) const
 
virtual Model::CreateEvaluationOutcome CreateEvaluation (const Model::CreateEvaluationRequest &request) const
 
virtual Model::CreateEvaluationOutcomeCallable CreateEvaluationCallable (const Model::CreateEvaluationRequest &request) const
 
virtual void CreateEvaluationAsync (const Model::CreateEvaluationRequest &request, const CreateEvaluationResponseReceivedHandler &handler, const std::shared_ptr< const Aws::Client::AsyncCallerContext > &context=nullptr) const
 
virtual Model::CreateMLModelOutcome CreateMLModel (const Model::CreateMLModelRequest &request) const
 
virtual Model::CreateMLModelOutcomeCallable CreateMLModelCallable (const Model::CreateMLModelRequest &request) const
 
virtual void CreateMLModelAsync (const Model::CreateMLModelRequest &request, const CreateMLModelResponseReceivedHandler &handler, const std::shared_ptr< const Aws::Client::AsyncCallerContext > &context=nullptr) const
 
virtual Model::CreateRealtimeEndpointOutcome CreateRealtimeEndpoint (const Model::CreateRealtimeEndpointRequest &request) const
 
virtual Model::CreateRealtimeEndpointOutcomeCallable CreateRealtimeEndpointCallable (const Model::CreateRealtimeEndpointRequest &request) const
 
virtual void CreateRealtimeEndpointAsync (const Model::CreateRealtimeEndpointRequest &request, const CreateRealtimeEndpointResponseReceivedHandler &handler, const std::shared_ptr< const Aws::Client::AsyncCallerContext > &context=nullptr) const
 
virtual Model::DeleteBatchPredictionOutcome DeleteBatchPrediction (const Model::DeleteBatchPredictionRequest &request) const
 
virtual Model::DeleteBatchPredictionOutcomeCallable DeleteBatchPredictionCallable (const Model::DeleteBatchPredictionRequest &request) const
 
virtual void DeleteBatchPredictionAsync (const Model::DeleteBatchPredictionRequest &request, const DeleteBatchPredictionResponseReceivedHandler &handler, const std::shared_ptr< const Aws::Client::AsyncCallerContext > &context=nullptr) const
 
virtual Model::DeleteDataSourceOutcome DeleteDataSource (const Model::DeleteDataSourceRequest &request) const
 
virtual Model::DeleteDataSourceOutcomeCallable DeleteDataSourceCallable (const Model::DeleteDataSourceRequest &request) const
 
virtual void DeleteDataSourceAsync (const Model::DeleteDataSourceRequest &request, const DeleteDataSourceResponseReceivedHandler &handler, const std::shared_ptr< const Aws::Client::AsyncCallerContext > &context=nullptr) const
 
virtual Model::DeleteEvaluationOutcome DeleteEvaluation (const Model::DeleteEvaluationRequest &request) const
 
virtual Model::DeleteEvaluationOutcomeCallable DeleteEvaluationCallable (const Model::DeleteEvaluationRequest &request) const
 
virtual void DeleteEvaluationAsync (const Model::DeleteEvaluationRequest &request, const DeleteEvaluationResponseReceivedHandler &handler, const std::shared_ptr< const Aws::Client::AsyncCallerContext > &context=nullptr) const
 
virtual Model::DeleteMLModelOutcome DeleteMLModel (const Model::DeleteMLModelRequest &request) const
 
virtual Model::DeleteMLModelOutcomeCallable DeleteMLModelCallable (const Model::DeleteMLModelRequest &request) const
 
virtual void DeleteMLModelAsync (const Model::DeleteMLModelRequest &request, const DeleteMLModelResponseReceivedHandler &handler, const std::shared_ptr< const Aws::Client::AsyncCallerContext > &context=nullptr) const
 
virtual Model::DeleteRealtimeEndpointOutcome DeleteRealtimeEndpoint (const Model::DeleteRealtimeEndpointRequest &request) const
 
virtual Model::DeleteRealtimeEndpointOutcomeCallable DeleteRealtimeEndpointCallable (const Model::DeleteRealtimeEndpointRequest &request) const
 
virtual void DeleteRealtimeEndpointAsync (const Model::DeleteRealtimeEndpointRequest &request, const DeleteRealtimeEndpointResponseReceivedHandler &handler, const std::shared_ptr< const Aws::Client::AsyncCallerContext > &context=nullptr) const
 
virtual Model::DeleteTagsOutcome DeleteTags (const Model::DeleteTagsRequest &request) const
 
virtual Model::DeleteTagsOutcomeCallable DeleteTagsCallable (const Model::DeleteTagsRequest &request) const
 
virtual void DeleteTagsAsync (const Model::DeleteTagsRequest &request, const DeleteTagsResponseReceivedHandler &handler, const std::shared_ptr< const Aws::Client::AsyncCallerContext > &context=nullptr) const
 
virtual Model::DescribeBatchPredictionsOutcome DescribeBatchPredictions (const Model::DescribeBatchPredictionsRequest &request) const
 
virtual Model::DescribeBatchPredictionsOutcomeCallable DescribeBatchPredictionsCallable (const Model::DescribeBatchPredictionsRequest &request) const
 
virtual void DescribeBatchPredictionsAsync (const Model::DescribeBatchPredictionsRequest &request, const DescribeBatchPredictionsResponseReceivedHandler &handler, const std::shared_ptr< const Aws::Client::AsyncCallerContext > &context=nullptr) const
 
virtual Model::DescribeDataSourcesOutcome DescribeDataSources (const Model::DescribeDataSourcesRequest &request) const
 
virtual Model::DescribeDataSourcesOutcomeCallable DescribeDataSourcesCallable (const Model::DescribeDataSourcesRequest &request) const
 
virtual void DescribeDataSourcesAsync (const Model::DescribeDataSourcesRequest &request, const DescribeDataSourcesResponseReceivedHandler &handler, const std::shared_ptr< const Aws::Client::AsyncCallerContext > &context=nullptr) const
 
virtual Model::DescribeEvaluationsOutcome DescribeEvaluations (const Model::DescribeEvaluationsRequest &request) const
 
virtual Model::DescribeEvaluationsOutcomeCallable DescribeEvaluationsCallable (const Model::DescribeEvaluationsRequest &request) const
 
virtual void DescribeEvaluationsAsync (const Model::DescribeEvaluationsRequest &request, const DescribeEvaluationsResponseReceivedHandler &handler, const std::shared_ptr< const Aws::Client::AsyncCallerContext > &context=nullptr) const
 
virtual Model::DescribeMLModelsOutcome DescribeMLModels (const Model::DescribeMLModelsRequest &request) const
 
virtual Model::DescribeMLModelsOutcomeCallable DescribeMLModelsCallable (const Model::DescribeMLModelsRequest &request) const
 
virtual void DescribeMLModelsAsync (const Model::DescribeMLModelsRequest &request, const DescribeMLModelsResponseReceivedHandler &handler, const std::shared_ptr< const Aws::Client::AsyncCallerContext > &context=nullptr) const
 
virtual Model::DescribeTagsOutcome DescribeTags (const Model::DescribeTagsRequest &request) const
 
virtual Model::DescribeTagsOutcomeCallable DescribeTagsCallable (const Model::DescribeTagsRequest &request) const
 
virtual void DescribeTagsAsync (const Model::DescribeTagsRequest &request, const DescribeTagsResponseReceivedHandler &handler, const std::shared_ptr< const Aws::Client::AsyncCallerContext > &context=nullptr) const
 
virtual Model::GetBatchPredictionOutcome GetBatchPrediction (const Model::GetBatchPredictionRequest &request) const
 
virtual Model::GetBatchPredictionOutcomeCallable GetBatchPredictionCallable (const Model::GetBatchPredictionRequest &request) const
 
virtual void GetBatchPredictionAsync (const Model::GetBatchPredictionRequest &request, const GetBatchPredictionResponseReceivedHandler &handler, const std::shared_ptr< const Aws::Client::AsyncCallerContext > &context=nullptr) const
 
virtual Model::GetDataSourceOutcome GetDataSource (const Model::GetDataSourceRequest &request) const
 
virtual Model::GetDataSourceOutcomeCallable GetDataSourceCallable (const Model::GetDataSourceRequest &request) const
 
virtual void GetDataSourceAsync (const Model::GetDataSourceRequest &request, const GetDataSourceResponseReceivedHandler &handler, const std::shared_ptr< const Aws::Client::AsyncCallerContext > &context=nullptr) const
 
virtual Model::GetEvaluationOutcome GetEvaluation (const Model::GetEvaluationRequest &request) const
 
virtual Model::GetEvaluationOutcomeCallable GetEvaluationCallable (const Model::GetEvaluationRequest &request) const
 
virtual void GetEvaluationAsync (const Model::GetEvaluationRequest &request, const GetEvaluationResponseReceivedHandler &handler, const std::shared_ptr< const Aws::Client::AsyncCallerContext > &context=nullptr) const
 
virtual Model::GetMLModelOutcome GetMLModel (const Model::GetMLModelRequest &request) const
 
virtual Model::GetMLModelOutcomeCallable GetMLModelCallable (const Model::GetMLModelRequest &request) const
 
virtual void GetMLModelAsync (const Model::GetMLModelRequest &request, const GetMLModelResponseReceivedHandler &handler, const std::shared_ptr< const Aws::Client::AsyncCallerContext > &context=nullptr) const
 
virtual Model::PredictOutcome Predict (const Model::PredictRequest &request) const
 
virtual Model::PredictOutcomeCallable PredictCallable (const Model::PredictRequest &request) const
 
virtual void PredictAsync (const Model::PredictRequest &request, const PredictResponseReceivedHandler &handler, const std::shared_ptr< const Aws::Client::AsyncCallerContext > &context=nullptr) const
 
virtual Model::UpdateBatchPredictionOutcome UpdateBatchPrediction (const Model::UpdateBatchPredictionRequest &request) const
 
virtual Model::UpdateBatchPredictionOutcomeCallable UpdateBatchPredictionCallable (const Model::UpdateBatchPredictionRequest &request) const
 
virtual void UpdateBatchPredictionAsync (const Model::UpdateBatchPredictionRequest &request, const UpdateBatchPredictionResponseReceivedHandler &handler, const std::shared_ptr< const Aws::Client::AsyncCallerContext > &context=nullptr) const
 
virtual Model::UpdateDataSourceOutcome UpdateDataSource (const Model::UpdateDataSourceRequest &request) const
 
virtual Model::UpdateDataSourceOutcomeCallable UpdateDataSourceCallable (const Model::UpdateDataSourceRequest &request) const
 
virtual void UpdateDataSourceAsync (const Model::UpdateDataSourceRequest &request, const UpdateDataSourceResponseReceivedHandler &handler, const std::shared_ptr< const Aws::Client::AsyncCallerContext > &context=nullptr) const
 
virtual Model::UpdateEvaluationOutcome UpdateEvaluation (const Model::UpdateEvaluationRequest &request) const
 
virtual Model::UpdateEvaluationOutcomeCallable UpdateEvaluationCallable (const Model::UpdateEvaluationRequest &request) const
 
virtual void UpdateEvaluationAsync (const Model::UpdateEvaluationRequest &request, const UpdateEvaluationResponseReceivedHandler &handler, const std::shared_ptr< const Aws::Client::AsyncCallerContext > &context=nullptr) const
 
virtual Model::UpdateMLModelOutcome UpdateMLModel (const Model::UpdateMLModelRequest &request) const
 
virtual Model::UpdateMLModelOutcomeCallable UpdateMLModelCallable (const Model::UpdateMLModelRequest &request) const
 
virtual void UpdateMLModelAsync (const Model::UpdateMLModelRequest &request, const UpdateMLModelResponseReceivedHandler &handler, const std::shared_ptr< const Aws::Client::AsyncCallerContext > &context=nullptr) const
 
void OverrideEndpoint (const Aws::String &endpoint)
 
- Public Member Functions inherited from Aws::Client::AWSJsonClient
 AWSJsonClient (const Aws::Client::ClientConfiguration &configuration, const std::shared_ptr< Aws::Client::AWSAuthSigner > &signer, const std::shared_ptr< AWSErrorMarshaller > &errorMarshaller)
 
 AWSJsonClient (const Aws::Client::ClientConfiguration &configuration, const std::shared_ptr< Aws::Auth::AWSAuthSignerProvider > &signerProvider, const std::shared_ptr< AWSErrorMarshaller > &errorMarshaller)
 
virtual ~AWSJsonClient ()=default
 
- Public Member Functions inherited from Aws::Client::AWSClient
 AWSClient (const Aws::Client::ClientConfiguration &configuration, const std::shared_ptr< Aws::Client::AWSAuthSigner > &signer, const std::shared_ptr< AWSErrorMarshaller > &errorMarshaller)
 
 AWSClient (const Aws::Client::ClientConfiguration &configuration, const std::shared_ptr< Aws::Auth::AWSAuthSignerProvider > &signerProvider, const std::shared_ptr< AWSErrorMarshaller > &errorMarshaller)
 
virtual ~AWSClient ()
 
Aws::String GeneratePresignedUrl (Aws::Http::URI &uri, Aws::Http::HttpMethod method, long long expirationInSeconds=0)
 
Aws::String GeneratePresignedUrl (Aws::Http::URI &uri, Aws::Http::HttpMethod method, const Aws::Http::HeaderValueCollection &customizedHeaders, long long expirationInSeconds=0)
 
Aws::String GeneratePresignedUrl (Aws::Http::URI &uri, Aws::Http::HttpMethod method, const char *region, long long expirationInSeconds=0) const
 
Aws::String GeneratePresignedUrl (Aws::Http::URI &uri, Aws::Http::HttpMethod method, const char *region, const Aws::Http::HeaderValueCollection &customizedHeaders, long long expirationInSeconds=0)
 
Aws::String GeneratePresignedUrl (Aws::Http::URI &uri, Aws::Http::HttpMethod method, const char *region, const char *serviceName, long long expirationInSeconds=0) const
 
Aws::String GeneratePresignedUrl (Aws::Http::URI &uri, Aws::Http::HttpMethod method, const char *region, const char *serviceName, const Aws::Http::HeaderValueCollection &customizedHeaders, long long expirationInSeconds=0)
 
Aws::String GeneratePresignedUrl (Aws::Http::URI &uri, Aws::Http::HttpMethod method, const char *region, const char *serviceName, const char *signerName, long long expirationInSeconds=0) const
 
Aws::String GeneratePresignedUrl (Aws::Http::URI &uri, Aws::Http::HttpMethod method, const char *region, const char *serviceName, const char *signerName, const Aws::Http::HeaderValueCollection &customizedHeaders, long long expirationInSeconds=0)
 
Aws::String GeneratePresignedUrl (const Aws::AmazonWebServiceRequest &request, Aws::Http::URI &uri, Aws::Http::HttpMethod method, const Aws::Http::QueryStringParameterCollection &extraParams=Aws::Http::QueryStringParameterCollection(), long long expirationInSeconds=0) const
 
Aws::String GeneratePresignedUrl (const Aws::AmazonWebServiceRequest &request, Aws::Http::URI &uri, Aws::Http::HttpMethod method, const char *region, const char *serviceName, const char *signerName, const Aws::Http::QueryStringParameterCollection &extraParams=Aws::Http::QueryStringParameterCollection(), long long expirationInSeconds=0) const
 
Aws::String GeneratePresignedUrl (const Aws::AmazonWebServiceRequest &request, Aws::Http::URI &uri, Aws::Http::HttpMethod method, const char *region, const char *serviceName, const Aws::Http::QueryStringParameterCollection &extraParams=Aws::Http::QueryStringParameterCollection(), long long expirationInSeconds=0) const
 
Aws::String GeneratePresignedUrl (const Aws::AmazonWebServiceRequest &request, Aws::Http::URI &uri, Aws::Http::HttpMethod method, const char *region, const Aws::Http::QueryStringParameterCollection &extraParams=Aws::Http::QueryStringParameterCollection(), long long expirationInSeconds=0) const
 
void DisableRequestProcessing ()
 
void EnableRequestProcessing ()
 
virtual const char * GetServiceClientName () const
 
virtual void SetServiceClientName (const Aws::String &name)
 

Additional Inherited Members

- Protected Member Functions inherited from Aws::Client::AWSJsonClient
virtual AWSError< CoreErrors > BuildAWSError (const std::shared_ptr< Aws::Http::HttpResponse > &response) const override
 
JsonOutcome MakeRequest (const Aws::Http::URI &uri, const Aws::AmazonWebServiceRequest &request, Http::HttpMethod method=Http::HttpMethod::HTTP_POST, const char *signerName=Aws::Auth::SIGV4_SIGNER, const char *signerRegionOverride=nullptr, const char *signerServiceNameOverride=nullptr) const
 
JsonOutcome MakeRequest (const Aws::Http::URI &uri, Http::HttpMethod method=Http::HttpMethod::HTTP_POST, const char *signerName=Aws::Auth::SIGV4_SIGNER, const char *requestName="", const char *signerRegionOverride=nullptr, const char *signerServiceNameOverride=nullptr) const
 
JsonOutcome MakeEventStreamRequest (std::shared_ptr< Aws::Http::HttpRequest > &request) const
 
- Protected Member Functions inherited from Aws::Client::AWSClient
HttpResponseOutcome AttemptExhaustively (const Aws::Http::URI &uri, const Aws::AmazonWebServiceRequest &request, Http::HttpMethod httpMethod, const char *signerName, const char *signerRegionOverride=nullptr, const char *signerServiceNameOverride=nullptr) const
 
HttpResponseOutcome AttemptExhaustively (const Aws::Http::URI &uri, Http::HttpMethod httpMethod, const char *signerName, const char *requestName="", const char *signerRegionOverride=nullptr, const char *signerServiceNameOverride=nullptr) const
 
HttpResponseOutcome AttemptOneRequest (const std::shared_ptr< Http::HttpRequest > &httpRequest, const Aws::AmazonWebServiceRequest &request, const char *signerName, const char *signerRegionOverride=nullptr, const char *signerServiceNameOverride=nullptr) const
 
HttpResponseOutcome AttemptOneRequest (const std::shared_ptr< Http::HttpRequest > &httpRequest, const char *signerName, const char *requestName="", const char *signerRegionOverride=nullptr, const char *signerServiceNameOverride=nullptr) const
 
StreamOutcome MakeRequestWithUnparsedResponse (const Aws::Http::URI &uri, const Aws::AmazonWebServiceRequest &request, Http::HttpMethod method=Http::HttpMethod::HTTP_POST, const char *signerName=Aws::Auth::SIGV4_SIGNER, const char *signerRegionOverride=nullptr, const char *signerServiceNameOverride=nullptr) const
 
StreamOutcome MakeRequestWithUnparsedResponse (const Aws::Http::URI &uri, Http::HttpMethod method=Http::HttpMethod::HTTP_POST, const char *signerName=Aws::Auth::SIGV4_SIGNER, const char *requestName="", const char *signerRegionOverride=nullptr, const char *signerServiceNameOverride=nullptr) const
 
virtual void BuildHttpRequest (const Aws::AmazonWebServiceRequest &request, const std::shared_ptr< Aws::Http::HttpRequest > &httpRequest) const
 
const std::shared_ptr< AWSErrorMarshaller > & GetErrorMarshaller () const
 
Aws::Client::AWSAuthSigner * GetSignerByName (const char *name) const
 
std::shared_ptr< Aws::Http::HttpRequest > BuildAndSignHttpRequest (const Aws::Http::URI &uri, const Aws::AmazonWebServiceRequest &request, Http::HttpMethod method, const char *signerName) const
 
std::shared_ptr< Aws::Http::HttpResponse > MakeHttpRequest (std::shared_ptr< Aws::Http::HttpRequest > &request) const
 
- Protected Attributes inherited from Aws::Client::AWSClient
Aws::String m_region
 

Detailed Description

Definition of the public APIs exposed by Amazon Machine Learning

Definition at line 203 of file MachineLearningClient.h.

Member Typedef Documentation

◆ BASECLASS

Definition at line 206 of file MachineLearningClient.h.

Constructor & Destructor Documentation

◆ MachineLearningClient() [1/3]

Aws::MachineLearning::MachineLearningClient::MachineLearningClient ( const Aws::Client::ClientConfiguration &  clientConfiguration = Aws::Client::ClientConfiguration())

Initializes client to use DefaultCredentialProviderChain, with default http client factory, and optional client config. If client config is not specified, it will be initialized to default values.

◆ MachineLearningClient() [2/3]

Aws::MachineLearning::MachineLearningClient::MachineLearningClient ( const Aws::Auth::AWSCredentials &  credentials,
const Aws::Client::ClientConfiguration &  clientConfiguration = Aws::Client::ClientConfiguration() 
)

Initializes client to use SimpleAWSCredentialsProvider, with default http client factory, and optional client config. If client config is not specified, it will be initialized to default values.

◆ MachineLearningClient() [3/3]

Aws::MachineLearning::MachineLearningClient::MachineLearningClient ( const std::shared_ptr< Aws::Auth::AWSCredentialsProvider > &  credentialsProvider,
const Aws::Client::ClientConfiguration &  clientConfiguration = Aws::Client::ClientConfiguration() 
)

Initializes client to use specified credentials provider with specified client config. If http client factory is not supplied, the default http client factory will be used

◆ ~MachineLearningClient()

virtual Aws::MachineLearning::MachineLearningClient::~MachineLearningClient ( )
virtual

Member Function Documentation

◆ AddTags()

virtual Model::AddTagsOutcome Aws::MachineLearning::MachineLearningClient::AddTags ( const Model::AddTagsRequest &  request) const
virtual

Adds one or more tags to an object, up to a limit of 10. Each tag consists of a key and an optional value. If you add a tag using a key that is already associated with the ML object, AddTags updates the tag's value.

See Also:

AWS API Reference

◆ AddTagsAsync()

virtual void Aws::MachineLearning::MachineLearningClient::AddTagsAsync ( const Model::AddTagsRequest &  request,
const AddTagsResponseReceivedHandler &  handler,
const std::shared_ptr< const Aws::Client::AsyncCallerContext > &  context = nullptr 
) const
virtual

Adds one or more tags to an object, up to a limit of 10. Each tag consists of a key and an optional value. If you add a tag using a key that is already associated with the ML object, AddTags updates the tag's value.

See Also:

AWS API Reference

Queues the request into a thread executor and triggers associated callback when operation has finished.

◆ AddTagsCallable()

virtual Model::AddTagsOutcomeCallable Aws::MachineLearning::MachineLearningClient::AddTagsCallable ( const Model::AddTagsRequest &  request) const
virtual

Adds one or more tags to an object, up to a limit of 10. Each tag consists of a key and an optional value. If you add a tag using a key that is already associated with the ML object, AddTags updates the tag's value.

See Also:

AWS API Reference

returns a future to the operation so that it can be executed in parallel to other requests.

◆ CreateBatchPrediction()

virtual Model::CreateBatchPredictionOutcome Aws::MachineLearning::MachineLearningClient::CreateBatchPrediction ( const Model::CreateBatchPredictionRequest &  request) const
virtual

Generates predictions for a group of observations. The observations to process exist in one or more data files referenced by a DataSource. This operation creates a new BatchPrediction, and uses an MLModel and the data files referenced by the DataSource as information sources.

CreateBatchPrediction is an asynchronous operation. In response to CreateBatchPrediction, Amazon Machine Learning (Amazon ML) immediately returns and sets the BatchPrediction status to PENDING. After the BatchPrediction completes, Amazon ML sets the status to COMPLETED.

You can poll for status updates by using the GetBatchPrediction operation and checking the Status parameter of the result. After the COMPLETED status appears, the results are available in the location specified by the OutputUri parameter.

See Also:

AWS API Reference

◆ CreateBatchPredictionAsync()

virtual void Aws::MachineLearning::MachineLearningClient::CreateBatchPredictionAsync ( const Model::CreateBatchPredictionRequest &  request,
const CreateBatchPredictionResponseReceivedHandler &  handler,
const std::shared_ptr< const Aws::Client::AsyncCallerContext > &  context = nullptr 
) const
virtual

Generates predictions for a group of observations. The observations to process exist in one or more data files referenced by a DataSource. This operation creates a new BatchPrediction, and uses an MLModel and the data files referenced by the DataSource as information sources.

CreateBatchPrediction is an asynchronous operation. In response to CreateBatchPrediction, Amazon Machine Learning (Amazon ML) immediately returns and sets the BatchPrediction status to PENDING. After the BatchPrediction completes, Amazon ML sets the status to COMPLETED.

You can poll for status updates by using the GetBatchPrediction operation and checking the Status parameter of the result. After the COMPLETED status appears, the results are available in the location specified by the OutputUri parameter.

See Also:

AWS API Reference

Queues the request into a thread executor and triggers associated callback when operation has finished.

◆ CreateBatchPredictionCallable()

virtual Model::CreateBatchPredictionOutcomeCallable Aws::MachineLearning::MachineLearningClient::CreateBatchPredictionCallable ( const Model::CreateBatchPredictionRequest &  request) const
virtual

Generates predictions for a group of observations. The observations to process exist in one or more data files referenced by a DataSource. This operation creates a new BatchPrediction, and uses an MLModel and the data files referenced by the DataSource as information sources.

CreateBatchPrediction is an asynchronous operation. In response to CreateBatchPrediction, Amazon Machine Learning (Amazon ML) immediately returns and sets the BatchPrediction status to PENDING. After the BatchPrediction completes, Amazon ML sets the status to COMPLETED.

You can poll for status updates by using the GetBatchPrediction operation and checking the Status parameter of the result. After the COMPLETED status appears, the results are available in the location specified by the OutputUri parameter.

See Also:

AWS API Reference

returns a future to the operation so that it can be executed in parallel to other requests.

◆ CreateDataSourceFromRDS()

virtual Model::CreateDataSourceFromRDSOutcome Aws::MachineLearning::MachineLearningClient::CreateDataSourceFromRDS ( const Model::CreateDataSourceFromRDSRequest &  request) const
virtual

Creates a DataSource object from an Amazon Relational Database Service (Amazon RDS). A DataSource references data that can be used to perform CreateMLModel, CreateEvaluation, or CreateBatchPrediction operations.

CreateDataSourceFromRDS is an asynchronous operation. In response to CreateDataSourceFromRDS, Amazon Machine Learning (Amazon ML) immediately returns and sets the DataSource status to PENDING. After the DataSource is created and ready for use, Amazon ML sets the Status parameter to COMPLETED. DataSource in the COMPLETED or PENDING state can be used only to perform CreateMLModel, CreateEvaluation, or CreateBatchPrediction operations.

If Amazon ML cannot accept the input source, it sets the Status parameter to FAILED and includes an error message in the Message attribute of the GetDataSource operation response.

See Also:

AWS API Reference

◆ CreateDataSourceFromRDSAsync()

virtual void Aws::MachineLearning::MachineLearningClient::CreateDataSourceFromRDSAsync ( const Model::CreateDataSourceFromRDSRequest &  request,
const CreateDataSourceFromRDSResponseReceivedHandler &  handler,
const std::shared_ptr< const Aws::Client::AsyncCallerContext > &  context = nullptr 
) const
virtual

Creates a DataSource object from an Amazon Relational Database Service (Amazon RDS). A DataSource references data that can be used to perform CreateMLModel, CreateEvaluation, or CreateBatchPrediction operations.

CreateDataSourceFromRDS is an asynchronous operation. In response to CreateDataSourceFromRDS, Amazon Machine Learning (Amazon ML) immediately returns and sets the DataSource status to PENDING. After the DataSource is created and ready for use, Amazon ML sets the Status parameter to COMPLETED. DataSource in the COMPLETED or PENDING state can be used only to perform CreateMLModel, CreateEvaluation, or CreateBatchPrediction operations.

If Amazon ML cannot accept the input source, it sets the Status parameter to FAILED and includes an error message in the Message attribute of the GetDataSource operation response.

See Also:

AWS API Reference

Queues the request into a thread executor and triggers associated callback when operation has finished.

◆ CreateDataSourceFromRDSCallable()

virtual Model::CreateDataSourceFromRDSOutcomeCallable Aws::MachineLearning::MachineLearningClient::CreateDataSourceFromRDSCallable ( const Model::CreateDataSourceFromRDSRequest &  request) const
virtual

Creates a DataSource object from an Amazon Relational Database Service (Amazon RDS). A DataSource references data that can be used to perform CreateMLModel, CreateEvaluation, or CreateBatchPrediction operations.

CreateDataSourceFromRDS is an asynchronous operation. In response to CreateDataSourceFromRDS, Amazon Machine Learning (Amazon ML) immediately returns and sets the DataSource status to PENDING. After the DataSource is created and ready for use, Amazon ML sets the Status parameter to COMPLETED. DataSource in the COMPLETED or PENDING state can be used only to perform CreateMLModel, CreateEvaluation, or CreateBatchPrediction operations.

If Amazon ML cannot accept the input source, it sets the Status parameter to FAILED and includes an error message in the Message attribute of the GetDataSource operation response.

See Also:

AWS API Reference

returns a future to the operation so that it can be executed in parallel to other requests.

◆ CreateDataSourceFromRedshift()

virtual Model::CreateDataSourceFromRedshiftOutcome Aws::MachineLearning::MachineLearningClient::CreateDataSourceFromRedshift ( const Model::CreateDataSourceFromRedshiftRequest &  request) const
virtual

Creates a DataSource from a database hosted on an Amazon Redshift cluster. A DataSource references data that can be used to perform either CreateMLModel, CreateEvaluation, or CreateBatchPrediction operations.

CreateDataSourceFromRedshift is an asynchronous operation. In response to CreateDataSourceFromRedshift, Amazon Machine Learning (Amazon ML) immediately returns and sets the DataSource status to PENDING. After the DataSource is created and ready for use, Amazon ML sets the Status parameter to COMPLETED. DataSource in COMPLETED or PENDING states can be used to perform only CreateMLModel, CreateEvaluation, or CreateBatchPrediction operations.

If Amazon ML can't accept the input source, it sets the Status parameter to FAILED and includes an error message in the Message attribute of the GetDataSource operation response.

The observations should be contained in the database hosted on an Amazon Redshift cluster and should be specified by a SelectSqlQuery query. Amazon ML executes an Unload command in Amazon Redshift to transfer the result set of the SelectSqlQuery query to S3StagingLocation.

After the DataSource has been created, it's ready for use in evaluations and batch predictions. If you plan to use the DataSource to train an MLModel, the DataSource also requires a recipe. A recipe describes how each input variable will be used in training an MLModel. Will the variable be included or excluded from training? Will the variable be manipulated; for example, will it be combined with another variable or will it be split apart into word combinations? The recipe provides answers to these questions.

You can't change an existing datasource, but you can copy and modify the settings from an existing Amazon Redshift datasource to create a new datasource. To do so, call GetDataSource for an existing datasource and copy the values to a CreateDataSource call. Change the settings that you want to change and make sure that all required fields have the appropriate values.

See Also:

AWS API Reference

◆ CreateDataSourceFromRedshiftAsync()

virtual void Aws::MachineLearning::MachineLearningClient::CreateDataSourceFromRedshiftAsync ( const Model::CreateDataSourceFromRedshiftRequest &  request,
const CreateDataSourceFromRedshiftResponseReceivedHandler &  handler,
const std::shared_ptr< const Aws::Client::AsyncCallerContext > &  context = nullptr 
) const
virtual

Creates a DataSource from a database hosted on an Amazon Redshift cluster. A DataSource references data that can be used to perform either CreateMLModel, CreateEvaluation, or CreateBatchPrediction operations.

CreateDataSourceFromRedshift is an asynchronous operation. In response to CreateDataSourceFromRedshift, Amazon Machine Learning (Amazon ML) immediately returns and sets the DataSource status to PENDING. After the DataSource is created and ready for use, Amazon ML sets the Status parameter to COMPLETED. DataSource in COMPLETED or PENDING states can be used to perform only CreateMLModel, CreateEvaluation, or CreateBatchPrediction operations.

If Amazon ML can't accept the input source, it sets the Status parameter to FAILED and includes an error message in the Message attribute of the GetDataSource operation response.

The observations should be contained in the database hosted on an Amazon Redshift cluster and should be specified by a SelectSqlQuery query. Amazon ML executes an Unload command in Amazon Redshift to transfer the result set of the SelectSqlQuery query to S3StagingLocation.

After the DataSource has been created, it's ready for use in evaluations and batch predictions. If you plan to use the DataSource to train an MLModel, the DataSource also requires a recipe. A recipe describes how each input variable will be used in training an MLModel. Will the variable be included or excluded from training? Will the variable be manipulated; for example, will it be combined with another variable or will it be split apart into word combinations? The recipe provides answers to these questions.

You can't change an existing datasource, but you can copy and modify the settings from an existing Amazon Redshift datasource to create a new datasource. To do so, call GetDataSource for an existing datasource and copy the values to a CreateDataSource call. Change the settings that you want to change and make sure that all required fields have the appropriate values.

See Also:

AWS API Reference

Queues the request into a thread executor and triggers associated callback when operation has finished.

◆ CreateDataSourceFromRedshiftCallable()

virtual Model::CreateDataSourceFromRedshiftOutcomeCallable Aws::MachineLearning::MachineLearningClient::CreateDataSourceFromRedshiftCallable ( const Model::CreateDataSourceFromRedshiftRequest &  request) const
virtual

Creates a DataSource from a database hosted on an Amazon Redshift cluster. A DataSource references data that can be used to perform either CreateMLModel, CreateEvaluation, or CreateBatchPrediction operations.

CreateDataSourceFromRedshift is an asynchronous operation. In response to CreateDataSourceFromRedshift, Amazon Machine Learning (Amazon ML) immediately returns and sets the DataSource status to PENDING. After the DataSource is created and ready for use, Amazon ML sets the Status parameter to COMPLETED. DataSource in COMPLETED or PENDING states can be used to perform only CreateMLModel, CreateEvaluation, or CreateBatchPrediction operations.

If Amazon ML can't accept the input source, it sets the Status parameter to FAILED and includes an error message in the Message attribute of the GetDataSource operation response.

The observations should be contained in the database hosted on an Amazon Redshift cluster and should be specified by a SelectSqlQuery query. Amazon ML executes an Unload command in Amazon Redshift to transfer the result set of the SelectSqlQuery query to S3StagingLocation.

After the DataSource has been created, it's ready for use in evaluations and batch predictions. If you plan to use the DataSource to train an MLModel, the DataSource also requires a recipe. A recipe describes how each input variable will be used in training an MLModel. Will the variable be included or excluded from training? Will the variable be manipulated; for example, will it be combined with another variable or will it be split apart into word combinations? The recipe provides answers to these questions.

You can't change an existing datasource, but you can copy and modify the settings from an existing Amazon Redshift datasource to create a new datasource. To do so, call GetDataSource for an existing datasource and copy the values to a CreateDataSource call. Change the settings that you want to change and make sure that all required fields have the appropriate values.

See Also:

AWS API Reference

returns a future to the operation so that it can be executed in parallel to other requests.

◆ CreateDataSourceFromS3()

virtual Model::CreateDataSourceFromS3Outcome Aws::MachineLearning::MachineLearningClient::CreateDataSourceFromS3 ( const Model::CreateDataSourceFromS3Request &  request) const
virtual

Creates a DataSource object. A DataSource references data that can be used to perform CreateMLModel, CreateEvaluation, or CreateBatchPrediction operations.

CreateDataSourceFromS3 is an asynchronous operation. In response to CreateDataSourceFromS3, Amazon Machine Learning (Amazon ML) immediately returns and sets the DataSource status to PENDING. After the DataSource has been created and is ready for use, Amazon ML sets the Status parameter to COMPLETED. DataSource in the COMPLETED or PENDING state can be used to perform only CreateMLModel, CreateEvaluation or CreateBatchPrediction operations.

If Amazon ML can't accept the input source, it sets the Status parameter to FAILED and includes an error message in the Message attribute of the GetDataSource operation response.

The observation data used in a DataSource should be ready to use; that is, it should have a consistent structure, and missing data values should be kept to a minimum. The observation data must reside in one or more .csv files in an Amazon Simple Storage Service (Amazon S3) location, along with a schema that describes the data items by name and type. The same schema must be used for all of the data files referenced by the DataSource.

After the DataSource has been created, it's ready to use in evaluations and batch predictions. If you plan to use the DataSource to train an MLModel, the DataSource also needs a recipe. A recipe describes how each input variable will be used in training an MLModel. Will the variable be included or excluded from training? Will the variable be manipulated; for example, will it be combined with another variable or will it be split apart into word combinations? The recipe provides answers to these questions.

See Also:

AWS API Reference

◆ CreateDataSourceFromS3Async()

virtual void Aws::MachineLearning::MachineLearningClient::CreateDataSourceFromS3Async ( const Model::CreateDataSourceFromS3Request &  request,
const CreateDataSourceFromS3ResponseReceivedHandler &  handler,
const std::shared_ptr< const Aws::Client::AsyncCallerContext > &  context = nullptr 
) const
virtual

Creates a DataSource object. A DataSource references data that can be used to perform CreateMLModel, CreateEvaluation, or CreateBatchPrediction operations.

CreateDataSourceFromS3 is an asynchronous operation. In response to CreateDataSourceFromS3, Amazon Machine Learning (Amazon ML) immediately returns and sets the DataSource status to PENDING. After the DataSource has been created and is ready for use, Amazon ML sets the Status parameter to COMPLETED. DataSource in the COMPLETED or PENDING state can be used to perform only CreateMLModel, CreateEvaluation or CreateBatchPrediction operations.

If Amazon ML can't accept the input source, it sets the Status parameter to FAILED and includes an error message in the Message attribute of the GetDataSource operation response.

The observation data used in a DataSource should be ready to use; that is, it should have a consistent structure, and missing data values should be kept to a minimum. The observation data must reside in one or more .csv files in an Amazon Simple Storage Service (Amazon S3) location, along with a schema that describes the data items by name and type. The same schema must be used for all of the data files referenced by the DataSource.

After the DataSource has been created, it's ready to use in evaluations and batch predictions. If you plan to use the DataSource to train an MLModel, the DataSource also needs a recipe. A recipe describes how each input variable will be used in training an MLModel. Will the variable be included or excluded from training? Will the variable be manipulated; for example, will it be combined with another variable or will it be split apart into word combinations? The recipe provides answers to these questions.

See Also:

AWS API Reference

Queues the request into a thread executor and triggers associated callback when operation has finished.

◆ CreateDataSourceFromS3Callable()

virtual Model::CreateDataSourceFromS3OutcomeCallable Aws::MachineLearning::MachineLearningClient::CreateDataSourceFromS3Callable ( const Model::CreateDataSourceFromS3Request &  request) const
virtual

Creates a DataSource object. A DataSource references data that can be used to perform CreateMLModel, CreateEvaluation, or CreateBatchPrediction operations.

CreateDataSourceFromS3 is an asynchronous operation. In response to CreateDataSourceFromS3, Amazon Machine Learning (Amazon ML) immediately returns and sets the DataSource status to PENDING. After the DataSource has been created and is ready for use, Amazon ML sets the Status parameter to COMPLETED. DataSource in the COMPLETED or PENDING state can be used to perform only CreateMLModel, CreateEvaluation or CreateBatchPrediction operations.

If Amazon ML can't accept the input source, it sets the Status parameter to FAILED and includes an error message in the Message attribute of the GetDataSource operation response.

The observation data used in a DataSource should be ready to use; that is, it should have a consistent structure, and missing data values should be kept to a minimum. The observation data must reside in one or more .csv files in an Amazon Simple Storage Service (Amazon S3) location, along with a schema that describes the data items by name and type. The same schema must be used for all of the data files referenced by the DataSource.

After the DataSource has been created, it's ready to use in evaluations and batch predictions. If you plan to use the DataSource to train an MLModel, the DataSource also needs a recipe. A recipe describes how each input variable will be used in training an MLModel. Will the variable be included or excluded from training? Will the variable be manipulated; for example, will it be combined with another variable or will it be split apart into word combinations? The recipe provides answers to these questions.

See Also:

AWS API Reference

returns a future to the operation so that it can be executed in parallel to other requests.

◆ CreateEvaluation()

virtual Model::CreateEvaluationOutcome Aws::MachineLearning::MachineLearningClient::CreateEvaluation ( const Model::CreateEvaluationRequest &  request) const
virtual

Creates a new Evaluation of an MLModel. An MLModel is evaluated on a set of observations associated to a DataSource. Like a DataSource for an MLModel, the DataSource for an Evaluation contains values for the Target Variable. The Evaluation compares the predicted result for each observation to the actual outcome and provides a summary so that you know how effective the MLModel functions on the test data. Evaluation generates a relevant performance metric, such as BinaryAUC, RegressionRMSE or MulticlassAvgFScore based on the corresponding MLModelType: BINARY, REGRESSION or MULTICLASS.

CreateEvaluation is an asynchronous operation. In response to CreateEvaluation, Amazon Machine Learning (Amazon ML) immediately returns and sets the evaluation status to PENDING. After the Evaluation is created and ready for use, Amazon ML sets the status to COMPLETED.

You can use the GetEvaluation operation to check progress of the evaluation during the creation operation.

See Also:

AWS API Reference

◆ CreateEvaluationAsync()

virtual void Aws::MachineLearning::MachineLearningClient::CreateEvaluationAsync ( const Model::CreateEvaluationRequest &  request,
const CreateEvaluationResponseReceivedHandler &  handler,
const std::shared_ptr< const Aws::Client::AsyncCallerContext > &  context = nullptr 
) const
virtual

Creates a new Evaluation of an MLModel. An MLModel is evaluated on a set of observations associated to a DataSource. Like a DataSource for an MLModel, the DataSource for an Evaluation contains values for the Target Variable. The Evaluation compares the predicted result for each observation to the actual outcome and provides a summary so that you know how effective the MLModel functions on the test data. Evaluation generates a relevant performance metric, such as BinaryAUC, RegressionRMSE or MulticlassAvgFScore based on the corresponding MLModelType: BINARY, REGRESSION or MULTICLASS.

CreateEvaluation is an asynchronous operation. In response to CreateEvaluation, Amazon Machine Learning (Amazon ML) immediately returns and sets the evaluation status to PENDING. After the Evaluation is created and ready for use, Amazon ML sets the status to COMPLETED.

You can use the GetEvaluation operation to check progress of the evaluation during the creation operation.

See Also:

AWS API Reference

Queues the request into a thread executor and triggers associated callback when operation has finished.

◆ CreateEvaluationCallable()

virtual Model::CreateEvaluationOutcomeCallable Aws::MachineLearning::MachineLearningClient::CreateEvaluationCallable ( const Model::CreateEvaluationRequest &  request) const
virtual

Creates a new Evaluation of an MLModel. An MLModel is evaluated on a set of observations associated to a DataSource. Like a DataSource for an MLModel, the DataSource for an Evaluation contains values for the Target Variable. The Evaluation compares the predicted result for each observation to the actual outcome and provides a summary so that you know how effective the MLModel functions on the test data. Evaluation generates a relevant performance metric, such as BinaryAUC, RegressionRMSE or MulticlassAvgFScore based on the corresponding MLModelType: BINARY, REGRESSION or MULTICLASS.

CreateEvaluation is an asynchronous operation. In response to CreateEvaluation, Amazon Machine Learning (Amazon ML) immediately returns and sets the evaluation status to PENDING. After the Evaluation is created and ready for use, Amazon ML sets the status to COMPLETED.

You can use the GetEvaluation operation to check progress of the evaluation during the creation operation.

See Also:

AWS API Reference

returns a future to the operation so that it can be executed in parallel to other requests.

◆ CreateMLModel()

virtual Model::CreateMLModelOutcome Aws::MachineLearning::MachineLearningClient::CreateMLModel ( const Model::CreateMLModelRequest &  request) const
virtual

Creates a new MLModel using the DataSource and the recipe as information sources.

An MLModel is nearly immutable. Users can update only the MLModelName and the ScoreThreshold in an MLModel without creating a new MLModel.

CreateMLModel is an asynchronous operation. In response to CreateMLModel, Amazon Machine Learning (Amazon ML) immediately returns and sets the MLModel status to PENDING. After the MLModel has been created and is ready for use, Amazon ML sets the status to COMPLETED.

You can use the GetMLModel operation to check the progress of the MLModel during the creation operation.

CreateMLModel requires a DataSource with computed statistics, which can be created by setting ComputeStatistics to true in CreateDataSourceFromRDS, CreateDataSourceFromS3, or CreateDataSourceFromRedshift operations.

See Also:

AWS API Reference

◆ CreateMLModelAsync()

virtual void Aws::MachineLearning::MachineLearningClient::CreateMLModelAsync ( const Model::CreateMLModelRequest &  request,
const CreateMLModelResponseReceivedHandler &  handler,
const std::shared_ptr< const Aws::Client::AsyncCallerContext > &  context = nullptr 
) const
virtual

Creates a new MLModel using the DataSource and the recipe as information sources.

An MLModel is nearly immutable. Users can update only the MLModelName and the ScoreThreshold in an MLModel without creating a new MLModel.

CreateMLModel is an asynchronous operation. In response to CreateMLModel, Amazon Machine Learning (Amazon ML) immediately returns and sets the MLModel status to PENDING. After the MLModel has been created and is ready for use, Amazon ML sets the status to COMPLETED.

You can use the GetMLModel operation to check the progress of the MLModel during the creation operation.

CreateMLModel requires a DataSource with computed statistics, which can be created by setting ComputeStatistics to true in CreateDataSourceFromRDS, CreateDataSourceFromS3, or CreateDataSourceFromRedshift operations.

See Also:

AWS API Reference

Queues the request into a thread executor and triggers associated callback when operation has finished.

◆ CreateMLModelCallable()

virtual Model::CreateMLModelOutcomeCallable Aws::MachineLearning::MachineLearningClient::CreateMLModelCallable ( const Model::CreateMLModelRequest &  request) const
virtual

Creates a new MLModel using the DataSource and the recipe as information sources.

An MLModel is nearly immutable. Users can update only the MLModelName and the ScoreThreshold in an MLModel without creating a new MLModel.

CreateMLModel is an asynchronous operation. In response to CreateMLModel, Amazon Machine Learning (Amazon ML) immediately returns and sets the MLModel status to PENDING. After the MLModel has been created and is ready for use, Amazon ML sets the status to COMPLETED.

You can use the GetMLModel operation to check the progress of the MLModel during the creation operation.

CreateMLModel requires a DataSource with computed statistics, which can be created by setting ComputeStatistics to true in CreateDataSourceFromRDS, CreateDataSourceFromS3, or CreateDataSourceFromRedshift operations.

See Also:

AWS API Reference

returns a future to the operation so that it can be executed in parallel to other requests.

◆ CreateRealtimeEndpoint()

virtual Model::CreateRealtimeEndpointOutcome Aws::MachineLearning::MachineLearningClient::CreateRealtimeEndpoint ( const Model::CreateRealtimeEndpointRequest &  request) const
virtual

Creates a real-time endpoint for the MLModel. The endpoint contains the URI of the MLModel; that is, the location to send real-time prediction requests for the specified MLModel.

See Also:

AWS API Reference

◆ CreateRealtimeEndpointAsync()

virtual void Aws::MachineLearning::MachineLearningClient::CreateRealtimeEndpointAsync ( const Model::CreateRealtimeEndpointRequest &  request,
const CreateRealtimeEndpointResponseReceivedHandler &  handler,
const std::shared_ptr< const Aws::Client::AsyncCallerContext > &  context = nullptr 
) const
virtual

Creates a real-time endpoint for the MLModel. The endpoint contains the URI of the MLModel; that is, the location to send real-time prediction requests for the specified MLModel.

See Also:

AWS API Reference

Queues the request into a thread executor and triggers associated callback when operation has finished.

◆ CreateRealtimeEndpointCallable()

virtual Model::CreateRealtimeEndpointOutcomeCallable Aws::MachineLearning::MachineLearningClient::CreateRealtimeEndpointCallable ( const Model::CreateRealtimeEndpointRequest &  request) const
virtual

Creates a real-time endpoint for the MLModel. The endpoint contains the URI of the MLModel; that is, the location to send real-time prediction requests for the specified MLModel.

See Also:

AWS API Reference

returns a future to the operation so that it can be executed in parallel to other requests.

◆ DeleteBatchPrediction()

virtual Model::DeleteBatchPredictionOutcome Aws::MachineLearning::MachineLearningClient::DeleteBatchPrediction ( const Model::DeleteBatchPredictionRequest &  request) const
virtual

Assigns the DELETED status to a BatchPrediction, rendering it unusable.

After using the DeleteBatchPrediction operation, you can use the GetBatchPrediction operation to verify that the status of the BatchPrediction changed to DELETED.

Caution: The result of the DeleteBatchPrediction operation is irreversible.

See Also:

AWS API Reference

◆ DeleteBatchPredictionAsync()

virtual void Aws::MachineLearning::MachineLearningClient::DeleteBatchPredictionAsync ( const Model::DeleteBatchPredictionRequest &  request,
const DeleteBatchPredictionResponseReceivedHandler &  handler,
const std::shared_ptr< const Aws::Client::AsyncCallerContext > &  context = nullptr 
) const
virtual

Assigns the DELETED status to a BatchPrediction, rendering it unusable.

After using the DeleteBatchPrediction operation, you can use the GetBatchPrediction operation to verify that the status of the BatchPrediction changed to DELETED.

Caution: The result of the DeleteBatchPrediction operation is irreversible.

See Also:

AWS API Reference

Queues the request into a thread executor and triggers associated callback when operation has finished.

◆ DeleteBatchPredictionCallable()

virtual Model::DeleteBatchPredictionOutcomeCallable Aws::MachineLearning::MachineLearningClient::DeleteBatchPredictionCallable ( const Model::DeleteBatchPredictionRequest &  request) const
virtual

Assigns the DELETED status to a BatchPrediction, rendering it unusable.

After using the DeleteBatchPrediction operation, you can use the GetBatchPrediction operation to verify that the status of the BatchPrediction changed to DELETED.

Caution: The result of the DeleteBatchPrediction operation is irreversible.

See Also:

AWS API Reference

returns a future to the operation so that it can be executed in parallel to other requests.

◆ DeleteDataSource()

virtual Model::DeleteDataSourceOutcome Aws::MachineLearning::MachineLearningClient::DeleteDataSource ( const Model::DeleteDataSourceRequest &  request) const
virtual

Assigns the DELETED status to a DataSource, rendering it unusable.

After using the DeleteDataSource operation, you can use the GetDataSource operation to verify that the status of the DataSource changed to DELETED.

Caution: The results of the DeleteDataSource operation are irreversible.

See Also:

AWS API Reference

◆ DeleteDataSourceAsync()

virtual void Aws::MachineLearning::MachineLearningClient::DeleteDataSourceAsync ( const Model::DeleteDataSourceRequest &  request,
const DeleteDataSourceResponseReceivedHandler &  handler,
const std::shared_ptr< const Aws::Client::AsyncCallerContext > &  context = nullptr 
) const
virtual

Assigns the DELETED status to a DataSource, rendering it unusable.

After using the DeleteDataSource operation, you can use the GetDataSource operation to verify that the status of the DataSource changed to DELETED.

Caution: The results of the DeleteDataSource operation are irreversible.

See Also:

AWS API Reference

Queues the request into a thread executor and triggers associated callback when operation has finished.

◆ DeleteDataSourceCallable()

virtual Model::DeleteDataSourceOutcomeCallable Aws::MachineLearning::MachineLearningClient::DeleteDataSourceCallable ( const Model::DeleteDataSourceRequest &  request) const
virtual

Assigns the DELETED status to a DataSource, rendering it unusable.

After using the DeleteDataSource operation, you can use the GetDataSource operation to verify that the status of the DataSource changed to DELETED.

Caution: The results of the DeleteDataSource operation are irreversible.

See Also:

AWS API Reference

returns a future to the operation so that it can be executed in parallel to other requests.

◆ DeleteEvaluation()

virtual Model::DeleteEvaluationOutcome Aws::MachineLearning::MachineLearningClient::DeleteEvaluation ( const Model::DeleteEvaluationRequest &  request) const
virtual

Assigns the DELETED status to an Evaluation, rendering it unusable.

After invoking the DeleteEvaluation operation, you can use the GetEvaluation operation to verify that the status of the Evaluation changed to DELETED.

Caution: The results of the DeleteEvaluation operation are irreversible.

See Also:

AWS API Reference

◆ DeleteEvaluationAsync()

virtual void Aws::MachineLearning::MachineLearningClient::DeleteEvaluationAsync ( const Model::DeleteEvaluationRequest &  request,
const DeleteEvaluationResponseReceivedHandler &  handler,
const std::shared_ptr< const Aws::Client::AsyncCallerContext > &  context = nullptr 
) const
virtual

Assigns the DELETED status to an Evaluation, rendering it unusable.

After invoking the DeleteEvaluation operation, you can use the GetEvaluation operation to verify that the status of the Evaluation changed to DELETED.

Caution: The results of the DeleteEvaluation operation are irreversible.

See Also:

AWS API Reference

Queues the request into a thread executor and triggers associated callback when operation has finished.

◆ DeleteEvaluationCallable()

virtual Model::DeleteEvaluationOutcomeCallable Aws::MachineLearning::MachineLearningClient::DeleteEvaluationCallable ( const Model::DeleteEvaluationRequest &  request) const
virtual

Assigns the DELETED status to an Evaluation, rendering it unusable.

After invoking the DeleteEvaluation operation, you can use the GetEvaluation operation to verify that the status of the Evaluation changed to DELETED.

Caution: The results of the DeleteEvaluation operation are irreversible.

See Also:

AWS API Reference

returns a future to the operation so that it can be executed in parallel to other requests.

◆ DeleteMLModel()

virtual Model::DeleteMLModelOutcome Aws::MachineLearning::MachineLearningClient::DeleteMLModel ( const Model::DeleteMLModelRequest &  request) const
virtual

Assigns the DELETED status to an MLModel, rendering it unusable.

After using the DeleteMLModel operation, you can use the GetMLModel operation to verify that the status of the MLModel changed to DELETED.

Caution: The result of the DeleteMLModel operation is irreversible.

See Also:

AWS API Reference

◆ DeleteMLModelAsync()

virtual void Aws::MachineLearning::MachineLearningClient::DeleteMLModelAsync ( const Model::DeleteMLModelRequest &  request,
const DeleteMLModelResponseReceivedHandler &  handler,
const std::shared_ptr< const Aws::Client::AsyncCallerContext > &  context = nullptr 
) const
virtual

Assigns the DELETED status to an MLModel, rendering it unusable.

After using the DeleteMLModel operation, you can use the GetMLModel operation to verify that the status of the MLModel changed to DELETED.

Caution: The result of the DeleteMLModel operation is irreversible.

See Also:

AWS API Reference

Queues the request into a thread executor and triggers associated callback when operation has finished.

◆ DeleteMLModelCallable()

virtual Model::DeleteMLModelOutcomeCallable Aws::MachineLearning::MachineLearningClient::DeleteMLModelCallable ( const Model::DeleteMLModelRequest &  request) const
virtual

Assigns the DELETED status to an MLModel, rendering it unusable.

After using the DeleteMLModel operation, you can use the GetMLModel operation to verify that the status of the MLModel changed to DELETED.

Caution: The result of the DeleteMLModel operation is irreversible.

See Also:

AWS API Reference

returns a future to the operation so that it can be executed in parallel to other requests.

◆ DeleteRealtimeEndpoint()

virtual Model::DeleteRealtimeEndpointOutcome Aws::MachineLearning::MachineLearningClient::DeleteRealtimeEndpoint ( const Model::DeleteRealtimeEndpointRequest &  request) const
virtual

Deletes a real-time endpoint of an MLModel.

See Also:

AWS API Reference

◆ DeleteRealtimeEndpointAsync()

virtual void Aws::MachineLearning::MachineLearningClient::DeleteRealtimeEndpointAsync ( const Model::DeleteRealtimeEndpointRequest &  request,
const DeleteRealtimeEndpointResponseReceivedHandler &  handler,
const std::shared_ptr< const Aws::Client::AsyncCallerContext > &  context = nullptr 
) const
virtual

Deletes a real-time endpoint of an MLModel.

See Also:

AWS API Reference

Queues the request into a thread executor and triggers associated callback when operation has finished.

◆ DeleteRealtimeEndpointCallable()

virtual Model::DeleteRealtimeEndpointOutcomeCallable Aws::MachineLearning::MachineLearningClient::DeleteRealtimeEndpointCallable ( const Model::DeleteRealtimeEndpointRequest &  request) const
virtual

Deletes a real-time endpoint of an MLModel.

See Also:

AWS API Reference

returns a future to the operation so that it can be executed in parallel to other requests.

◆ DeleteTags()

virtual Model::DeleteTagsOutcome Aws::MachineLearning::MachineLearningClient::DeleteTags ( const Model::DeleteTagsRequest &  request) const
virtual

Deletes the specified tags associated with an ML object. After this operation is complete, you can't recover deleted tags.

If you specify a tag that doesn't exist, Amazon ML ignores it.

See Also:

AWS API Reference

◆ DeleteTagsAsync()

virtual void Aws::MachineLearning::MachineLearningClient::DeleteTagsAsync ( const Model::DeleteTagsRequest &  request,
const DeleteTagsResponseReceivedHandler &  handler,
const std::shared_ptr< const Aws::Client::AsyncCallerContext > &  context = nullptr 
) const
virtual

Deletes the specified tags associated with an ML object. After this operation is complete, you can't recover deleted tags.

If you specify a tag that doesn't exist, Amazon ML ignores it.

See Also:

AWS API Reference

Queues the request into a thread executor and triggers associated callback when operation has finished.

◆ DeleteTagsCallable()

virtual Model::DeleteTagsOutcomeCallable Aws::MachineLearning::MachineLearningClient::DeleteTagsCallable ( const Model::DeleteTagsRequest &  request) const
virtual

Deletes the specified tags associated with an ML object. After this operation is complete, you can't recover deleted tags.

If you specify a tag that doesn't exist, Amazon ML ignores it.

See Also:

AWS API Reference

returns a future to the operation so that it can be executed in parallel to other requests.

◆ DescribeBatchPredictions()

virtual Model::DescribeBatchPredictionsOutcome Aws::MachineLearning::MachineLearningClient::DescribeBatchPredictions ( const Model::DescribeBatchPredictionsRequest &  request) const
virtual

Returns a list of BatchPrediction operations that match the search criteria in the request.

See Also:

AWS API Reference

◆ DescribeBatchPredictionsAsync()

virtual void Aws::MachineLearning::MachineLearningClient::DescribeBatchPredictionsAsync ( const Model::DescribeBatchPredictionsRequest &  request,
const DescribeBatchPredictionsResponseReceivedHandler &  handler,
const std::shared_ptr< const Aws::Client::AsyncCallerContext > &  context = nullptr 
) const
virtual

Returns a list of BatchPrediction operations that match the search criteria in the request.

See Also:

AWS API Reference

Queues the request into a thread executor and triggers associated callback when operation has finished.

◆ DescribeBatchPredictionsCallable()

virtual Model::DescribeBatchPredictionsOutcomeCallable Aws::MachineLearning::MachineLearningClient::DescribeBatchPredictionsCallable ( const Model::DescribeBatchPredictionsRequest &  request) const
virtual

Returns a list of BatchPrediction operations that match the search criteria in the request.

See Also:

AWS API Reference

returns a future to the operation so that it can be executed in parallel to other requests.

◆ DescribeDataSources()

virtual Model::DescribeDataSourcesOutcome Aws::MachineLearning::MachineLearningClient::DescribeDataSources ( const Model::DescribeDataSourcesRequest &  request) const
virtual

Returns a list of DataSources that match the search criteria in the request.

See Also:

AWS API Reference

◆ DescribeDataSourcesAsync()

virtual void Aws::MachineLearning::MachineLearningClient::DescribeDataSourcesAsync ( const Model::DescribeDataSourcesRequest &  request,
const DescribeDataSourcesResponseReceivedHandler &  handler,
const std::shared_ptr< const Aws::Client::AsyncCallerContext > &  context = nullptr 
) const
virtual

Returns a list of DataSource that match the search criteria in the request.

See Also:

AWS API Reference

Queues the request into a thread executor and triggers associated callback when operation has finished.

◆ DescribeDataSourcesCallable()

virtual Model::DescribeDataSourcesOutcomeCallable Aws::MachineLearning::MachineLearningClient::DescribeDataSourcesCallable ( const Model::DescribeDataSourcesRequest &  request) const
virtual

Returns a list of DataSource that match the search criteria in the request.

See Also:

AWS API Reference

returns a future to the operation so that it can be executed in parallel to other requests.

◆ DescribeEvaluations()

virtual Model::DescribeEvaluationsOutcome Aws::MachineLearning::MachineLearningClient::DescribeEvaluations ( const Model::DescribeEvaluationsRequest &  request) const
virtual

Returns a list of DescribeEvaluations that match the search criteria in the request.

See Also:

AWS API Reference

◆ DescribeEvaluationsAsync()

virtual void Aws::MachineLearning::MachineLearningClient::DescribeEvaluationsAsync ( const Model::DescribeEvaluationsRequest &  request,
const DescribeEvaluationsResponseReceivedHandler &  handler,
const std::shared_ptr< const Aws::Client::AsyncCallerContext > &  context = nullptr 
) const
virtual

Returns a list of DescribeEvaluations that match the search criteria in the request.

See Also:

AWS API Reference

Queues the request into a thread executor and triggers associated callback when operation has finished.

◆ DescribeEvaluationsCallable()

virtual Model::DescribeEvaluationsOutcomeCallable Aws::MachineLearning::MachineLearningClient::DescribeEvaluationsCallable ( const Model::DescribeEvaluationsRequest &  request) const
virtual

Returns a list of DescribeEvaluations that match the search criteria in the request.

See Also:

AWS API Reference

returns a future to the operation so that it can be executed in parallel to other requests.

◆ DescribeMLModels()

virtual Model::DescribeMLModelsOutcome Aws::MachineLearning::MachineLearningClient::DescribeMLModels ( const Model::DescribeMLModelsRequest &  request) const
virtual

Returns a list of MLModel that match the search criteria in the request.

See Also:

AWS API Reference

◆ DescribeMLModelsAsync()

virtual void Aws::MachineLearning::MachineLearningClient::DescribeMLModelsAsync ( const Model::DescribeMLModelsRequest &  request,
const DescribeMLModelsResponseReceivedHandler &  handler,
const std::shared_ptr< const Aws::Client::AsyncCallerContext > &  context = nullptr 
) const
virtual

Returns a list of MLModel that match the search criteria in the request.

See Also:

AWS API Reference

Queues the request into a thread executor and triggers associated callback when operation has finished.

◆ DescribeMLModelsCallable()

virtual Model::DescribeMLModelsOutcomeCallable Aws::MachineLearning::MachineLearningClient::DescribeMLModelsCallable ( const Model::DescribeMLModelsRequest &  request) const
virtual

Returns a list of MLModel that match the search criteria in the request.

See Also:

AWS API Reference

returns a future to the operation so that it can be executed in parallel to other requests.

◆ DescribeTags()

virtual Model::DescribeTagsOutcome Aws::MachineLearning::MachineLearningClient::DescribeTags ( const Model::DescribeTagsRequest &  request) const
virtual

Describes one or more of the tags for your Amazon ML object.

See Also:

AWS API Reference

◆ DescribeTagsAsync()

virtual void Aws::MachineLearning::MachineLearningClient::DescribeTagsAsync ( const Model::DescribeTagsRequest &  request,
const DescribeTagsResponseReceivedHandler &  handler,
const std::shared_ptr< const Aws::Client::AsyncCallerContext > &  context = nullptr 
) const
virtual

Describes one or more of the tags for your Amazon ML object.

See Also:

AWS API Reference

Queues the request into a thread executor and triggers associated callback when operation has finished.

◆ DescribeTagsCallable()

virtual Model::DescribeTagsOutcomeCallable Aws::MachineLearning::MachineLearningClient::DescribeTagsCallable ( const Model::DescribeTagsRequest &  request) const
virtual

Describes one or more of the tags for your Amazon ML object.

See Also:

AWS API Reference

returns a future to the operation so that it can be executed in parallel to other requests.

◆ GetBatchPrediction()

virtual Model::GetBatchPredictionOutcome Aws::MachineLearning::MachineLearningClient::GetBatchPrediction ( const Model::GetBatchPredictionRequest &  request) const
virtual

Returns a BatchPrediction that includes detailed metadata, status, and data file information for a Batch Prediction request.

See Also:

AWS API Reference

◆ GetBatchPredictionAsync()

virtual void Aws::MachineLearning::MachineLearningClient::GetBatchPredictionAsync ( const Model::GetBatchPredictionRequest &  request,
const GetBatchPredictionResponseReceivedHandler &  handler,
const std::shared_ptr< const Aws::Client::AsyncCallerContext > &  context = nullptr 
) const
virtual

Returns a BatchPrediction that includes detailed metadata, status, and data file information for a Batch Prediction request.

See Also:

AWS API Reference

Queues the request into a thread executor and triggers associated callback when operation has finished.

◆ GetBatchPredictionCallable()

virtual Model::GetBatchPredictionOutcomeCallable Aws::MachineLearning::MachineLearningClient::GetBatchPredictionCallable ( const Model::GetBatchPredictionRequest &  request) const
virtual

Returns a BatchPrediction that includes detailed metadata, status, and data file information for a Batch Prediction request.

See Also:

AWS API Reference

returns a future to the operation so that it can be executed in parallel to other requests.

◆ GetDataSource()

virtual Model::GetDataSourceOutcome Aws::MachineLearning::MachineLearningClient::GetDataSource ( const Model::GetDataSourceRequest &  request) const
virtual

Returns a DataSource that includes metadata and data file information, as well as the current status of the DataSource.

GetDataSource provides results in normal or verbose format. The verbose format adds the schema description and the list of files pointed to by the DataSource to the normal format.

See Also:

AWS API Reference

◆ GetDataSourceAsync()

virtual void Aws::MachineLearning::MachineLearningClient::GetDataSourceAsync ( const Model::GetDataSourceRequest &  request,
const GetDataSourceResponseReceivedHandler &  handler,
const std::shared_ptr< const Aws::Client::AsyncCallerContext > &  context = nullptr 
) const
virtual

Returns a DataSource that includes metadata and data file information, as well as the current status of the DataSource.

GetDataSource provides results in normal or verbose format. The verbose format adds the schema description and the list of files pointed to by the DataSource to the normal format.

See Also:

AWS API Reference

Queues the request into a thread executor and triggers associated callback when operation has finished.

◆ GetDataSourceCallable()

virtual Model::GetDataSourceOutcomeCallable Aws::MachineLearning::MachineLearningClient::GetDataSourceCallable ( const Model::GetDataSourceRequest &  request) const
virtual

Returns a DataSource that includes metadata and data file information, as well as the current status of the DataSource.

GetDataSource provides results in normal or verbose format. The verbose format adds the schema description and the list of files pointed to by the DataSource to the normal format.

See Also:

AWS API Reference

returns a future to the operation so that it can be executed in parallel to other requests.

◆ GetEvaluation()

virtual Model::GetEvaluationOutcome Aws::MachineLearning::MachineLearningClient::GetEvaluation ( const Model::GetEvaluationRequest &  request) const
virtual

Returns an Evaluation that includes metadata as well as the current status of the Evaluation.

See Also:

AWS API Reference

◆ GetEvaluationAsync()

virtual void Aws::MachineLearning::MachineLearningClient::GetEvaluationAsync ( const Model::GetEvaluationRequest &  request,
const GetEvaluationResponseReceivedHandler &  handler,
const std::shared_ptr< const Aws::Client::AsyncCallerContext > &  context = nullptr 
) const
virtual

Returns an Evaluation that includes metadata as well as the current status of the Evaluation.

See Also:

AWS API Reference

Queues the request into a thread executor and triggers associated callback when operation has finished.

◆ GetEvaluationCallable()

virtual Model::GetEvaluationOutcomeCallable Aws::MachineLearning::MachineLearningClient::GetEvaluationCallable ( const Model::GetEvaluationRequest &  request) const
virtual

Returns an Evaluation that includes metadata as well as the current status of the Evaluation.

See Also:

AWS API Reference

returns a future to the operation so that it can be executed in parallel to other requests.

◆ GetMLModel()

virtual Model::GetMLModelOutcome Aws::MachineLearning::MachineLearningClient::GetMLModel ( const Model::GetMLModelRequest &  request) const
virtual

Returns an MLModel that includes detailed metadata, data source information, and the current status of the MLModel.

GetMLModel provides results in normal or verbose format.

See Also:

AWS API Reference

◆ GetMLModelAsync()

virtual void Aws::MachineLearning::MachineLearningClient::GetMLModelAsync ( const Model::GetMLModelRequest &  request,
const GetMLModelResponseReceivedHandler &  handler,
const std::shared_ptr< const Aws::Client::AsyncCallerContext > &  context = nullptr 
) const
virtual

Returns an MLModel that includes detailed metadata, data source information, and the current status of the MLModel.

GetMLModel provides results in normal or verbose format.

See Also:

AWS API Reference

Queues the request into a thread executor and triggers associated callback when operation has finished.

◆ GetMLModelCallable()

virtual Model::GetMLModelOutcomeCallable Aws::MachineLearning::MachineLearningClient::GetMLModelCallable ( const Model::GetMLModelRequest &  request) const
virtual

Returns an MLModel that includes detailed metadata, data source information, and the current status of the MLModel.

GetMLModel provides results in normal or verbose format.

See Also:

AWS API Reference

returns a future to the operation so that it can be executed in parallel to other requests.

◆ OverrideEndpoint()

void Aws::MachineLearning::MachineLearningClient::OverrideEndpoint ( const Aws::String &  endpoint)

◆ Predict()

virtual Model::PredictOutcome Aws::MachineLearning::MachineLearningClient::Predict ( const Model::PredictRequest &  request) const
virtual

Generates a prediction for the observation using the specified ML Model.

Note: Not all response parameters will be populated. Whether a response parameter is populated depends on the type of model requested.

See Also:

AWS API Reference

◆ PredictAsync()

virtual void Aws::MachineLearning::MachineLearningClient::PredictAsync ( const Model::PredictRequest &  request,
const PredictResponseReceivedHandler &  handler,
const std::shared_ptr< const Aws::Client::AsyncCallerContext > &  context = nullptr 
) const
virtual

Generates a prediction for the observation using the specified ML Model.

Note: Not all response parameters will be populated. Whether a response parameter is populated depends on the type of model requested.

See Also:

AWS API Reference

Queues the request into a thread executor and triggers associated callback when operation has finished.

◆ PredictCallable()

virtual Model::PredictOutcomeCallable Aws::MachineLearning::MachineLearningClient::PredictCallable ( const Model::PredictRequest &  request) const
virtual

Generates a prediction for the observation using the specified ML Model.

Note: Not all response parameters will be populated. Whether a response parameter is populated depends on the type of model requested.

See Also:

AWS API Reference

returns a future to the operation so that it can be executed in parallel to other requests.

◆ UpdateBatchPrediction()

virtual Model::UpdateBatchPredictionOutcome Aws::MachineLearning::MachineLearningClient::UpdateBatchPrediction ( const Model::UpdateBatchPredictionRequest &  request) const
virtual

Updates the BatchPredictionName of a BatchPrediction.

You can use the GetBatchPrediction operation to view the contents of the updated data element.

See Also:

AWS API Reference

◆ UpdateBatchPredictionAsync()

virtual void Aws::MachineLearning::MachineLearningClient::UpdateBatchPredictionAsync ( const Model::UpdateBatchPredictionRequest &  request,
const UpdateBatchPredictionResponseReceivedHandler &  handler,
const std::shared_ptr< const Aws::Client::AsyncCallerContext > &  context = nullptr 
) const
virtual

Updates the BatchPredictionName of a BatchPrediction.

You can use the GetBatchPrediction operation to view the contents of the updated data element.

See Also:

AWS API Reference

Queues the request into a thread executor and triggers associated callback when operation has finished.

◆ UpdateBatchPredictionCallable()

virtual Model::UpdateBatchPredictionOutcomeCallable Aws::MachineLearning::MachineLearningClient::UpdateBatchPredictionCallable ( const Model::UpdateBatchPredictionRequest &  request) const
virtual

Updates the BatchPredictionName of a BatchPrediction.

You can use the GetBatchPrediction operation to view the contents of the updated data element.

See Also:

AWS API Reference

returns a future to the operation so that it can be executed in parallel to other requests.

◆ UpdateDataSource()

virtual Model::UpdateDataSourceOutcome Aws::MachineLearning::MachineLearningClient::UpdateDataSource ( const Model::UpdateDataSourceRequest &  request) const
virtual

Updates the DataSourceName of a DataSource.

You can use the GetDataSource operation to view the contents of the updated data element.

See Also:

AWS API Reference

◆ UpdateDataSourceAsync()

virtual void Aws::MachineLearning::MachineLearningClient::UpdateDataSourceAsync ( const Model::UpdateDataSourceRequest &  request,
const UpdateDataSourceResponseReceivedHandler &  handler,
const std::shared_ptr< const Aws::Client::AsyncCallerContext > &  context = nullptr 
) const
virtual

Updates the DataSourceName of a DataSource.

You can use the GetDataSource operation to view the contents of the updated data element.

See Also:

AWS API Reference

Queues the request into a thread executor and triggers associated callback when operation has finished.

◆ UpdateDataSourceCallable()

virtual Model::UpdateDataSourceOutcomeCallable Aws::MachineLearning::MachineLearningClient::UpdateDataSourceCallable ( const Model::UpdateDataSourceRequest &  request) const
virtual

Updates the DataSourceName of a DataSource.

You can use the GetDataSource operation to view the contents of the updated data element.

See Also:

AWS API Reference

returns a future to the operation so that it can be executed in parallel to other requests.

◆ UpdateEvaluation()

virtual Model::UpdateEvaluationOutcome Aws::MachineLearning::MachineLearningClient::UpdateEvaluation ( const Model::UpdateEvaluationRequest &  request) const
virtual

Updates the EvaluationName of an Evaluation.

You can use the GetEvaluation operation to view the contents of the updated data element.

See Also:

AWS API Reference

◆ UpdateEvaluationAsync()

virtual void Aws::MachineLearning::MachineLearningClient::UpdateEvaluationAsync ( const Model::UpdateEvaluationRequest &  request,
const UpdateEvaluationResponseReceivedHandler &  handler,
const std::shared_ptr< const Aws::Client::AsyncCallerContext > &  context = nullptr 
) const
virtual

Updates the EvaluationName of an Evaluation.

You can use the GetEvaluation operation to view the contents of the updated data element.

See Also:

AWS API Reference

Queues the request into a thread executor and triggers associated callback when operation has finished.

◆ UpdateEvaluationCallable()

virtual Model::UpdateEvaluationOutcomeCallable Aws::MachineLearning::MachineLearningClient::UpdateEvaluationCallable ( const Model::UpdateEvaluationRequest &  request) const
virtual

Updates the EvaluationName of an Evaluation.

You can use the GetEvaluation operation to view the contents of the updated data element.

See Also:

AWS API Reference

returns a future to the operation so that it can be executed in parallel to other requests.

◆ UpdateMLModel()

virtual Model::UpdateMLModelOutcome Aws::MachineLearning::MachineLearningClient::UpdateMLModel ( const Model::UpdateMLModelRequest &  request) const
virtual

Updates the MLModelName and the ScoreThreshold of an MLModel.

You can use the GetMLModel operation to view the contents of the updated data element.

See Also:

AWS API Reference

◆ UpdateMLModelAsync()

virtual void Aws::MachineLearning::MachineLearningClient::UpdateMLModelAsync ( const Model::UpdateMLModelRequest &  request,
const UpdateMLModelResponseReceivedHandler &  handler,
const std::shared_ptr< const Aws::Client::AsyncCallerContext > &  context = nullptr 
) const
virtual

Updates the MLModelName and the ScoreThreshold of an MLModel.

You can use the GetMLModel operation to view the contents of the updated data element.

See Also:

AWS API Reference

Queues the request into a thread executor and triggers associated callback when operation has finished.

◆ UpdateMLModelCallable()

virtual Model::UpdateMLModelOutcomeCallable Aws::MachineLearning::MachineLearningClient::UpdateMLModelCallable ( const Model::UpdateMLModelRequest &  request) const
virtual

Updates the MLModelName and the ScoreThreshold of an MLModel.

You can use the GetMLModel operation to view the contents of the updated data element.

See Also:

AWS API Reference

returns a future to the operation so that it can be executed in parallel to other requests.


The documentation for this class was generated from the following file: