Trait rusoto_sagemaker::SageMaker

pub trait SageMaker {
    fn add_tags(
        &self,
        input: AddTagsInput
    ) -> RusotoFuture<AddTagsOutput, AddTagsError>;
fn create_algorithm(
        &self,
        input: CreateAlgorithmInput
    ) -> RusotoFuture<CreateAlgorithmOutput, CreateAlgorithmError>;
fn create_code_repository(
        &self,
        input: CreateCodeRepositoryInput
    ) -> RusotoFuture<CreateCodeRepositoryOutput, CreateCodeRepositoryError>;
fn create_compilation_job(
        &self,
        input: CreateCompilationJobRequest
    ) -> RusotoFuture<CreateCompilationJobResponse, CreateCompilationJobError>;
fn create_endpoint(
        &self,
        input: CreateEndpointInput
    ) -> RusotoFuture<CreateEndpointOutput, CreateEndpointError>;
fn create_endpoint_config(
        &self,
        input: CreateEndpointConfigInput
    ) -> RusotoFuture<CreateEndpointConfigOutput, CreateEndpointConfigError>;
fn create_hyper_parameter_tuning_job(
        &self,
        input: CreateHyperParameterTuningJobRequest
    ) -> RusotoFuture<CreateHyperParameterTuningJobResponse, CreateHyperParameterTuningJobError>;
fn create_labeling_job(
        &self,
        input: CreateLabelingJobRequest
    ) -> RusotoFuture<CreateLabelingJobResponse, CreateLabelingJobError>;
fn create_model(
        &self,
        input: CreateModelInput
    ) -> RusotoFuture<CreateModelOutput, CreateModelError>;
fn create_model_package(
        &self,
        input: CreateModelPackageInput
    ) -> RusotoFuture<CreateModelPackageOutput, CreateModelPackageError>;
fn create_notebook_instance(
        &self,
        input: CreateNotebookInstanceInput
    ) -> RusotoFuture<CreateNotebookInstanceOutput, CreateNotebookInstanceError>;
fn create_notebook_instance_lifecycle_config(
        &self,
        input: CreateNotebookInstanceLifecycleConfigInput
    ) -> RusotoFuture<CreateNotebookInstanceLifecycleConfigOutput, CreateNotebookInstanceLifecycleConfigError>;
fn create_presigned_notebook_instance_url(
        &self,
        input: CreatePresignedNotebookInstanceUrlInput
    ) -> RusotoFuture<CreatePresignedNotebookInstanceUrlOutput, CreatePresignedNotebookInstanceUrlError>;
fn create_training_job(
        &self,
        input: CreateTrainingJobRequest
    ) -> RusotoFuture<CreateTrainingJobResponse, CreateTrainingJobError>;
fn create_transform_job(
        &self,
        input: CreateTransformJobRequest
    ) -> RusotoFuture<CreateTransformJobResponse, CreateTransformJobError>;
fn create_workteam(
        &self,
        input: CreateWorkteamRequest
    ) -> RusotoFuture<CreateWorkteamResponse, CreateWorkteamError>;
fn delete_algorithm(
        &self,
        input: DeleteAlgorithmInput
    ) -> RusotoFuture<(), DeleteAlgorithmError>;
fn delete_code_repository(
        &self,
        input: DeleteCodeRepositoryInput
    ) -> RusotoFuture<(), DeleteCodeRepositoryError>;
fn delete_endpoint(
        &self,
        input: DeleteEndpointInput
    ) -> RusotoFuture<(), DeleteEndpointError>;
fn delete_endpoint_config(
        &self,
        input: DeleteEndpointConfigInput
    ) -> RusotoFuture<(), DeleteEndpointConfigError>;
fn delete_model(
        &self,
        input: DeleteModelInput
    ) -> RusotoFuture<(), DeleteModelError>;
fn delete_model_package(
        &self,
        input: DeleteModelPackageInput
    ) -> RusotoFuture<(), DeleteModelPackageError>;
fn delete_notebook_instance(
        &self,
        input: DeleteNotebookInstanceInput
    ) -> RusotoFuture<(), DeleteNotebookInstanceError>;
fn delete_notebook_instance_lifecycle_config(
        &self,
        input: DeleteNotebookInstanceLifecycleConfigInput
    ) -> RusotoFuture<(), DeleteNotebookInstanceLifecycleConfigError>;
fn delete_tags(
        &self,
        input: DeleteTagsInput
    ) -> RusotoFuture<DeleteTagsOutput, DeleteTagsError>;
fn delete_workteam(
        &self,
        input: DeleteWorkteamRequest
    ) -> RusotoFuture<DeleteWorkteamResponse, DeleteWorkteamError>;
fn describe_algorithm(
        &self,
        input: DescribeAlgorithmInput
    ) -> RusotoFuture<DescribeAlgorithmOutput, DescribeAlgorithmError>;
fn describe_code_repository(
        &self,
        input: DescribeCodeRepositoryInput
    ) -> RusotoFuture<DescribeCodeRepositoryOutput, DescribeCodeRepositoryError>;
fn describe_compilation_job(
        &self,
        input: DescribeCompilationJobRequest
    ) -> RusotoFuture<DescribeCompilationJobResponse, DescribeCompilationJobError>;
fn describe_endpoint(
        &self,
        input: DescribeEndpointInput
    ) -> RusotoFuture<DescribeEndpointOutput, DescribeEndpointError>;
fn describe_endpoint_config(
        &self,
        input: DescribeEndpointConfigInput
    ) -> RusotoFuture<DescribeEndpointConfigOutput, DescribeEndpointConfigError>;
fn describe_hyper_parameter_tuning_job(
        &self,
        input: DescribeHyperParameterTuningJobRequest
    ) -> RusotoFuture<DescribeHyperParameterTuningJobResponse, DescribeHyperParameterTuningJobError>;
fn describe_labeling_job(
        &self,
        input: DescribeLabelingJobRequest
    ) -> RusotoFuture<DescribeLabelingJobResponse, DescribeLabelingJobError>;
fn describe_model(
        &self,
        input: DescribeModelInput
    ) -> RusotoFuture<DescribeModelOutput, DescribeModelError>;
fn describe_model_package(
        &self,
        input: DescribeModelPackageInput
    ) -> RusotoFuture<DescribeModelPackageOutput, DescribeModelPackageError>;
fn describe_notebook_instance(
        &self,
        input: DescribeNotebookInstanceInput
    ) -> RusotoFuture<DescribeNotebookInstanceOutput, DescribeNotebookInstanceError>;
fn describe_notebook_instance_lifecycle_config(
        &self,
        input: DescribeNotebookInstanceLifecycleConfigInput
    ) -> RusotoFuture<DescribeNotebookInstanceLifecycleConfigOutput, DescribeNotebookInstanceLifecycleConfigError>;
fn describe_subscribed_workteam(
        &self,
        input: DescribeSubscribedWorkteamRequest
    ) -> RusotoFuture<DescribeSubscribedWorkteamResponse, DescribeSubscribedWorkteamError>;
fn describe_training_job(
        &self,
        input: DescribeTrainingJobRequest
    ) -> RusotoFuture<DescribeTrainingJobResponse, DescribeTrainingJobError>;
fn describe_transform_job(
        &self,
        input: DescribeTransformJobRequest
    ) -> RusotoFuture<DescribeTransformJobResponse, DescribeTransformJobError>;
fn describe_workteam(
        &self,
        input: DescribeWorkteamRequest
    ) -> RusotoFuture<DescribeWorkteamResponse, DescribeWorkteamError>;
fn get_search_suggestions(
        &self,
        input: GetSearchSuggestionsRequest
    ) -> RusotoFuture<GetSearchSuggestionsResponse, GetSearchSuggestionsError>;
fn list_algorithms(
        &self,
        input: ListAlgorithmsInput
    ) -> RusotoFuture<ListAlgorithmsOutput, ListAlgorithmsError>;
fn list_code_repositories(
        &self,
        input: ListCodeRepositoriesInput
    ) -> RusotoFuture<ListCodeRepositoriesOutput, ListCodeRepositoriesError>;
fn list_compilation_jobs(
        &self,
        input: ListCompilationJobsRequest
    ) -> RusotoFuture<ListCompilationJobsResponse, ListCompilationJobsError>;
fn list_endpoint_configs(
        &self,
        input: ListEndpointConfigsInput
    ) -> RusotoFuture<ListEndpointConfigsOutput, ListEndpointConfigsError>;
fn list_endpoints(
        &self,
        input: ListEndpointsInput
    ) -> RusotoFuture<ListEndpointsOutput, ListEndpointsError>;
fn list_hyper_parameter_tuning_jobs(
        &self,
        input: ListHyperParameterTuningJobsRequest
    ) -> RusotoFuture<ListHyperParameterTuningJobsResponse, ListHyperParameterTuningJobsError>;
fn list_labeling_jobs(
        &self,
        input: ListLabelingJobsRequest
    ) -> RusotoFuture<ListLabelingJobsResponse, ListLabelingJobsError>;
fn list_labeling_jobs_for_workteam(
        &self,
        input: ListLabelingJobsForWorkteamRequest
    ) -> RusotoFuture<ListLabelingJobsForWorkteamResponse, ListLabelingJobsForWorkteamError>;
fn list_model_packages(
        &self,
        input: ListModelPackagesInput
    ) -> RusotoFuture<ListModelPackagesOutput, ListModelPackagesError>;
fn list_models(
        &self,
        input: ListModelsInput
    ) -> RusotoFuture<ListModelsOutput, ListModelsError>;
fn list_notebook_instance_lifecycle_configs(
        &self,
        input: ListNotebookInstanceLifecycleConfigsInput
    ) -> RusotoFuture<ListNotebookInstanceLifecycleConfigsOutput, ListNotebookInstanceLifecycleConfigsError>;
fn list_notebook_instances(
        &self,
        input: ListNotebookInstancesInput
    ) -> RusotoFuture<ListNotebookInstancesOutput, ListNotebookInstancesError>;
fn list_subscribed_workteams(
        &self,
        input: ListSubscribedWorkteamsRequest
    ) -> RusotoFuture<ListSubscribedWorkteamsResponse, ListSubscribedWorkteamsError>;
fn list_tags(
        &self,
        input: ListTagsInput
    ) -> RusotoFuture<ListTagsOutput, ListTagsError>;
fn list_training_jobs(
        &self,
        input: ListTrainingJobsRequest
    ) -> RusotoFuture<ListTrainingJobsResponse, ListTrainingJobsError>;
fn list_training_jobs_for_hyper_parameter_tuning_job(
        &self,
        input: ListTrainingJobsForHyperParameterTuningJobRequest
    ) -> RusotoFuture<ListTrainingJobsForHyperParameterTuningJobResponse, ListTrainingJobsForHyperParameterTuningJobError>;
fn list_transform_jobs(
        &self,
        input: ListTransformJobsRequest
    ) -> RusotoFuture<ListTransformJobsResponse, ListTransformJobsError>;
fn list_workteams(
        &self,
        input: ListWorkteamsRequest
    ) -> RusotoFuture<ListWorkteamsResponse, ListWorkteamsError>;
fn render_ui_template(
        &self,
        input: RenderUiTemplateRequest
    ) -> RusotoFuture<RenderUiTemplateResponse, RenderUiTemplateError>;
fn search(
        &self,
        input: SearchRequest
    ) -> RusotoFuture<SearchResponse, SearchError>;
fn start_notebook_instance(
        &self,
        input: StartNotebookInstanceInput
    ) -> RusotoFuture<(), StartNotebookInstanceError>;
fn stop_compilation_job(
        &self,
        input: StopCompilationJobRequest
    ) -> RusotoFuture<(), StopCompilationJobError>;
fn stop_hyper_parameter_tuning_job(
        &self,
        input: StopHyperParameterTuningJobRequest
    ) -> RusotoFuture<(), StopHyperParameterTuningJobError>;
fn stop_labeling_job(
        &self,
        input: StopLabelingJobRequest
    ) -> RusotoFuture<(), StopLabelingJobError>;
fn stop_notebook_instance(
        &self,
        input: StopNotebookInstanceInput
    ) -> RusotoFuture<(), StopNotebookInstanceError>;
fn stop_training_job(
        &self,
        input: StopTrainingJobRequest
    ) -> RusotoFuture<(), StopTrainingJobError>;
fn stop_transform_job(
        &self,
        input: StopTransformJobRequest
    ) -> RusotoFuture<(), StopTransformJobError>;
fn update_code_repository(
        &self,
        input: UpdateCodeRepositoryInput
    ) -> RusotoFuture<UpdateCodeRepositoryOutput, UpdateCodeRepositoryError>;
fn update_endpoint(
        &self,
        input: UpdateEndpointInput
    ) -> RusotoFuture<UpdateEndpointOutput, UpdateEndpointError>;
fn update_endpoint_weights_and_capacities(
        &self,
        input: UpdateEndpointWeightsAndCapacitiesInput
    ) -> RusotoFuture<UpdateEndpointWeightsAndCapacitiesOutput, UpdateEndpointWeightsAndCapacitiesError>;
fn update_notebook_instance(
        &self,
        input: UpdateNotebookInstanceInput
    ) -> RusotoFuture<UpdateNotebookInstanceOutput, UpdateNotebookInstanceError>;
fn update_notebook_instance_lifecycle_config(
        &self,
        input: UpdateNotebookInstanceLifecycleConfigInput
    ) -> RusotoFuture<UpdateNotebookInstanceLifecycleConfigOutput, UpdateNotebookInstanceLifecycleConfigError>;
fn update_workteam(
        &self,
        input: UpdateWorkteamRequest
    ) -> RusotoFuture<UpdateWorkteamResponse, UpdateWorkteamError>; }

Trait representing the capabilities of the SageMaker API. SageMaker clients implement this trait.

Required methods

fn add_tags(
    &self,
    input: AddTagsInput
) -> RusotoFuture<AddTagsOutput, AddTagsError>

Adds or overwrites one or more tags for the specified Amazon SageMaker resource. You can add tags to notebook instances, training jobs, hyperparameter tuning jobs, batch transform jobs, models, labeling jobs, work teams, endpoint configurations, and endpoints.

Each tag consists of a key and an optional value. Tag keys must be unique per resource. For more information about tags, see AWS Tagging Strategies.

Tags that you add to a hyperparameter tuning job by calling this API are also added to any training jobs that the hyperparameter tuning job launches after you call this API, but not to training jobs that the hyperparameter tuning job launched before you called this API. To make sure that the tags associated with a hyperparameter tuning job are also added to all training jobs that the hyperparameter tuning job launches, add the tags when you first create the tuning job by specifying them in the Tags parameter of CreateHyperParameterTuningJob.
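
For illustration, a minimal blocking sketch of this call with rusoto; the region, resource ARN, and tag key/value are placeholders, and the field names follow the generated rusoto_sagemaker request shapes:

use rusoto_core::Region;
use rusoto_sagemaker::{AddTagsInput, SageMaker, SageMakerClient, Tag};

fn main() {
    let client = SageMakerClient::new(Region::UsEast1);

    // Placeholder resource ARN and tag values; substitute your own resource.
    let input = AddTagsInput {
        resource_arn: "arn:aws:sagemaker:us-east-1:123456789012:training-job/my-job".to_string(),
        tags: vec![Tag {
            key: "project".to_string(),
            value: "demo".to_string(),
        }],
    };

    // Block on the returned RusotoFuture for simplicity.
    match client.add_tags(input).sync() {
        Ok(output) => println!("tags on resource: {:?}", output.tags),
        Err(e) => eprintln!("AddTags failed: {:?}", e),
    }
}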

fn create_algorithm(
    &self,
    input: CreateAlgorithmInput
) -> RusotoFuture<CreateAlgorithmOutput, CreateAlgorithmError>

Create a machine learning algorithm that you can use in Amazon SageMaker and list in the AWS Marketplace.

fn create_code_repository(
    &self,
    input: CreateCodeRepositoryInput
) -> RusotoFuture<CreateCodeRepositoryOutput, CreateCodeRepositoryError>

Creates a Git repository as a resource in your Amazon SageMaker account. You can associate the repository with notebook instances so that you can use Git source control for the notebooks you create. The Git repository is a resource in your Amazon SageMaker account, so it can be associated with more than one notebook instance, and it persists independently from the lifecycle of any notebook instances it is associated with.

The repository can be hosted either in AWS CodeCommit or in any other Git repository.

fn create_compilation_job(
    &self,
    input: CreateCompilationJobRequest
) -> RusotoFuture<CreateCompilationJobResponse, CreateCompilationJobError>

Starts a model compilation job. After the model has been compiled, Amazon SageMaker saves the resulting model artifacts to an Amazon Simple Storage Service (Amazon S3) bucket that you specify.

If you choose to host your model using Amazon SageMaker hosting services, you can use the resulting model artifacts as part of the model. You can also use the artifacts with AWS IoT Greengrass. In that case, deploy them as an ML resource.

In the request body, you provide the following:

  • A name for the compilation job

  • Information about the input model artifacts

  • The output location for the compiled model and the device (target) that the model runs on

  • The Amazon Resource Name (ARN) of the IAM role that Amazon SageMaker assumes to perform the model compilation job

You can also provide a Tag to track the model compilation job's resource use and costs. The response body contains the CompilationJobArn for the compiled job.

To stop a model compilation job, use StopCompilationJob. To get information about a particular model compilation job, use DescribeCompilationJob. To get information about multiple model compilation jobs, use ListCompilationJobs.
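
A hedged sketch of assembling this request with rusoto; the job name, S3 URIs, role ARN, framework, and target device are placeholders, and any optional fields are left at their defaults:

use rusoto_core::Region;
use rusoto_sagemaker::{
    CreateCompilationJobRequest, InputConfig, OutputConfig, SageMaker, SageMakerClient,
    StoppingCondition,
};

fn main() {
    let client = SageMakerClient::new(Region::UsWest2);

    // All names, URIs, and the role ARN below are placeholders.
    let request = CreateCompilationJobRequest {
        compilation_job_name: "my-compilation-job".to_string(),
        role_arn: "arn:aws:iam::123456789012:role/SageMakerRole".to_string(),
        input_config: InputConfig {
            s3_uri: "s3://my-bucket/model/model.tar.gz".to_string(),
            data_input_config: "{\"data\": [1, 3, 224, 224]}".to_string(),
            framework: "MXNET".to_string(),
            ..Default::default()
        },
        output_config: OutputConfig {
            s3_output_location: "s3://my-bucket/compiled/".to_string(),
            target_device: "ml_c5".to_string(),
            ..Default::default()
        },
        // Cap the compilation job at one hour.
        stopping_condition: StoppingCondition {
            max_runtime_in_seconds: Some(3600),
            ..Default::default()
        },
        ..Default::default()
    };

    match client.create_compilation_job(request).sync() {
        Ok(resp) => println!("compilation job ARN: {}", resp.compilation_job_arn),
        Err(e) => eprintln!("CreateCompilationJob failed: {:?}", e),
    }
}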

fn create_endpoint(
    &self,
    input: CreateEndpointInput
) -> RusotoFuture<CreateEndpointOutput, CreateEndpointError>

Creates an endpoint using the endpoint configuration specified in the request. Amazon SageMaker uses the endpoint to provision resources and deploy models. You create the endpoint configuration with the CreateEndpointConfig API.

Use this API only for hosting models using Amazon SageMaker hosting services.

You must not delete an EndpointConfig in use by an endpoint that is live or while the UpdateEndpoint or CreateEndpoint operations are being performed on the endpoint. To update an endpoint, you must create a new EndpointConfig.

The endpoint name must be unique within an AWS Region in your AWS account.

When it receives the request, Amazon SageMaker creates the endpoint, launches the resources (ML compute instances), and deploys the model(s) on them.

When Amazon SageMaker receives the request, it sets the endpoint status to Creating. After it creates the endpoint, it sets the status to InService. Amazon SageMaker can then process incoming requests for inferences. To check the status of an endpoint, use the DescribeEndpoint API.

For an example, see Exercise 1: Using the K-Means Algorithm Provided by Amazon SageMaker.

If any of the models hosted at this endpoint get model data from an Amazon S3 location, Amazon SageMaker uses AWS Security Token Service to download model artifacts from the S3 path you provided. AWS STS is activated in your IAM user account by default. If you previously deactivated AWS STS for a region, you need to reactivate AWS STS for that region. For more information, see Activating and Deactivating AWS STS in an AWS Region in the AWS Identity and Access Management User Guide.
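
As a sketch, creating an endpoint from an existing endpoint configuration and blocking on the result; both names below are placeholders:

use rusoto_core::Region;
use rusoto_sagemaker::{CreateEndpointInput, SageMaker, SageMakerClient};

fn main() {
    let client = SageMakerClient::new(Region::UsEast1);

    // Assumes an endpoint configuration named "my-endpoint-config" already exists.
    let input = CreateEndpointInput {
        endpoint_name: "my-endpoint".to_string(),
        endpoint_config_name: "my-endpoint-config".to_string(),
        ..Default::default()
    };

    match client.create_endpoint(input).sync() {
        Ok(output) => println!("endpoint ARN: {}", output.endpoint_arn),
        Err(e) => eprintln!("CreateEndpoint failed: {:?}", e),
    }
}

The call returns while the endpoint status is still Creating; poll the DescribeEndpoint API until the status is InService before sending inference requests.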

fn create_endpoint_config(
    &self,
    input: CreateEndpointConfigInput
) -> RusotoFuture<CreateEndpointConfigOutput, CreateEndpointConfigError>

Creates an endpoint configuration that Amazon SageMaker hosting services uses to deploy models. In the configuration, you identify one or more models, created using the CreateModel API, to deploy and the resources that you want Amazon SageMaker to provision. Then you call the CreateEndpoint API.

Use this API only if you want to use Amazon SageMaker hosting services to deploy models into production.

In the request, you define one or more ProductionVariants, each of which identifies a model. Each ProductionVariant parameter also describes the resources that you want Amazon SageMaker to provision. This includes the number and type of ML compute instances to deploy.

If you are hosting multiple models, you also assign a VariantWeight to specify how much traffic you want to allocate to each model. For example, suppose that you want to host two models, A and B, and you assign traffic weight 2 for model A and 1 for model B. Amazon SageMaker distributes two-thirds of the traffic to Model A, and one-third to model B.
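
A sketch of the two-model example above (traffic weights 2 and 1) with rusoto; the model names, variant names, and instance type are placeholders, and remaining optional fields are left at their defaults:

use rusoto_core::Region;
use rusoto_sagemaker::{CreateEndpointConfigInput, ProductionVariant, SageMaker, SageMakerClient};

fn main() {
    let client = SageMakerClient::new(Region::UsEast1);

    // Two existing models, "model-a" and "model-b", split traffic 2:1.
    let variant = |name: &str, model: &str, weight: f32| ProductionVariant {
        variant_name: name.to_string(),
        model_name: model.to_string(),
        instance_type: "ml.m5.large".to_string(),
        initial_instance_count: 1,
        initial_variant_weight: Some(weight),
        ..Default::default()
    };

    let input = CreateEndpointConfigInput {
        endpoint_config_name: "my-endpoint-config".to_string(),
        production_variants: vec![
            variant("variant-a", "model-a", 2.0),
            variant("variant-b", "model-b", 1.0),
        ],
        ..Default::default()
    };

    match client.create_endpoint_config(input).sync() {
        Ok(output) => println!("endpoint config ARN: {}", output.endpoint_config_arn),
        Err(e) => eprintln!("CreateEndpointConfig failed: {:?}", e),
    }
}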

fn create_hyper_parameter_tuning_job(
    &self,
    input: CreateHyperParameterTuningJobRequest
) -> RusotoFuture<CreateHyperParameterTuningJobResponse, CreateHyperParameterTuningJobError>

Starts a hyperparameter tuning job. A hyperparameter tuning job finds the best version of a model by running many training jobs on your dataset using the algorithm you choose and values for hyperparameters within ranges that you specify. It then chooses the hyperparameter values that result in a model that performs the best, as measured by an objective metric that you choose.

fn create_labeling_job(
    &self,
    input: CreateLabelingJobRequest
) -> RusotoFuture<CreateLabelingJobResponse, CreateLabelingJobError>

Creates a job that uses workers to label the data objects in your input dataset. You can use the labeled data to train machine learning models.

You can select your workforce from one of three providers:

  • A private workforce that you create. It can include employees, contractors, and outside experts. Use a private workforce when you want the data to stay within your organization or when a specific set of skills is required.

  • One or more vendors that you select from the AWS Marketplace. Vendors provide expertise in specific areas.

  • The Amazon Mechanical Turk workforce. This is the largest workforce, but it should only be used for public data or data that has been stripped of any personally identifiable information.

You can also use automated data labeling to reduce the number of data objects that need to be labeled by a human. Automated data labeling uses active learning to determine if a data object can be labeled by machine or if it needs to be sent to a human worker. For more information, see Using Automated Data Labeling.

The data objects to be labeled are contained in an Amazon S3 bucket. You create a manifest file that describes the location of each object. For more information, see Using Input and Output Data.

The output can be used as the manifest file for another labeling job or as training data for your machine learning models.

fn create_model(
    &self,
    input: CreateModelInput
) -> RusotoFuture<CreateModelOutput, CreateModelError>

Creates a model in Amazon SageMaker. In the request, you name the model and describe a primary container. For the primary container, you specify the docker image containing inference code, artifacts (from prior training), and custom environment map that the inference code uses when you deploy the model for predictions.

Use this API to create a model if you want to use Amazon SageMaker hosting services or run a batch transform job.

To host your model, you create an endpoint configuration with the CreateEndpointConfig API, and then create an endpoint with the CreateEndpoint API. Amazon SageMaker then deploys all of the containers that you defined for the model in the hosting environment.

To run a batch transform using your model, you start a job with the CreateTransformJob API. Amazon SageMaker uses your model and your dataset to get inferences which are then saved to a specified S3 location.

In the CreateModel request, you must define a container with the PrimaryContainer parameter.

In the request, you also provide an IAM role that Amazon SageMaker can assume to access model artifacts and the docker image for deployment on ML compute hosting instances or for batch transform jobs. In addition, you also use the IAM role to manage permissions that the inference code needs. For example, if the inference code accesses any other AWS resources, you grant the necessary permissions via this role.
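
A minimal sketch of defining a model with a primary container; the ECR image URI, model artifact location, and role ARN are placeholders, and field optionality is assumed from the generated rusoto shapes:

use rusoto_core::Region;
use rusoto_sagemaker::{ContainerDefinition, CreateModelInput, SageMaker, SageMakerClient};

fn main() {
    let client = SageMakerClient::new(Region::UsEast1);

    // Placeholder inference image and model artifact location.
    let primary_container = ContainerDefinition {
        image: Some("123456789012.dkr.ecr.us-east-1.amazonaws.com/my-inference-image:latest".to_string()),
        model_data_url: Some("s3://my-bucket/output/model.tar.gz".to_string()),
        ..Default::default()
    };

    let input = CreateModelInput {
        model_name: "my-model".to_string(),
        primary_container: Some(primary_container),
        execution_role_arn: "arn:aws:iam::123456789012:role/SageMakerRole".to_string(),
        ..Default::default()
    };

    match client.create_model(input).sync() {
        Ok(output) => println!("model ARN: {}", output.model_arn),
        Err(e) => eprintln!("CreateModel failed: {:?}", e),
    }
}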

fn create_model_package(
    &self,
    input: CreateModelPackageInput
) -> RusotoFuture<CreateModelPackageOutput, CreateModelPackageError>

Creates a model package that you can use to create Amazon SageMaker models or list on AWS Marketplace. Buyers can subscribe to model packages listed on AWS Marketplace to create models in Amazon SageMaker.

To create a model package by specifying a Docker container that contains your inference code and the Amazon S3 location of your model artifacts, provide values for InferenceSpecification. To create a model from an algorithm resource that you created or subscribed to in AWS Marketplace, provide a value for SourceAlgorithmSpecification.

fn create_notebook_instance(
    &self,
    input: CreateNotebookInstanceInput
) -> RusotoFuture<CreateNotebookInstanceOutput, CreateNotebookInstanceError>

Creates an Amazon SageMaker notebook instance. A notebook instance is a machine learning (ML) compute instance running the Jupyter Notebook App.

In a CreateNotebookInstance request, specify the type of ML compute instance that you want to run. Amazon SageMaker launches the instance, installs common libraries that you can use to explore datasets for model training, and attaches an ML storage volume to the notebook instance.

Amazon SageMaker also provides a set of example notebooks. Each notebook demonstrates how to use Amazon SageMaker with a specific algorithm or with a machine learning framework.

After receiving the request, Amazon SageMaker does the following:

  1. Creates a network interface in the Amazon SageMaker VPC.

  2. (Optional) If you specified SubnetId, Amazon SageMaker creates a network interface in your own VPC, which is inferred from the subnet ID that you provide in the input. When creating this network interface, Amazon SageMaker attaches the security group that you specified in the request to the network interface that it creates in your VPC.

  3. Launches an EC2 instance of the type specified in the request in the Amazon SageMaker VPC. If you specified SubnetId of your VPC, Amazon SageMaker specifies both network interfaces when launching this instance. This enables inbound traffic from your own VPC to the notebook instance, assuming that the security groups allow it.

After creating the notebook instance, Amazon SageMaker returns its Amazon Resource Name (ARN).

After Amazon SageMaker creates the notebook instance, you can connect to the Jupyter server and work in Jupyter notebooks. For example, you can write code to explore a dataset that you can use for model training, train a model, host models by creating Amazon SageMaker endpoints, and validate hosted models.

For more information, see How It Works.
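
A minimal sketch that requests a notebook instance in the Amazon SageMaker VPC (no SubnetId); the instance name and role ARN are placeholders:

use rusoto_core::Region;
use rusoto_sagemaker::{CreateNotebookInstanceInput, SageMaker, SageMakerClient};

fn main() {
    let client = SageMakerClient::new(Region::UsEast1);

    // Subnet and security groups are omitted, so the instance runs in the
    // Amazon SageMaker VPC; the name and role ARN are placeholders.
    let input = CreateNotebookInstanceInput {
        notebook_instance_name: "my-notebook".to_string(),
        instance_type: "ml.t2.medium".to_string(),
        role_arn: "arn:aws:iam::123456789012:role/SageMakerRole".to_string(),
        ..Default::default()
    };

    match client.create_notebook_instance(input).sync() {
        Ok(output) => println!("notebook instance ARN: {:?}", output.notebook_instance_arn),
        Err(e) => eprintln!("CreateNotebookInstance failed: {:?}", e),
    }
}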

fn create_notebook_instance_lifecycle_config(
    &self,
    input: CreateNotebookInstanceLifecycleConfigInput
) -> RusotoFuture<CreateNotebookInstanceLifecycleConfigOutput, CreateNotebookInstanceLifecycleConfigError>

Creates a lifecycle configuration that you can associate with a notebook instance. A lifecycle configuration is a collection of shell scripts that run when you create or start a notebook instance.

Each lifecycle configuration script has a limit of 16384 characters.

The value of the $PATH environment variable that is available to both scripts is /sbin:/bin:/usr/sbin:/usr/bin.

View CloudWatch Logs for notebook instance lifecycle configurations in log group /aws/sagemaker/NotebookInstances in log stream [notebook-instance-name]/[LifecycleConfigHook].

Lifecycle configuration scripts cannot run for longer than 5 minutes. If a script runs for longer than 5 minutes, it fails and the notebook instance is not created or started.

For information about notebook instance lifecycle configurations, see Step 2.1: (Optional) Customize a Notebook Instance.

fn create_presigned_notebook_instance_url(
    &self,
    input: CreatePresignedNotebookInstanceUrlInput
) -> RusotoFuture<CreatePresignedNotebookInstanceUrlOutput, CreatePresignedNotebookInstanceUrlError>

Returns a URL that you can use to connect to the Jupyter server from a notebook instance. In the Amazon SageMaker console, when you choose Open next to a notebook instance, Amazon SageMaker opens a new tab showing the Jupyter server home page from the notebook instance. The console uses this API to get the URL and show the page.

You can restrict access to this API and to the URL that it returns to a list of IP addresses that you specify. To restrict access, attach an IAM policy that denies access to this API unless the call comes from an IP address in the specified list to every AWS Identity and Access Management user, group, or role used to access the notebook instance. Use the NotIpAddress condition operator and the aws:SourceIP condition context key to specify the list of IP addresses that you want to have access to the notebook instance. For more information, see Limit Access to a Notebook Instance by IP Address.

The URL that you get from a call to CreatePresignedNotebookInstanceUrl is valid only for 5 minutes. If you try to use the URL after the 5-minute limit expires, you are directed to the AWS console sign-in page.
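
A short sketch of requesting a presigned URL for an existing notebook instance; the instance name and session duration are placeholders:

use rusoto_core::Region;
use rusoto_sagemaker::{CreatePresignedNotebookInstanceUrlInput, SageMaker, SageMakerClient};

fn main() {
    let client = SageMakerClient::new(Region::UsEast1);

    let input = CreatePresignedNotebookInstanceUrlInput {
        notebook_instance_name: "my-notebook".to_string(),
        // Optional: how long the browser session stays valid, in seconds.
        session_expiration_duration_in_seconds: Some(1800),
        ..Default::default()
    };

    match client.create_presigned_notebook_instance_url(input).sync() {
        Ok(output) => println!("presigned URL: {:?}", output.authorized_url),
        Err(e) => eprintln!("CreatePresignedNotebookInstanceUrl failed: {:?}", e),
    }
}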

fn create_training_job(
    &self,
    input: CreateTrainingJobRequest
) -> RusotoFuture<CreateTrainingJobResponse, CreateTrainingJobError>

Starts a model training job. After training completes, Amazon SageMaker saves the resulting model artifacts to an Amazon S3 location that you specify.

If you choose to host your model using Amazon SageMaker hosting services, you can use the resulting model artifacts as part of the model. You can also use the artifacts in a machine learning service other than Amazon SageMaker, provided that you know how to use them for inferences.

In the request body, you provide the following:

  • AlgorithmSpecification - Identifies the training algorithm to use.

  • HyperParameters - Specify these algorithm-specific parameters to influence the quality of the final model. For a list of hyperparameters for each training algorithm provided by Amazon SageMaker, see Algorithms.

  • InputDataConfig - Describes the training dataset and the Amazon S3 location where it is stored.

  • OutputDataConfig - Identifies the Amazon S3 location where you want Amazon SageMaker to save the results of model training.

  • ResourceConfig - Identifies the resources, ML compute instances, and ML storage volumes to deploy for model training. In distributed training, you specify more than one instance.

  • RoleARN - The Amazon Resource Name (ARN) of the IAM role that Amazon SageMaker assumes to perform tasks on your behalf during model training. You must grant this role the necessary permissions so that Amazon SageMaker can successfully complete model training.

  • StoppingCondition - Sets a duration for training. Use this parameter to cap model training costs.

For more information about Amazon SageMaker, see How It Works.
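
A hedged sketch of the request body described above; the training image, S3 output path, role ARN, and instance settings are placeholders, and InputDataConfig and HyperParameters are omitted for brevity and left at their defaults:

use rusoto_core::Region;
use rusoto_sagemaker::{
    AlgorithmSpecification, CreateTrainingJobRequest, OutputDataConfig, ResourceConfig,
    SageMaker, SageMakerClient, StoppingCondition,
};

fn main() {
    let client = SageMakerClient::new(Region::UsWest2);

    // Image URI, bucket, and role ARN below are placeholders.
    let request = CreateTrainingJobRequest {
        training_job_name: "my-training-job".to_string(),
        role_arn: "arn:aws:iam::123456789012:role/SageMakerRole".to_string(),
        algorithm_specification: AlgorithmSpecification {
            training_image: Some("123456789012.dkr.ecr.us-west-2.amazonaws.com/my-training-image:latest".to_string()),
            training_input_mode: "File".to_string(),
            ..Default::default()
        },
        output_data_config: OutputDataConfig {
            s3_output_path: "s3://my-bucket/output/".to_string(),
            ..Default::default()
        },
        resource_config: ResourceConfig {
            instance_type: "ml.m5.xlarge".to_string(),
            instance_count: 1,
            volume_size_in_gb: 50,
            ..Default::default()
        },
        // StoppingCondition caps training cost by limiting runtime.
        stopping_condition: StoppingCondition {
            max_runtime_in_seconds: Some(86400),
            ..Default::default()
        },
        ..Default::default()
    };

    match client.create_training_job(request).sync() {
        Ok(resp) => println!("training job ARN: {}", resp.training_job_arn),
        Err(e) => eprintln!("CreateTrainingJob failed: {:?}", e),
    }
}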

fn create_transform_job(
    &self,
    input: CreateTransformJobRequest
) -> RusotoFuture<CreateTransformJobResponse, CreateTransformJobError>

Starts a transform job. A transform job uses a trained model to get inferences on a dataset and saves these results to an Amazon S3 location that you specify.

To perform batch transformations, you create a transform job and use the data that you have readily available.

In the request body, you provide the following:

  • TransformJobName - Identifies the transform job. The name must be unique within an AWS Region in an AWS account.

  • ModelName - Identifies the model to use. ModelName must be the name of an existing Amazon SageMaker model in the same AWS Region and AWS account. For information on creating a model, see CreateModel.

  • TransformInput - Describes the dataset to be transformed and the Amazon S3 location where it is stored.

  • TransformOutput - Identifies the Amazon S3 location where you want Amazon SageMaker to save the results from the transform job.

  • TransformResources - Identifies the ML compute instances for the transform job.

For more information about how batch transformation works in Amazon SageMaker, see How It Works.
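
A hedged sketch of a batch transform request over an S3 prefix; the model name, S3 paths, and instance settings are placeholders, and optional fields are left at their defaults:

use rusoto_core::Region;
use rusoto_sagemaker::{
    CreateTransformJobRequest, SageMaker, SageMakerClient, TransformDataSource, TransformInput,
    TransformOutput, TransformResources, TransformS3DataSource,
};

fn main() {
    let client = SageMakerClient::new(Region::UsEast1);

    // Model name and bucket paths are placeholders.
    let request = CreateTransformJobRequest {
        transform_job_name: "my-transform-job".to_string(),
        model_name: "my-model".to_string(),
        transform_input: TransformInput {
            data_source: TransformDataSource {
                s3_data_source: TransformS3DataSource {
                    s3_data_type: "S3Prefix".to_string(),
                    s3_uri: "s3://my-bucket/batch-input/".to_string(),
                },
            },
            content_type: Some("text/csv".to_string()),
            ..Default::default()
        },
        transform_output: TransformOutput {
            s3_output_path: "s3://my-bucket/batch-output/".to_string(),
            ..Default::default()
        },
        transform_resources: TransformResources {
            instance_type: "ml.m5.xlarge".to_string(),
            instance_count: 1,
            ..Default::default()
        },
        ..Default::default()
    };

    match client.create_transform_job(request).sync() {
        Ok(resp) => println!("transform job ARN: {}", resp.transform_job_arn),
        Err(e) => eprintln!("CreateTransformJob failed: {:?}", e),
    }
}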

fn create_workteam(
    &self,
    input: CreateWorkteamRequest
) -> RusotoFuture<CreateWorkteamResponse, CreateWorkteamError>

Creates a new work team for labeling your data. A work team is defined by one or more Amazon Cognito user pools. You must first create the user pools before you can create a work team.

You cannot create more than 25 work teams in an account and region.

fn delete_algorithm(
    &self,
    input: DeleteAlgorithmInput
) -> RusotoFuture<(), DeleteAlgorithmError>

Removes the specified algorithm from your account.

fn delete_code_repository(
    &self,
    input: DeleteCodeRepositoryInput
) -> RusotoFuture<(), DeleteCodeRepositoryError>

Deletes the specified Git repository from your account.

fn delete_endpoint(
    &self,
    input: DeleteEndpointInput
) -> RusotoFuture<(), DeleteEndpointError>

Deletes an endpoint. Amazon SageMaker frees up all of the resources that were deployed when the endpoint was created.

Amazon SageMaker retires any custom KMS key grants associated with the endpoint, meaning you don't need to use the RevokeGrant API call.

fn delete_endpoint_config(
    &self,
    input: DeleteEndpointConfigInput
) -> RusotoFuture<(), DeleteEndpointConfigError>

Deletes an endpoint configuration. The DeleteEndpointConfig API deletes only the specified configuration. It does not delete endpoints created using the configuration.

fn delete_model(
    &self,
    input: DeleteModelInput
) -> RusotoFuture<(), DeleteModelError>

Deletes a model. The DeleteModel API deletes only the model entry that was created in Amazon SageMaker when you called the CreateModel API. It does not delete model artifacts, inference code, or the IAM role that you specified when creating the model.

fn delete_model_package(
    &self,
    input: DeleteModelPackageInput
) -> RusotoFuture<(), DeleteModelPackageError>

Deletes a model package.

A model package is used to create Amazon SageMaker models or list on AWS Marketplace. Buyers can subscribe to model packages listed on AWS Marketplace to create models in Amazon SageMaker.

fn delete_notebook_instance(
    &self,
    input: DeleteNotebookInstanceInput
) -> RusotoFuture<(), DeleteNotebookInstanceError>

Deletes an Amazon SageMaker notebook instance. Before you can delete a notebook instance, you must call the StopNotebookInstance API.

When you delete a notebook instance, you lose all of your data. Amazon SageMaker removes the ML compute instance, and deletes the ML storage volume and the network interface associated with the notebook instance.

fn delete_notebook_instance_lifecycle_config(
    &self,
    input: DeleteNotebookInstanceLifecycleConfigInput
) -> RusotoFuture<(), DeleteNotebookInstanceLifecycleConfigError>

Deletes a notebook instance lifecycle configuration.

fn delete_tags(
    &self,
    input: DeleteTagsInput
) -> RusotoFuture<DeleteTagsOutput, DeleteTagsError>

Deletes the specified tags from an Amazon SageMaker resource.

To list a resource's tags, use the ListTags API.

When you call this API to delete tags from a hyperparameter tuning job, the deleted tags are not removed from training jobs that the hyperparameter tuning job launched before you called this API.
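
A short sketch that removes a single tag key from a resource; the ARN and tag key are placeholders:

use rusoto_core::Region;
use rusoto_sagemaker::{DeleteTagsInput, SageMaker, SageMakerClient};

fn main() {
    let client = SageMakerClient::new(Region::UsEast1);

    // Remove the "project" tag from a placeholder training-job ARN.
    let input = DeleteTagsInput {
        resource_arn: "arn:aws:sagemaker:us-east-1:123456789012:training-job/my-job".to_string(),
        tag_keys: vec!["project".to_string()],
    };

    match client.delete_tags(input).sync() {
        Ok(_) => println!("tags deleted"),
        Err(e) => eprintln!("DeleteTags failed: {:?}", e),
    }
}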

fn delete_workteam(
    &self,
    input: DeleteWorkteamRequest
) -> RusotoFuture<DeleteWorkteamResponse, DeleteWorkteamError>

Deletes an existing work team. This operation can't be undone.

fn describe_algorithm(
    &self,
    input: DescribeAlgorithmInput
) -> RusotoFuture<DescribeAlgorithmOutput, DescribeAlgorithmError>

Returns a description of the specified algorithm that is in your account.

fn describe_code_repository(
    &self,
    input: DescribeCodeRepositoryInput
) -> RusotoFuture<DescribeCodeRepositoryOutput, DescribeCodeRepositoryError>

Gets details about the specified Git repository.

fn describe_compilation_job(
    &self,
    input: DescribeCompilationJobRequest
) -> RusotoFuture<DescribeCompilationJobResponse, DescribeCompilationJobError>

Returns information about a model compilation job.

To create a model compilation job, use CreateCompilationJob. To get information about multiple model compilation jobs, use ListCompilationJobs.

fn describe_endpoint(
    &self,
    input: DescribeEndpointInput
) -> RusotoFuture<DescribeEndpointOutput, DescribeEndpointError>

Returns the description of an endpoint.

fn describe_endpoint_config(
    &self,
    input: DescribeEndpointConfigInput
) -> RusotoFuture<DescribeEndpointConfigOutput, DescribeEndpointConfigError>

Returns the description of an endpoint configuration created using the CreateEndpointConfig API.

fn describe_hyper_parameter_tuning_job(
    &self,
    input: DescribeHyperParameterTuningJobRequest
) -> RusotoFuture<DescribeHyperParameterTuningJobResponse, DescribeHyperParameterTuningJobError>

Gets a description of a hyperparameter tuning job.

fn describe_labeling_job(
    &self,
    input: DescribeLabelingJobRequest
) -> RusotoFuture<DescribeLabelingJobResponse, DescribeLabelingJobError>

Gets information about a labeling job.

fn describe_model(
    &self,
    input: DescribeModelInput
) -> RusotoFuture<DescribeModelOutput, DescribeModelError>

Describes a model that you created using the CreateModel API.

fn describe_model_package(
    &self,
    input: DescribeModelPackageInput
) -> RusotoFuture<DescribeModelPackageOutput, DescribeModelPackageError>

Returns a description of the specified model package, which is used to create Amazon SageMaker models or list them on AWS Marketplace.

To create models in Amazon SageMaker, buyers can subscribe to model packages listed on AWS Marketplace.

fn describe_notebook_instance(
    &self,
    input: DescribeNotebookInstanceInput
) -> RusotoFuture<DescribeNotebookInstanceOutput, DescribeNotebookInstanceError>

Returns information about a notebook instance.

fn describe_notebook_instance_lifecycle_config(
    &self,
    input: DescribeNotebookInstanceLifecycleConfigInput
) -> RusotoFuture<DescribeNotebookInstanceLifecycleConfigOutput, DescribeNotebookInstanceLifecycleConfigError>

Returns a description of a notebook instance lifecycle configuration.

For information about notebook instance lifecycle configurations, see Step 2.1: (Optional) Customize a Notebook Instance.

fn describe_subscribed_workteam(
    &self,
    input: DescribeSubscribedWorkteamRequest
) -> RusotoFuture<DescribeSubscribedWorkteamResponse, DescribeSubscribedWorkteamError>

Gets information about a work team provided by a vendor. It returns details about the subscription with a vendor in the AWS Marketplace.

fn describe_training_job(
    &self,
    input: DescribeTrainingJobRequest
) -> RusotoFuture<DescribeTrainingJobResponse, DescribeTrainingJobError>

Returns information about a training job.

fn describe_transform_job(
    &self,
    input: DescribeTransformJobRequest
) -> RusotoFuture<DescribeTransformJobResponse, DescribeTransformJobError>

Returns information about a transform job.

fn describe_workteam(
    &self,
    input: DescribeWorkteamRequest
) -> RusotoFuture<DescribeWorkteamResponse, DescribeWorkteamError>

Gets information about a specific work team. You can see information such as the create date, the last updated date, membership information, and the work team's Amazon Resource Name (ARN).

fn get_search_suggestions(
    &self,
    input: GetSearchSuggestionsRequest
) -> RusotoFuture<GetSearchSuggestionsResponse, GetSearchSuggestionsError>

An auto-complete API for the search functionality in the Amazon SageMaker console. It returns suggestions of possible matches for the property name to use in Search queries. Provides suggestions for HyperParameters, Tags, and Metrics.

fn list_algorithms(
    &self,
    input: ListAlgorithmsInput
) -> RusotoFuture<ListAlgorithmsOutput, ListAlgorithmsError>

Lists the machine learning algorithms that have been created.

fn list_code_repositories(
    &self,
    input: ListCodeRepositoriesInput
) -> RusotoFuture<ListCodeRepositoriesOutput, ListCodeRepositoriesError>

Gets a list of the Git repositories in your account.

fn list_compilation_jobs(
    &self,
    input: ListCompilationJobsRequest
) -> RusotoFuture<ListCompilationJobsResponse, ListCompilationJobsError>

Lists model compilation jobs that satisfy various filters.

To create a model compilation job, use CreateCompilationJob. To get information about a particular model compilation job you have created, use DescribeCompilationJob.

fn list_endpoint_configs(
    &self,
    input: ListEndpointConfigsInput
) -> RusotoFuture<ListEndpointConfigsOutput, ListEndpointConfigsError>

Lists endpoint configurations.

fn list_endpoints(
    &self,
    input: ListEndpointsInput
) -> RusotoFuture<ListEndpointsOutput, ListEndpointsError>

Lists endpoints.

fn list_hyper_parameter_tuning_jobs(
    &self,
    input: ListHyperParameterTuningJobsRequest
) -> RusotoFuture<ListHyperParameterTuningJobsResponse, ListHyperParameterTuningJobsError>

Gets a list of HyperParameterTuningJobSummary objects that describe the hyperparameter tuning jobs launched in your account.

fn list_labeling_jobs(
    &self,
    input: ListLabelingJobsRequest
) -> RusotoFuture<ListLabelingJobsResponse, ListLabelingJobsError>

Gets a list of labeling jobs.

fn list_labeling_jobs_for_workteam(
    &self,
    input: ListLabelingJobsForWorkteamRequest
) -> RusotoFuture<ListLabelingJobsForWorkteamResponse, ListLabelingJobsForWorkteamError>

Gets a list of labeling jobs assigned to a specified work team.

fn list_model_packages(
    &self,
    input: ListModelPackagesInput
) -> RusotoFuture<ListModelPackagesOutput, ListModelPackagesError>

Lists the model packages that have been created.

fn list_models(
    &self,
    input: ListModelsInput
) -> RusotoFuture<ListModelsOutput, ListModelsError>

Lists models created with the CreateModel API.

fn list_notebook_instance_lifecycle_configs(
    &self,
    input: ListNotebookInstanceLifecycleConfigsInput
) -> RusotoFuture<ListNotebookInstanceLifecycleConfigsOutput, ListNotebookInstanceLifecycleConfigsError>

Lists notebook instance lifecycle configurations created with the CreateNotebookInstanceLifecycleConfig API.

fn list_notebook_instances(
    &self,
    input: ListNotebookInstancesInput
) -> RusotoFuture<ListNotebookInstancesOutput, ListNotebookInstancesError>

Returns a list of the Amazon SageMaker notebook instances in the requester's account in an AWS Region.

fn list_subscribed_workteams(
    &self,
    input: ListSubscribedWorkteamsRequest
) -> RusotoFuture<ListSubscribedWorkteamsResponse, ListSubscribedWorkteamsError>

Gets a list of the work teams that you are subscribed to in the AWS Marketplace. The list may be empty if no work team satisfies the filter specified in the NameContains parameter.

fn list_tags(
    &self,
    input: ListTagsInput
) -> RusotoFuture<ListTagsOutput, ListTagsError>

Returns the tags for the specified Amazon SageMaker resource.

fn list_training_jobs(
    &self,
    input: ListTrainingJobsRequest
) -> RusotoFuture<ListTrainingJobsResponse, ListTrainingJobsError>

Lists training jobs.
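
A sketch of paging through completed training jobs with NextToken; the status filter and page size are illustrative:

use rusoto_core::Region;
use rusoto_sagemaker::{ListTrainingJobsRequest, SageMaker, SageMakerClient};

fn main() {
    let client = SageMakerClient::new(Region::UsEast1);
    let mut next_token: Option<String> = None;

    // Page through completed training jobs, 50 at a time.
    loop {
        let request = ListTrainingJobsRequest {
            status_equals: Some("Completed".to_string()),
            max_results: Some(50),
            next_token: next_token.clone(),
            ..Default::default()
        };

        match client.list_training_jobs(request).sync() {
            Ok(response) => {
                for job in &response.training_job_summaries {
                    println!("{} ({})", job.training_job_name, job.training_job_status);
                }
                next_token = response.next_token;
                if next_token.is_none() {
                    break;
                }
            }
            Err(e) => {
                eprintln!("ListTrainingJobs failed: {:?}", e);
                break;
            }
        }
    }
}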

fn list_training_jobs_for_hyper_parameter_tuning_job(
    &self,
    input: ListTrainingJobsForHyperParameterTuningJobRequest
) -> RusotoFuture<ListTrainingJobsForHyperParameterTuningJobResponse, ListTrainingJobsForHyperParameterTuningJobError>

Gets a list of TrainingJobSummary objects that describe the training jobs that a hyperparameter tuning job launched.

fn list_transform_jobs(
    &self,
    input: ListTransformJobsRequest
) -> RusotoFuture<ListTransformJobsResponse, ListTransformJobsError>

Lists transform jobs.

fn list_workteams(
    &self,
    input: ListWorkteamsRequest
) -> RusotoFuture<ListWorkteamsResponse, ListWorkteamsError>

Gets a list of work teams that you have defined in a region. The list may be empty if no work team satisfies the filter specified in the NameContains parameter.

fn render_ui_template(
    &self,
    input: RenderUiTemplateRequest
) -> RusotoFuture<RenderUiTemplateResponse, RenderUiTemplateError>

Renders the UI template so that you can preview the worker's experience.

fn search(
    &self,
    input: SearchRequest
) -> RusotoFuture<SearchResponse, SearchError>

Finds Amazon SageMaker resources that match a search query. Matching resource objects are returned as a list of SearchResult objects in the response. You can sort the search results by any resource property in ascending or descending order.

You can query against the following value types: numerical, text, Booleans, and timestamps.
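
A sketch of a Search query over training jobs; the filter property, operator, and sort settings are illustrative, and the expression shape is assumed from the generated rusoto types:

use rusoto_core::Region;
use rusoto_sagemaker::{Filter, SageMaker, SageMakerClient, SearchExpression, SearchRequest};

fn main() {
    let client = SageMakerClient::new(Region::UsEast1);

    // Find completed training jobs, newest first; the property name,
    // operator, and value are illustrative.
    let request = SearchRequest {
        resource: "TrainingJob".to_string(),
        search_expression: Some(SearchExpression {
            filters: Some(vec![Filter {
                name: "TrainingJobStatus".to_string(),
                operator: Some("Equals".to_string()),
                value: Some("Completed".to_string()),
            }]),
            ..Default::default()
        }),
        sort_by: Some("CreationTime".to_string()),
        sort_order: Some("Descending".to_string()),
        ..Default::default()
    };

    match client.search(request).sync() {
        Ok(response) => {
            for record in response.results.unwrap_or_default() {
                if let Some(job) = record.training_job {
                    println!("{:?}", job.training_job_name);
                }
            }
        }
        Err(e) => eprintln!("Search failed: {:?}", e),
    }
}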

fn start_notebook_instance(
    &self,
    input: StartNotebookInstanceInput
) -> RusotoFuture<(), StartNotebookInstanceError>

Launches an ML compute instance with the latest version of the libraries and attaches your ML storage volume. After configuring the notebook instance, Amazon SageMaker sets the notebook instance status to InService. A notebook instance's status must be InService before you can connect to your Jupyter notebook.

fn stop_compilation_job(
    &self,
    input: StopCompilationJobRequest
) -> RusotoFuture<(), StopCompilationJobError>

Stops a model compilation job.

To stop a job, Amazon SageMaker sends the algorithm the SIGTERM signal. This gracefully shuts the job down. If the job hasn't stopped, it sends the SIGKILL signal.

When it receives a StopCompilationJob request, Amazon SageMaker changes the CompilationJobSummary$CompilationJobStatus of the job to Stopping. After Amazon SageMaker stops the job, it sets the CompilationJobSummary$CompilationJobStatus to Stopped.

fn stop_hyper_parameter_tuning_job(
    &self,
    input: StopHyperParameterTuningJobRequest
) -> RusotoFuture<(), StopHyperParameterTuningJobError>

Stops a running hyperparameter tuning job and all running training jobs that the tuning job launched.

All model artifacts output from the training jobs are stored in Amazon Simple Storage Service (Amazon S3). All data that the training jobs write to Amazon CloudWatch Logs are still available in CloudWatch. After the tuning job moves to the Stopped state, it releases all reserved resources for the tuning job.

fn stop_labeling_job(
    &self,
    input: StopLabelingJobRequest
) -> RusotoFuture<(), StopLabelingJobError>

Stops a running labeling job. A job that is stopped cannot be restarted. Any results obtained before the job is stopped are placed in the Amazon S3 output bucket.

fn stop_notebook_instance(
    &self,
    input: StopNotebookInstanceInput
) -> RusotoFuture<(), StopNotebookInstanceError>

Terminates the ML compute instance. Before terminating the instance, Amazon SageMaker disconnects the ML storage volume from it. Amazon SageMaker preserves the ML storage volume. Amazon SageMaker stops charging you for the ML compute instance when you call StopNotebookInstance.

To access data on the ML storage volume for a notebook instance that has been terminated, call the StartNotebookInstance API. StartNotebookInstance launches another ML compute instance, configures it, and attaches the preserved ML storage volume so you can continue your work.

fn stop_training_job(
    &self,
    input: StopTrainingJobRequest
) -> RusotoFuture<(), StopTrainingJobError>

Stops a training job. To stop a job, Amazon SageMaker sends the algorithm the SIGTERM signal, which delays job termination for 120 seconds. Algorithms might use this 120-second window to save the model artifacts, so that the results of training are not lost.

When it receives a StopTrainingJob request, Amazon SageMaker changes the status of the job to Stopping. After Amazon SageMaker stops the job, it sets the status to Stopped.

fn stop_transform_job(
    &self,
    input: StopTransformJobRequest
) -> RusotoFuture<(), StopTransformJobError>

Stops a transform job.

When Amazon SageMaker receives a StopTransformJob request, the status of the job changes to Stopping. After Amazon SageMaker stops the job, the status is set to Stopped. When you stop a transform job before it is completed, Amazon SageMaker doesn't store the job's output in Amazon S3.

fn update_code_repository(
    &self,
    input: UpdateCodeRepositoryInput
) -> RusotoFuture<UpdateCodeRepositoryOutput, UpdateCodeRepositoryError>

Updates the specified Git repository with the specified values.

fn update_endpoint(
    &self,
    input: UpdateEndpointInput
) -> RusotoFuture<UpdateEndpointOutput, UpdateEndpointError>

Deploys the new EndpointConfig specified in the request, switches to using the newly created endpoint, and then deletes the resources provisioned for the endpoint using the previous EndpointConfig (there is no availability loss).

When Amazon SageMaker receives the request, it sets the endpoint status to Updating. After updating the endpoint, it sets the status to InService. To check the status of an endpoint, use the DescribeEndpoint API.

You must not delete an EndpointConfig in use by an endpoint that is live or while the UpdateEndpoint or CreateEndpoint operations are being performed on the endpoint. To update an endpoint, you must create a new EndpointConfig.

fn update_endpoint_weights_and_capacities(
    &self,
    input: UpdateEndpointWeightsAndCapacitiesInput
) -> RusotoFuture<UpdateEndpointWeightsAndCapacitiesOutput, UpdateEndpointWeightsAndCapacitiesError>

Updates variant weight of one or more variants associated with an existing endpoint, or capacity of one variant associated with an existing endpoint. When it receives the request, Amazon SageMaker sets the endpoint status to Updating. After updating the endpoint, it sets the status to InService. To check the status of an endpoint, use the DescribeEndpoint API.
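
A sketch that shifts weight and capacity toward one variant of an existing endpoint; the endpoint and variant names are placeholders:

use rusoto_core::Region;
use rusoto_sagemaker::{
    DesiredWeightAndCapacity, SageMaker, SageMakerClient, UpdateEndpointWeightsAndCapacitiesInput,
};

fn main() {
    let client = SageMakerClient::new(Region::UsEast1);

    // Give "variant-b" more traffic weight and an extra instance;
    // both names are placeholders for existing resources.
    let input = UpdateEndpointWeightsAndCapacitiesInput {
        endpoint_name: "my-endpoint".to_string(),
        desired_weights_and_capacities: vec![DesiredWeightAndCapacity {
            variant_name: "variant-b".to_string(),
            desired_weight: Some(3.0),
            desired_instance_count: Some(2),
        }],
    };

    match client.update_endpoint_weights_and_capacities(input).sync() {
        Ok(output) => println!("endpoint ARN: {}", output.endpoint_arn),
        Err(e) => eprintln!("UpdateEndpointWeightsAndCapacities failed: {:?}", e),
    }
}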

fn update_notebook_instance(
    &self,
    input: UpdateNotebookInstanceInput
) -> RusotoFuture<UpdateNotebookInstanceOutput, UpdateNotebookInstanceError>

Updates a notebook instance. NotebookInstance updates include upgrading or downgrading the ML compute instance used for your notebook instance to accommodate changes in your workload requirements.

fn update_notebook_instance_lifecycle_config(
    &self,
    input: UpdateNotebookInstanceLifecycleConfigInput
) -> RusotoFuture<UpdateNotebookInstanceLifecycleConfigOutput, UpdateNotebookInstanceLifecycleConfigError>

Updates a notebook instance lifecycle configuration created with the CreateNotebookInstanceLifecycleConfig API.

fn update_workteam(
    &self,
    input: UpdateWorkteamRequest
) -> RusotoFuture<UpdateWorkteamResponse, UpdateWorkteamError>

Updates an existing work team with new member definitions or description.


Implementors

impl SageMaker for SageMakerClient

fn add_tags(
    &self,
    input: AddTagsInput
) -> RusotoFuture<AddTagsOutput, AddTagsError>

Adds or overwrites one or more tags for the specified Amazon SageMaker resource. You can add tags to notebook instances, training jobs, hyperparameter tuning jobs, batch transform jobs, models, labeling jobs, work teams, endpoint configurations, and endpoints.

Each tag consists of a key and an optional value. Tag keys must be unique per resource. For more information about tags, see AWS Tagging Strategies.

Tags that you add to a hyperparameter tuning job by calling this API are also added to any training jobs that the hyperparameter tuning job launches after you call this API, but not to training jobs that the hyperparameter tuning job launched before you called this API. To make sure that the tags associated with a hyperparameter tuning job are also added to all training jobs that the hyperparameter tuning job launches, add the tags when you first create the tuning job by specifying them in the Tags parameter of CreateHyperParameterTuningJob.

fn create_algorithm(
    &self,
    input: CreateAlgorithmInput
) -> RusotoFuture<CreateAlgorithmOutput, CreateAlgorithmError>

Create a machine learning algorithm that you can use in Amazon SageMaker and list in the AWS Marketplace.

fn create_code_repository(
    &self,
    input: CreateCodeRepositoryInput
) -> RusotoFuture<CreateCodeRepositoryOutput, CreateCodeRepositoryError>

Creates a Git repository as a resource in your Amazon SageMaker account. You can associate the repository with notebook instances so that you can use Git source control for the notebooks you create. The Git repository is a resource in your Amazon SageMaker account, so it can be associated with more than one notebook instance, and it persists independently from the lifecycle of any notebook instances it is associated with.

The repository can be hosted either in AWS CodeCommit or in any other Git repository.

fn create_compilation_job(
    &self,
    input: CreateCompilationJobRequest
) -> RusotoFuture<CreateCompilationJobResponse, CreateCompilationJobError>

Starts a model compilation job. After the model has been compiled, Amazon SageMaker saves the resulting model artifacts to an Amazon Simple Storage Service (Amazon S3) bucket that you specify.

If you choose to host your model using Amazon SageMaker hosting services, you can use the resulting model artifacts as part of the model. You can also use the artifacts with AWS IoT Greengrass. In that case, deploy them as an ML resource.

In the request body, you provide the following:

  • A name for the compilation job

  • Information about the input model artifacts

  • The output location for the compiled model and the device (target) that the model runs on

  • The Amazon Resource Name (ARN) of the IAM role that Amazon SageMaker assumes to perform the model compilation job

You can also provide a Tag to track the model compilation job's resource use and costs. The response body contains the CompilationJobArn for the compiled job.

To stop a model compilation job, use StopCompilationJob. To get information about a particular model compilation job, use DescribeCompilationJob. To get information about multiple model compilation jobs, use ListCompilationJobs.

fn create_endpoint(
    &self,
    input: CreateEndpointInput
) -> RusotoFuture<CreateEndpointOutput, CreateEndpointError>

Creates an endpoint using the endpoint configuration specified in the request. Amazon SageMaker uses the endpoint to provision resources and deploy models. You create the endpoint configuration with the CreateEndpointConfig API.

Use this API only for hosting models using Amazon SageMaker hosting services.

You must not delete an EndpointConfig in use by an endpoint that is live or while the UpdateEndpoint or CreateEndpoint operations are being performed on the endpoint. To update an endpoint, you must create a new EndpointConfig.

The endpoint name must be unique within an AWS Region in your AWS account.

When it receives the request, Amazon SageMaker creates the endpoint, launches the resources (ML compute instances), and deploys the model(s) on them.

When Amazon SageMaker receives the request, it sets the endpoint status to Creating. After it creates the endpoint, it sets the status to InService. Amazon SageMaker can then process incoming requests for inferences. To check the status of an endpoint, use the DescribeEndpoint API.

For an example, see Exercise 1: Using the K-Means Algorithm Provided by Amazon SageMaker.

If any of the models hosted at this endpoint get model data from an Amazon S3 location, Amazon SageMaker uses AWS Security Token Service to download model artifacts from the S3 path you provided. AWS STS is activated in your IAM user account by default. If you previously deactivated AWS STS for a region, you need to reactivate AWS STS for that region. For more information, see Activating and Deactivating AWS STS in an AWS Region in the AWS Identity and Access Management User Guide.

fn create_endpoint_config(
    &self,
    input: CreateEndpointConfigInput
) -> RusotoFuture<CreateEndpointConfigOutput, CreateEndpointConfigError>

Creates an endpoint configuration that Amazon SageMaker hosting services uses to deploy models. In the configuration, you identify one or more models, created using the CreateModel API, to deploy and the resources that you want Amazon SageMaker to provision. Then you call the CreateEndpoint API.

Use this API only if you want to use Amazon SageMaker hosting services to deploy models into production.

In the request, you define one or more ProductionVariants, each of which identifies a model. Each ProductionVariant parameter also describes the resources that you want Amazon SageMaker to provision. This includes the number and type of ML compute instances to deploy.

If you are hosting multiple models, you also assign a VariantWeight to specify how much traffic you want to allocate to each model. For example, suppose that you want to host two models, A and B, and you assign traffic weight 2 for model A and 1 for model B. Amazon SageMaker distributes two-thirds of the traffic to Model A, and one-third to model B.

fn create_hyper_parameter_tuning_job(
    &self,
    input: CreateHyperParameterTuningJobRequest
) -> RusotoFuture<CreateHyperParameterTuningJobResponse, CreateHyperParameterTuningJobError>

Starts a hyperparameter tuning job. A hyperparameter tuning job finds the best version of a model by running many training jobs on your dataset using the algorithm you choose and values for hyperparameters within ranges that you specify. It then chooses the hyperparameter values that result in a model that performs the best, as measured by an objective metric that you choose.

fn create_labeling_job(
    &self,
    input: CreateLabelingJobRequest
) -> RusotoFuture<CreateLabelingJobResponse, CreateLabelingJobError>

Creates a job that uses workers to label the data objects in your input dataset. You can use the labeled data to train machine learning models.

You can select your workforce from one of three providers:

  • A private workforce that you create. It can include employees, contractors, and outside experts. Use a private workforce when you want the data to stay within your organization or when a specific set of skills is required.

  • One or more vendors that you select from the AWS Marketplace. Vendors provide expertise in specific areas.

  • The Amazon Mechanical Turk workforce. This is the largest workforce, but it should only be used for public data or data that has been stripped of any personally identifiable information.

You can also use automated data labeling to reduce the number of data objects that need to be labeled by a human. Automated data labeling uses active learning to determine if a data object can be labeled by machine or if it needs to be sent to a human worker. For more information, see Using Automated Data Labeling.

The data objects to be labeled are contained in an Amazon S3 bucket. You create a manifest file that describes the location of each object. For more information, see Using Input and Output Data.

The output can be used as the manifest file for another labeling job or as training data for your machine learning models.

fn create_model(
    &self,
    input: CreateModelInput
) -> RusotoFuture<CreateModelOutput, CreateModelError>
[src]

Creates a model in Amazon SageMaker. In the request, you name the model and describe a primary container. For the primary container, you specify the docker image containing inference code, artifacts (from prior training), and a custom environment map that the inference code uses when you deploy the model for predictions.

Use this API to create a model if you want to use Amazon SageMaker hosting services or run a batch transform job.

To host your model, you create an endpoint configuration with the CreateEndpointConfig API, and then create an endpoint with the CreateEndpoint API. Amazon SageMaker then deploys all of the containers that you defined for the model in the hosting environment.

To run a batch transform using your model, you start a job with the CreateTransformJob API. Amazon SageMaker uses your model and your dataset to get inferences which are then saved to a specified S3 location.

In the CreateModel request, you must define a container with the PrimaryContainer parameter.

In the request, you also provide an IAM role that Amazon SageMaker can assume to access model artifacts and the docker image for deployment on ML compute hosting instances or for batch transform jobs. In addition, you also use the IAM role to manage permissions the inference code needs. For example, if the inference code accesses any other AWS resources, you grant necessary permissions via this role.
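
A minimal sketch of a CreateModel call under those rules, with a placeholder model name, ECR image URI, S3 artifact location, and IAM role ARN; the field names follow the structs rusoto generates for this API and are worth checking against your crate version.

use rusoto_core::Region;
use rusoto_sagemaker::{ContainerDefinition, CreateModelInput, SageMaker, SageMakerClient};

fn main() {
    let client = SageMakerClient::new(Region::UsEast1);

    let input = CreateModelInput {
        model_name: "my-model".to_string(), // placeholder
        execution_role_arn: "arn:aws:iam::123456789012:role/SageMakerRole".to_string(), // placeholder
        primary_container: Some(ContainerDefinition {
            image: Some("123456789012.dkr.ecr.us-east-1.amazonaws.com/my-inference:latest".to_string()),
            model_data_url: Some("s3://my-bucket/model/model.tar.gz".to_string()),
            ..Default::default()
        }),
        ..Default::default()
    };

    match client.create_model(input).sync() {
        Ok(output) => println!("model ARN: {}", output.model_arn),
        Err(e) => eprintln!("create_model failed: {}", e),
    }
}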

fn create_model_package(
    &self,
    input: CreateModelPackageInput
) -> RusotoFuture<CreateModelPackageOutput, CreateModelPackageError>
[src]

Creates a model package that you can use to create Amazon SageMaker models or list on AWS Marketplace. Buyers can subscribe to model packages listed on AWS Marketplace to create models in Amazon SageMaker.

To create a model package by specifying a Docker container that contains your inference code and the Amazon S3 location of your model artifacts, provide values for InferenceSpecification. To create a model from an algorithm resource that you created or subscribed to in AWS Marketplace, provide a value for SourceAlgorithmSpecification.

fn create_notebook_instance(
    &self,
    input: CreateNotebookInstanceInput
) -> RusotoFuture<CreateNotebookInstanceOutput, CreateNotebookInstanceError>
[src]

Creates an Amazon SageMaker notebook instance. A notebook instance is a machine learning (ML) compute instance running the Jupyter Notebook App.

In a CreateNotebookInstance request, specify the type of ML compute instance that you want to run. Amazon SageMaker launches the instance, installs common libraries that you can use to explore datasets for model training, and attaches an ML storage volume to the notebook instance.

Amazon SageMaker also provides a set of example notebooks. Each notebook demonstrates how to use Amazon SageMaker with a specific algorithm or with a machine learning framework.

After receiving the request, Amazon SageMaker does the following:

  1. Creates a network interface in the Amazon SageMaker VPC.

  2. (Optional) If you specified SubnetId, Amazon SageMaker creates a network interface in your own VPC, which is inferred from the subnet ID that you provide in the input. When creating this network interface, Amazon SageMaker attaches the security group that you specified in the request to the network interface that it creates in your VPC.

  3. Launches an EC2 instance of the type specified in the request in the Amazon SageMaker VPC. If you specified SubnetId of your VPC, Amazon SageMaker specifies both network interfaces when launching this instance. This enables inbound traffic from your own VPC to the notebook instance, assuming that the security groups allow it.

After creating the notebook instance, Amazon SageMaker returns its Amazon Resource Name (ARN).

After Amazon SageMaker creates the notebook instance, you can connect to the Jupyter server and work in Jupyter notebooks. For example, you can write code to explore a dataset that you can use for model training, train a model, host models by creating Amazon SageMaker endpoints, and validate hosted models.

For more information, see How It Works.
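
The sketch below asks for a small notebook instance; the instance name, instance type, and role ARN are placeholders, and SubnetId/SecurityGroupIds are left unset, so only the network interface in the Amazon SageMaker VPC (step 1 above) is created. Field names assume the generated rusoto structs.

use rusoto_core::Region;
use rusoto_sagemaker::{CreateNotebookInstanceInput, SageMaker, SageMakerClient};

fn main() {
    let client = SageMakerClient::new(Region::UsEast1);

    let input = CreateNotebookInstanceInput {
        notebook_instance_name: "my-notebook".to_string(), // placeholder
        instance_type: "ml.t2.medium".to_string(),
        role_arn: "arn:aws:iam::123456789012:role/SageMakerRole".to_string(), // placeholder
        // subnet_id / security_group_ids are only needed if you want the
        // second network interface in your own VPC (step 2 above).
        ..Default::default()
    };

    match client.create_notebook_instance(input).sync() {
        Ok(output) => println!("notebook instance ARN: {:?}", output.notebook_instance_arn),
        Err(e) => eprintln!("create_notebook_instance failed: {}", e),
    }
}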

fn create_notebook_instance_lifecycle_config(
    &self,
    input: CreateNotebookInstanceLifecycleConfigInput
) -> RusotoFuture<CreateNotebookInstanceLifecycleConfigOutput, CreateNotebookInstanceLifecycleConfigError>
[src]

Creates a lifecycle configuration that you can associate with a notebook instance. A lifecycle configuration is a collection of shell scripts that run when you create or start a notebook instance.

Each lifecycle configuration script has a limit of 16384 characters.

The value of the $PATH environment variable that is available to both scripts is /sbin:/bin:/usr/sbin:/usr/bin.

View CloudWatch Logs for notebook instance lifecycle configurations in log group /aws/sagemaker/NotebookInstances in log stream [notebook-instance-name]/[LifecycleConfigHook].

Lifecycle configuration scripts cannot run for longer than 5 minutes. If a script runs for longer than 5 minutes, it fails and the notebook instance is not created or started.

For information about notebook instance lifecycle configurations, see Step 2.1: (Optional) Customize a Notebook Instance.

fn create_presigned_notebook_instance_url(
    &self,
    input: CreatePresignedNotebookInstanceUrlInput
) -> RusotoFuture<CreatePresignedNotebookInstanceUrlOutput, CreatePresignedNotebookInstanceUrlError>
[src]

Returns a URL that you can use to connect to the Jupyter server from a notebook instance. In the Amazon SageMaker console, when you choose Open next to a notebook instance, Amazon SageMaker opens a new tab showing the Jupyter server home page from the notebook instance. The console uses this API to get the URL and show the page.

You can restrict access to this API and to the URL that it returns to a list of IP addresses that you specify. To restrict access, attach an IAM policy that denies access to this API unless the call comes from an IP address in the specified list to every AWS Identity and Access Management user, group, or role used to access the notebook instance. Use the NotIpAddress condition operator and the aws:SourceIP condition context key to specify the list of IP addresses that you want to have access to the notebook instance. For more information, see Limit Access to a Notebook Instance by IP Address.

The URL that you get from a call to CreatePresignedNotebookInstanceUrl is valid only for 5 minutes. If you try to use the URL after the 5-minute limit expires, you are directed to the AWS console sign-in page.
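
A short sketch of fetching a presigned URL for an existing notebook instance; the instance name and session duration are placeholders, and the field names assume the generated rusoto input struct.

use rusoto_core::Region;
use rusoto_sagemaker::{CreatePresignedNotebookInstanceUrlInput, SageMaker, SageMakerClient};

fn main() {
    let client = SageMakerClient::new(Region::UsEast1);

    let input = CreatePresignedNotebookInstanceUrlInput {
        notebook_instance_name: "my-notebook".to_string(), // placeholder
        // Optional: how long the login session created through the URL stays valid.
        session_expiration_duration_in_seconds: Some(1800),
        ..Default::default()
    };

    match client.create_presigned_notebook_instance_url(input).sync() {
        // The URL itself expires 5 minutes after it is issued.
        Ok(output) => println!("authorized URL: {:?}", output.authorized_url),
        Err(e) => eprintln!("create_presigned_notebook_instance_url failed: {}", e),
    }
}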

fn create_training_job(
    &self,
    input: CreateTrainingJobRequest
) -> RusotoFuture<CreateTrainingJobResponse, CreateTrainingJobError>
[src]

Starts a model training job. After training completes, Amazon SageMaker saves the resulting model artifacts to an Amazon S3 location that you specify.

If you choose to host your model using Amazon SageMaker hosting services, you can use the resulting model artifacts as part of the model. You can also use the artifacts in a machine learning service other than Amazon SageMaker, provided that you know how to use them for inferences.

In the request body, you provide the following:

  • AlgorithmSpecification - Identifies the training algorithm to use.

  • HyperParameters - Specify these algorithm-specific parameters to influence the quality of the final model. For a list of hyperparameters for each training algorithm provided by Amazon SageMaker, see Algorithms.

  • InputDataConfig - Describes the training dataset and the Amazon S3 location where it is stored.

  • OutputDataConfig - Identifies the Amazon S3 location where you want Amazon SageMaker to save the results of model training.

  • ResourceConfig - Identifies the resources, ML compute instances, and ML storage volumes to deploy for model training. In distributed training, you specify more than one instance.

  • RoleARN - The Amazon Resource Name (ARN) that Amazon SageMaker assumes to perform tasks on your behalf during model training. You must grant this role the necessary permissions so that Amazon SageMaker can successfully complete model training.

  • StoppingCondition - Sets a duration for training. Use this parameter to cap model training costs.

For more information about Amazon SageMaker, see How It Works.
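
Tying the request fields above together, here is a hedged sketch of a CreateTrainingJob call. Every name, ARN, image URI, S3 path, and hyperparameter is a placeholder, the training channels are omitted for brevity, and the field names assume the structs rusoto generates for this API version.

use std::collections::HashMap;

use rusoto_core::Region;
use rusoto_sagemaker::{
    AlgorithmSpecification, CreateTrainingJobRequest, OutputDataConfig, ResourceConfig,
    SageMaker, SageMakerClient, StoppingCondition,
};

fn main() {
    let client = SageMakerClient::new(Region::UsEast1);

    let mut hyper_parameters = HashMap::new();
    hyper_parameters.insert("epochs".to_string(), "10".to_string()); // algorithm-specific

    let request = CreateTrainingJobRequest {
        training_job_name: "my-training-job".to_string(), // placeholder, unique per account/region
        role_arn: "arn:aws:iam::123456789012:role/SageMakerRole".to_string(), // placeholder
        algorithm_specification: AlgorithmSpecification {
            training_image: Some("123456789012.dkr.ecr.us-east-1.amazonaws.com/my-training:latest".to_string()),
            training_input_mode: "File".to_string(),
            ..Default::default()
        },
        output_data_config: OutputDataConfig {
            s3_output_path: "s3://my-bucket/output/".to_string(), // placeholder
            ..Default::default()
        },
        resource_config: ResourceConfig {
            instance_count: 1,
            instance_type: "ml.m5.xlarge".to_string(),
            volume_size_in_gb: 50,
            ..Default::default()
        },
        stopping_condition: StoppingCondition {
            max_runtime_in_seconds: Some(3600), // cap training cost at one hour
            ..Default::default()
        },
        hyper_parameters: Some(hyper_parameters),
        // input_data_config (the training channels) is left out of this sketch;
        // most algorithms need at least one channel pointing at the dataset in S3.
        ..Default::default()
    };

    match client.create_training_job(request).sync() {
        Ok(response) => println!("training job ARN: {}", response.training_job_arn),
        Err(e) => eprintln!("create_training_job failed: {}", e),
    }
}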

fn create_transform_job(
    &self,
    input: CreateTransformJobRequest
) -> RusotoFuture<CreateTransformJobResponse, CreateTransformJobError>
[src]

Starts a transform job. A transform job uses a trained model to get inferences on a dataset and saves these results to an Amazon S3 location that you specify.

To perform batch transformations, you create a transform job and use the data that you have readily available.

In the request body, you provide the following:

  • TransformJobName - Identifies the transform job. The name must be unique within an AWS Region in an AWS account.

  • ModelName - Identifies the model to use. ModelName must be the name of an existing Amazon SageMaker model in the same AWS Region and AWS account. For information on creating a model, see CreateModel.

  • TransformInput - Describes the dataset to be transformed and the Amazon S3 location where it is stored.

  • TransformOutput - Identifies the Amazon S3 location where you want Amazon SageMaker to save the results from the transform job.

  • TransformResources - Identifies the ML compute instances for the transform job.

For more information about how batch transformation works in Amazon SageMaker, see How It Works.
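
The sketch below assembles those request parts for a CSV batch transform; the job name, model name, S3 URIs, and instance type are placeholders, and the field names assume the generated rusoto structs for this API version.

use rusoto_core::Region;
use rusoto_sagemaker::{
    CreateTransformJobRequest, SageMaker, SageMakerClient, TransformDataSource, TransformInput,
    TransformOutput, TransformResources, TransformS3DataSource,
};

fn main() {
    let client = SageMakerClient::new(Region::UsEast1);

    let request = CreateTransformJobRequest {
        transform_job_name: "my-transform-job".to_string(), // placeholder, unique per account/region
        model_name: "my-model".to_string(),                 // an existing SageMaker model
        transform_input: TransformInput {
            data_source: TransformDataSource {
                s3_data_source: TransformS3DataSource {
                    s3_data_type: "S3Prefix".to_string(),
                    s3_uri: "s3://my-bucket/batch-input/".to_string(), // placeholder
                },
            },
            content_type: Some("text/csv".to_string()),
            ..Default::default()
        },
        transform_output: TransformOutput {
            s3_output_path: "s3://my-bucket/batch-output/".to_string(), // placeholder
            ..Default::default()
        },
        transform_resources: TransformResources {
            instance_count: 1,
            instance_type: "ml.m5.xlarge".to_string(),
            ..Default::default()
        },
        ..Default::default()
    };

    match client.create_transform_job(request).sync() {
        Ok(response) => println!("transform job ARN: {}", response.transform_job_arn),
        Err(e) => eprintln!("create_transform_job failed: {}", e),
    }
}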

fn create_workteam(
    &self,
    input: CreateWorkteamRequest
) -> RusotoFuture<CreateWorkteamResponse, CreateWorkteamError>
[src]

Creates a new work team for labeling your data. A work team is defined by one or more Amazon Cognito user pools. You must first create the user pools before you can create a work team.

You cannot create more than 25 work teams in an account and region.

fn delete_algorithm(
    &self,
    input: DeleteAlgorithmInput
) -> RusotoFuture<(), DeleteAlgorithmError>
[src]

Removes the specified algorithm from your account.

fn delete_code_repository(
    &self,
    input: DeleteCodeRepositoryInput
) -> RusotoFuture<(), DeleteCodeRepositoryError>
[src]

Deletes the specified Git repository from your account.

fn delete_endpoint(
    &self,
    input: DeleteEndpointInput
) -> RusotoFuture<(), DeleteEndpointError>
[src]

Deletes an endpoint. Amazon SageMaker frees up all of the resources that were deployed when the endpoint was created.

Amazon SageMaker retires any custom KMS key grants associated with the endpoint, meaning you don't need to use the RevokeGrant API call.

fn delete_endpoint_config(
    &self,
    input: DeleteEndpointConfigInput
) -> RusotoFuture<(), DeleteEndpointConfigError>
[src]

Deletes an endpoint configuration. The DeleteEndpointConfig API deletes only the specified configuration. It does not delete endpoints created using the configuration.

fn delete_model(
    &self,
    input: DeleteModelInput
) -> RusotoFuture<(), DeleteModelError>
[src]

Deletes a model. The DeleteModel API deletes only the model entry that was created in Amazon SageMaker when you called the CreateModel API. It does not delete model artifacts, inference code, or the IAM role that you specified when creating the model.
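
For example, a hedged sketch of deleting a model entry by name (the model name is a placeholder):

use rusoto_core::Region;
use rusoto_sagemaker::{DeleteModelInput, SageMaker, SageMakerClient};

fn main() {
    let client = SageMakerClient::new(Region::UsEast1);

    let input = DeleteModelInput {
        model_name: "my-model".to_string(), // placeholder
    };

    // Only the SageMaker model entry is removed; the artifacts in S3 stay put.
    if let Err(e) = client.delete_model(input).sync() {
        eprintln!("delete_model failed: {}", e);
    }
}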

fn delete_model_package(
    &self,
    input: DeleteModelPackageInput
) -> RusotoFuture<(), DeleteModelPackageError>
[src]

Deletes a model package.

A model package is used to create Amazon SageMaker models or list on AWS Marketplace. Buyers can subscribe to model packages listed on AWS Marketplace to create models in Amazon SageMaker.

fn delete_notebook_instance(
    &self,
    input: DeleteNotebookInstanceInput
) -> RusotoFuture<(), DeleteNotebookInstanceError>
[src]

Deletes an Amazon SageMaker notebook instance. Before you can delete a notebook instance, you must call the StopNotebookInstance API.

When you delete a notebook instance, you lose all of your data. Amazon SageMaker removes the ML compute instance, and deletes the ML storage volume and the network interface associated with the notebook instance.

fn delete_notebook_instance_lifecycle_config(
    &self,
    input: DeleteNotebookInstanceLifecycleConfigInput
) -> RusotoFuture<(), DeleteNotebookInstanceLifecycleConfigError>
[src]

Deletes a notebook instance lifecycle configuration.

fn delete_tags(
    &self,
    input: DeleteTagsInput
) -> RusotoFuture<DeleteTagsOutput, DeleteTagsError>
[src]

Deletes the specified tags from an Amazon SageMaker resource.

To list a resource's tags, use the ListTags API.

When you call this API to delete tags from a hyperparameter tuning job, the deleted tags are not removed from training jobs that the hyperparameter tuning job launched before you called this API.
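
A brief sketch of removing two tag keys from a resource; the resource ARN and tag keys are placeholders, and the field names assume the generated rusoto input struct.

use rusoto_core::Region;
use rusoto_sagemaker::{DeleteTagsInput, SageMaker, SageMakerClient};

fn main() {
    let client = SageMakerClient::new(Region::UsEast1);

    let input = DeleteTagsInput {
        resource_arn: "arn:aws:sagemaker:us-east-1:123456789012:model/my-model".to_string(), // placeholder
        tag_keys: vec!["project".to_string(), "owner".to_string()], // tag keys to remove
    };

    if let Err(e) = client.delete_tags(input).sync() {
        eprintln!("delete_tags failed: {}", e);
    }
}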

fn delete_workteam(
    &self,
    input: DeleteWorkteamRequest
) -> RusotoFuture<DeleteWorkteamResponse, DeleteWorkteamError>
[src]

Deletes an existing work team. This operation can't be undone.

fn describe_algorithm(
    &self,
    input: DescribeAlgorithmInput
) -> RusotoFuture<DescribeAlgorithmOutput, DescribeAlgorithmError>
[src]

Returns a description of the specified algorithm that is in your account.

fn describe_code_repository(
    &self,
    input: DescribeCodeRepositoryInput
) -> RusotoFuture<DescribeCodeRepositoryOutput, DescribeCodeRepositoryError>
[src]

Gets details about the specified Git repository.

fn describe_compilation_job(
    &self,
    input: DescribeCompilationJobRequest
) -> RusotoFuture<DescribeCompilationJobResponse, DescribeCompilationJobError>
[src]

Returns information about a model compilation job.

To create a model compilation job, use CreateCompilationJob. To get information about multiple model compilation jobs, use ListCompilationJobs.

fn describe_endpoint(
    &self,
    input: DescribeEndpointInput
) -> RusotoFuture<DescribeEndpointOutput, DescribeEndpointError>
[src]

Returns the description of an endpoint.

fn describe_endpoint_config(
    &self,
    input: DescribeEndpointConfigInput
) -> RusotoFuture<DescribeEndpointConfigOutput, DescribeEndpointConfigError>
[src]

Returns the description of an endpoint configuration created using the CreateEndpointConfig API.

fn describe_hyper_parameter_tuning_job(
    &self,
    input: DescribeHyperParameterTuningJobRequest
) -> RusotoFuture<DescribeHyperParameterTuningJobResponse, DescribeHyperParameterTuningJobError>
[src]

Gets a description of a hyperparameter tuning job.

fn describe_labeling_job(
    &self,
    input: DescribeLabelingJobRequest
) -> RusotoFuture<DescribeLabelingJobResponse, DescribeLabelingJobError>
[src]

Gets information about a labeling job.

fn describe_model(
    &self,
    input: DescribeModelInput
) -> RusotoFuture<DescribeModelOutput, DescribeModelError>
[src]

Describes a model that you created using the CreateModel API.

fn describe_model_package(
    &self,
    input: DescribeModelPackageInput
) -> RusotoFuture<DescribeModelPackageOutput, DescribeModelPackageError>
[src]

Returns a description of the specified model package, which is used to create Amazon SageMaker models or list them on AWS Marketplace.

To create models in Amazon SageMaker, buyers can subscribe to model packages listed on AWS Marketplace.

fn describe_notebook_instance(
    &self,
    input: DescribeNotebookInstanceInput
) -> RusotoFuture<DescribeNotebookInstanceOutput, DescribeNotebookInstanceError>
[src]

Returns information about a notebook instance.

fn describe_notebook_instance_lifecycle_config(
    &self,
    input: DescribeNotebookInstanceLifecycleConfigInput
) -> RusotoFuture<DescribeNotebookInstanceLifecycleConfigOutput, DescribeNotebookInstanceLifecycleConfigError>
[src]

Returns a description of a notebook instance lifecycle configuration.

For information about notebook instance lifecycle configurations, see Step 2.1: (Optional) Customize a Notebook Instance.

fn describe_subscribed_workteam(
    &self,
    input: DescribeSubscribedWorkteamRequest
) -> RusotoFuture<DescribeSubscribedWorkteamResponse, DescribeSubscribedWorkteamError>
[src]

Gets information about a work team provided by a vendor. It returns details about the subscription with a vendor in the AWS Marketplace.

fn describe_training_job(
    &self,
    input: DescribeTrainingJobRequest
) -> RusotoFuture<DescribeTrainingJobResponse, DescribeTrainingJobError>
[src]

Returns information about a training job.
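
For example, a sketch that looks up a job by name and prints its status; the job name is a placeholder, and the response field shown assumes the generated rusoto output struct.

use rusoto_core::Region;
use rusoto_sagemaker::{DescribeTrainingJobRequest, SageMaker, SageMakerClient};

fn main() {
    let client = SageMakerClient::new(Region::UsEast1);

    let request = DescribeTrainingJobRequest {
        training_job_name: "my-training-job".to_string(), // placeholder
    };

    match client.describe_training_job(request).sync() {
        // The response also carries the model artifact location, resource config,
        // timing information, and (on failure) a failure reason.
        Ok(response) => println!("status: {}", response.training_job_status),
        Err(e) => eprintln!("describe_training_job failed: {}", e),
    }
}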

fn describe_transform_job(
    &self,
    input: DescribeTransformJobRequest
) -> RusotoFuture<DescribeTransformJobResponse, DescribeTransformJobError>
[src]

Returns information about a transform job.

fn describe_workteam(
    &self,
    input: DescribeWorkteamRequest
) -> RusotoFuture<DescribeWorkteamResponse, DescribeWorkteamError>
[src]

Gets information about a specific work team. You can see information such as the create date, the last updated date, membership information, and the work team's Amazon Resource Name (ARN).

fn get_search_suggestions(
    &self,
    input: GetSearchSuggestionsRequest
) -> RusotoFuture<GetSearchSuggestionsResponse, GetSearchSuggestionsError>
[src]

An auto-complete API for the search functionality in the Amazon SageMaker console. It returns suggestions of possible matches for the property name to use in Search queries. Provides suggestions for HyperParameters, Tags, and Metrics.

fn list_algorithms(
    &self,
    input: ListAlgorithmsInput
) -> RusotoFuture<ListAlgorithmsOutput, ListAlgorithmsError>
[src]

Lists the machine learning algorithms that have been created.

fn list_code_repositories(
    &self,
    input: ListCodeRepositoriesInput
) -> RusotoFuture<ListCodeRepositoriesOutput, ListCodeRepositoriesError>
[src]

Gets a list of the Git repositories in your account.

fn list_compilation_jobs(
    &self,
    input: ListCompilationJobsRequest
) -> RusotoFuture<ListCompilationJobsResponse, ListCompilationJobsError>
[src]

Lists model compilation jobs that satisfy various filters.

To create a model compilation job, use CreateCompilationJob. To get information about a particular model compilation job you have created, use DescribeCompilationJob.

fn list_endpoint_configs(
    &self,
    input: ListEndpointConfigsInput
) -> RusotoFuture<ListEndpointConfigsOutput, ListEndpointConfigsError>
[src]

Lists endpoint configurations.

fn list_endpoints(
    &self,
    input: ListEndpointsInput
) -> RusotoFuture<ListEndpointsOutput, ListEndpointsError>
[src]

Lists endpoints.

fn list_hyper_parameter_tuning_jobs(
    &self,
    input: ListHyperParameterTuningJobsRequest
) -> RusotoFuture<ListHyperParameterTuningJobsResponse, ListHyperParameterTuningJobsError>
[src]

Gets a list of HyperParameterTuningJobSummary objects that describe the hyperparameter tuning jobs launched in your account.

fn list_labeling_jobs(
    &self,
    input: ListLabelingJobsRequest
) -> RusotoFuture<ListLabelingJobsResponse, ListLabelingJobsError>
[src]

Gets a list of labeling jobs.

fn list_labeling_jobs_for_workteam(
    &self,
    input: ListLabelingJobsForWorkteamRequest
) -> RusotoFuture<ListLabelingJobsForWorkteamResponse, ListLabelingJobsForWorkteamError>
[src]

Gets a list of labeling jobs assigned to a specified work team.

fn list_model_packages(
    &self,
    input: ListModelPackagesInput
) -> RusotoFuture<ListModelPackagesOutput, ListModelPackagesError>
[src]

Lists the model packages that have been created.

fn list_models(
    &self,
    input: ListModelsInput
) -> RusotoFuture<ListModelsOutput, ListModelsError>
[src]

Lists models created with the CreateModel API.
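
A sketch of paging through all models with the NextToken returned by each call; the page size is arbitrary and the field names assume the generated rusoto structs.

use rusoto_core::Region;
use rusoto_sagemaker::{ListModelsInput, SageMaker, SageMakerClient};

fn main() {
    let client = SageMakerClient::new(Region::UsEast1);
    let mut next_token: Option<String> = None;

    loop {
        let input = ListModelsInput {
            max_results: Some(50),
            next_token: next_token.clone(),
            ..Default::default()
        };
        match client.list_models(input).sync() {
            Ok(output) => {
                for model in &output.models {
                    println!("{}", model.model_name);
                }
                next_token = output.next_token;
                if next_token.is_none() {
                    break; // no more pages
                }
            }
            Err(e) => {
                eprintln!("list_models failed: {}", e);
                break;
            }
        }
    }
}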

fn list_notebook_instance_lifecycle_configs(
    &self,
    input: ListNotebookInstanceLifecycleConfigsInput
) -> RusotoFuture<ListNotebookInstanceLifecycleConfigsOutput, ListNotebookInstanceLifecycleConfigsError>
[src]

Lists notebook instance lifecycle configurations created with the CreateNotebookInstanceLifecycleConfig API.

fn list_notebook_instances(
    &self,
    input: ListNotebookInstancesInput
) -> RusotoFuture<ListNotebookInstancesOutput, ListNotebookInstancesError>
[src]

Returns a list of the Amazon SageMaker notebook instances in the requester's account in an AWS Region.

fn list_subscribed_workteams(
    &self,
    input: ListSubscribedWorkteamsRequest
) -> RusotoFuture<ListSubscribedWorkteamsResponse, ListSubscribedWorkteamsError>
[src]

Gets a list of the work teams that you are subscribed to in the AWS Marketplace. The list may be empty if no work team satisfies the filter specified in the NameContains parameter.

fn list_tags(
    &self,
    input: ListTagsInput
) -> RusotoFuture<ListTagsOutput, ListTagsError>
[src]

Returns the tags for the specified Amazon SageMaker resource.

fn list_training_jobs(
    &self,
    input: ListTrainingJobsRequest
) -> RusotoFuture<ListTrainingJobsResponse, ListTrainingJobsError>
[src]

Lists training jobs.

fn list_training_jobs_for_hyper_parameter_tuning_job(
    &self,
    input: ListTrainingJobsForHyperParameterTuningJobRequest
) -> RusotoFuture<ListTrainingJobsForHyperParameterTuningJobResponse, ListTrainingJobsForHyperParameterTuningJobError>
[src]

Gets a list of TrainingJobSummary objects that describe the training jobs that a hyperparameter tuning job launched.

fn list_transform_jobs(
    &self,
    input: ListTransformJobsRequest
) -> RusotoFuture<ListTransformJobsResponse, ListTransformJobsError>
[src]

Lists transform jobs.

fn list_workteams(
    &self,
    input: ListWorkteamsRequest
) -> RusotoFuture<ListWorkteamsResponse, ListWorkteamsError>
[src]

Gets a list of work teams that you have defined in a region. The list may be empty if no work team satisfies the filter specified in the NameContains parameter.

fn render_ui_template(
    &self,
    input: RenderUiTemplateRequest
) -> RusotoFuture<RenderUiTemplateResponse, RenderUiTemplateError>
[src]

Renders the UI template so that you can preview the worker's experience.

fn search(
    &self,
    input: SearchRequest
) -> RusotoFuture<SearchResponse, SearchError>
[src]

Finds Amazon SageMaker resources that match a search query. Matching resource objects are returned as a list of SearchResult objects in the response. You can sort the search results by any resource property in ascending or descending order.

You can query against the following value types: numerical, text, Booleans, and timestamps.
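
As a hedged sketch, the example below searches for completed training jobs sorted by creation time. The resource type, filter name, operator, and sort keys are strings taken from the SageMaker Search documentation, and the struct field names assume the shapes rusoto generates for this API version.

use rusoto_core::Region;
use rusoto_sagemaker::{Filter, SageMaker, SageMakerClient, SearchExpression, SearchRequest};

fn main() {
    let client = SageMakerClient::new(Region::UsEast1);

    // Find completed training jobs, newest first.
    let request = SearchRequest {
        resource: "TrainingJob".to_string(),
        search_expression: Some(SearchExpression {
            filters: Some(vec![Filter {
                name: "TrainingJobStatus".to_string(),
                operator: Some("Equals".to_string()),
                value: Some("Completed".to_string()),
            }]),
            ..Default::default()
        }),
        sort_by: Some("CreationTime".to_string()),
        sort_order: Some("Descending".to_string()),
        max_results: Some(10),
        ..Default::default()
    };

    match client.search(request).sync() {
        Ok(response) => {
            for record in response.results.unwrap_or_default() {
                if let Some(job) = record.training_job {
                    println!("{:?}", job.training_job_name);
                }
            }
        }
        Err(e) => eprintln!("search failed: {}", e),
    }
}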

fn start_notebook_instance(
    &self,
    input: StartNotebookInstanceInput
) -> RusotoFuture<(), StartNotebookInstanceError>
[src]

Launches an ML compute instance with the latest version of the libraries and attaches your ML storage volume. After configuring the notebook instance, Amazon SageMaker sets the notebook instance status to InService. A notebook instance's status must be InService before you can connect to your Jupyter notebook.

fn stop_compilation_job(
    &self,
    input: StopCompilationJobRequest
) -> RusotoFuture<(), StopCompilationJobError>
[src]

Stops a model compilation job.

To stop a job, Amazon SageMaker sends the algorithm the SIGTERM signal. This gracefully shuts the job down. If the job hasn't stopped, it sends the SIGKILL signal.

When it receives a StopCompilationJob request, Amazon SageMaker changes the CompilationJobSummary$CompilationJobStatus of the job to Stopping. After Amazon SageMaker stops the job, it sets the CompilationJobSummary$CompilationJobStatus to Stopped.

fn stop_hyper_parameter_tuning_job(
    &self,
    input: StopHyperParameterTuningJobRequest
) -> RusotoFuture<(), StopHyperParameterTuningJobError>
[src]

Stops a running hyperparameter tuning job and all running training jobs that the tuning job launched.

All model artifacts output from the training jobs are stored in Amazon Simple Storage Service (Amazon S3). All data that the training jobs write to Amazon CloudWatch Logs are still available in CloudWatch. After the tuning job moves to the Stopped state, it releases all reserved resources for the tuning job.

fn stop_labeling_job(
    &self,
    input: StopLabelingJobRequest
) -> RusotoFuture<(), StopLabelingJobError>
[src]

Stops a running labeling job. A job that is stopped cannot be restarted. Any results obtained before the job is stopped are placed in the Amazon S3 output bucket.

fn stop_notebook_instance(
    &self,
    input: StopNotebookInstanceInput
) -> RusotoFuture<(), StopNotebookInstanceError>
[src]

Terminates the ML compute instance. Before terminating the instance, Amazon SageMaker disconnects the ML storage volume from it. Amazon SageMaker preserves the ML storage volume. Amazon SageMaker stops charging you for the ML compute instance when you call StopNotebookInstance.

To access data on the ML storage volume for a notebook instance that has been terminated, call the StartNotebookInstance API. StartNotebookInstance launches another ML compute instance, configures it, and attaches the preserved ML storage volume so you can continue your work.

fn stop_training_job(
    &self,
    input: StopTrainingJobRequest
) -> RusotoFuture<(), StopTrainingJobError>
[src]

Stops a training job. To stop a job, Amazon SageMaker sends the algorithm the SIGTERM signal, which delays job termination for 120 seconds. Algorithms might use this 120-second window to save the model artifacts, so the results of the training are not lost.

When it receives a StopTrainingJob request, Amazon SageMaker changes the status of the job to Stopping. After Amazon SageMaker stops the job, it sets the status to Stopped.
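
For example, a sketch that requests a stop for a named training job (the name is a placeholder); the call returns once Amazon SageMaker has accepted the request, not once the job has stopped.

use rusoto_core::Region;
use rusoto_sagemaker::{SageMaker, SageMakerClient, StopTrainingJobRequest};

fn main() {
    let client = SageMakerClient::new(Region::UsEast1);

    let request = StopTrainingJobRequest {
        training_job_name: "my-training-job".to_string(), // placeholder
    };

    // Poll describe_training_job afterwards to watch the status move
    // from Stopping to Stopped.
    if let Err(e) = client.stop_training_job(request).sync() {
        eprintln!("stop_training_job failed: {}", e);
    }
}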

fn stop_transform_job(
    &self,
    input: StopTransformJobRequest
) -> RusotoFuture<(), StopTransformJobError>
[src]

Stops a transform job.

When Amazon SageMaker receives a StopTransformJob request, the status of the job changes to Stopping. After Amazon SageMaker stops the job, the status is set to Stopped. When you stop a transform job before it is completed, Amazon SageMaker doesn't store the job's output in Amazon S3.

fn update_code_repository(
    &self,
    input: UpdateCodeRepositoryInput
) -> RusotoFuture<UpdateCodeRepositoryOutput, UpdateCodeRepositoryError>
[src]

Updates the specified Git repository with the specified values.

fn update_endpoint(
    &self,
    input: UpdateEndpointInput
) -> RusotoFuture<UpdateEndpointOutput, UpdateEndpointError>
[src]

Deploys the new EndpointConfig specified in the request, switches to using the newly created endpoint, and then deletes resources provisioned for the endpoint using the previous EndpointConfig (there is no availability loss).

When Amazon SageMaker receives the request, it sets the endpoint status to Updating. After updating the endpoint, it sets the status to InService. To check the status of an endpoint, use the DescribeEndpoint API.

You must not delete an EndpointConfig in use by an endpoint that is live or while the UpdateEndpoint or CreateEndpoint operations are being performed on the endpoint. To update an endpoint, you must create a new EndpointConfig.

fn update_endpoint_weights_and_capacities(
    &self,
    input: UpdateEndpointWeightsAndCapacitiesInput
) -> RusotoFuture<UpdateEndpointWeightsAndCapacitiesOutput, UpdateEndpointWeightsAndCapacitiesError>
[src]

Updates variant weight of one or more variants associated with an existing endpoint, or capacity of one variant associated with an existing endpoint. When it receives the request, Amazon SageMaker sets the endpoint status to Updating. After updating the endpoint, it sets the status to InService. To check the status of an endpoint, use the DescribeEndpoint API.
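
A sketch of shifting weight and capacity for one variant of an existing endpoint; the endpoint and variant names are placeholders, and the field names assume the generated rusoto structs.

use rusoto_core::Region;
use rusoto_sagemaker::{
    DesiredWeightAndCapacity, SageMaker, SageMakerClient, UpdateEndpointWeightsAndCapacitiesInput,
};

fn main() {
    let client = SageMakerClient::new(Region::UsEast1);

    // Give variant-a more traffic weight and scale it out to two instances.
    let input = UpdateEndpointWeightsAndCapacitiesInput {
        endpoint_name: "my-endpoint".to_string(), // placeholder
        desired_weights_and_capacities: vec![DesiredWeightAndCapacity {
            variant_name: "variant-a".to_string(),
            desired_weight: Some(3.0),
            desired_instance_count: Some(2),
        }],
    };

    match client.update_endpoint_weights_and_capacities(input).sync() {
        Ok(output) => println!("endpoint ARN: {}", output.endpoint_arn),
        Err(e) => eprintln!("update_endpoint_weights_and_capacities failed: {}", e),
    }
}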

fn update_notebook_instance(
    &self,
    input: UpdateNotebookInstanceInput
) -> RusotoFuture<UpdateNotebookInstanceOutput, UpdateNotebookInstanceError>
[src]

Updates a notebook instance. NotebookInstance updates include upgrading or downgrading the ML compute instance used for your notebook instance to accommodate changes in your workload requirements.

fn update_notebook_instance_lifecycle_config(
    &self,
    input: UpdateNotebookInstanceLifecycleConfigInput
) -> RusotoFuture<UpdateNotebookInstanceLifecycleConfigOutput, UpdateNotebookInstanceLifecycleConfigError>
[src]

Updates a notebook instance lifecycle configuration created with the CreateNotebookInstanceLifecycleConfig API.

fn update_workteam(
    &self,
    input: UpdateWorkteamRequest
) -> RusotoFuture<UpdateWorkteamResponse, UpdateWorkteamError>
[src]

Updates an existing work team with new member definitions or a description.
