{-# LANGUAGE DeriveGeneric #-}
{-# LANGUAGE DuplicateRecordFields #-}
{-# LANGUAGE NamedFieldPuns #-}
{-# LANGUAGE OverloadedStrings #-}
{-# LANGUAGE RecordWildCards #-}
{-# LANGUAGE StrictData #-}
{-# LANGUAGE NoImplicitPrelude #-}
{-# OPTIONS_GHC -fno-warn-unused-imports #-}
{-# OPTIONS_GHC -fno-warn-unused-matches #-}

-- Derived from AWS service descriptions, licensed under Apache 2.0.

-- |
-- Module      : Amazonka.SageMaker.Types.OutputConfig
-- Copyright   : (c) 2013-2021 Brendan Hay
-- License     : Mozilla Public License, v. 2.0.
-- Maintainer  : Brendan Hay <brendan.g.hay+amazonka@gmail.com>
-- Stability   : auto-generated
-- Portability : non-portable (GHC extensions)
module Amazonka.SageMaker.Types.OutputConfig where

import qualified Amazonka.Core as Core
import qualified Amazonka.Lens as Lens
import qualified Amazonka.Prelude as Prelude
import Amazonka.SageMaker.Types.TargetDevice
import Amazonka.SageMaker.Types.TargetPlatform

-- | Contains information about the output location for the compiled model
-- and the target device that the model runs on. @TargetDevice@ and
-- @TargetPlatform@ are mutually exclusive, so you must choose one of the
-- two to specify your target device or platform. If you cannot find the
-- device you want to use in the @TargetDevice@ list, use @TargetPlatform@
-- to describe the platform of your edge device, and @CompilerOptions@ if
-- there are settings that are required or recommended for that particular
-- @TargetPlatform@.
--
-- /See:/ 'newOutputConfig' smart constructor.
data OutputConfig = OutputConfig'
  { -- | Contains information about a target platform that you want your model
    -- to run on, such as OS, architecture, and accelerators. It is an
    -- alternative to @TargetDevice@.
    --
    -- The following examples show how to configure the @TargetPlatform@ and
    -- @CompilerOptions@ JSON strings for popular target platforms:
    --
    -- -   Raspberry Pi 3 Model B+
    --
    --     @\"TargetPlatform\": {\"Os\": \"LINUX\", \"Arch\": \"ARM_EABIHF\"},@
    --
    --     @ \"CompilerOptions\": {\'mattr\': [\'+neon\']}@
    --
    -- -   Jetson TX2
    --
    --     @\"TargetPlatform\": {\"Os\": \"LINUX\", \"Arch\": \"ARM64\", \"Accelerator\": \"NVIDIA\"},@
    --
    --     @ \"CompilerOptions\": {\'gpu-code\': \'sm_62\', \'trt-ver\': \'6.0.1\', \'cuda-ver\': \'10.0\'}@
    --
    -- -   EC2 m5.2xlarge instance OS
    --
    --     @\"TargetPlatform\": {\"Os\": \"LINUX\", \"Arch\": \"X86_64\", \"Accelerator\": \"NVIDIA\"},@
    --
    --     @ \"CompilerOptions\": {\'mcpu\': \'skylake-avx512\'}@
    --
    -- -   RK3399
    --
    --     @\"TargetPlatform\": {\"Os\": \"LINUX\", \"Arch\": \"ARM64\", \"Accelerator\": \"MALI\"}@
    --
    -- -   ARMv7 phone (CPU)
    --
    --     @\"TargetPlatform\": {\"Os\": \"ANDROID\", \"Arch\": \"ARM_EABI\"},@
    --
    --     @ \"CompilerOptions\": {\'ANDROID_PLATFORM\': 25, \'mattr\': [\'+neon\']}@
    --
    -- -   ARMv8 phone (CPU)
    --
    --     @\"TargetPlatform\": {\"Os\": \"ANDROID\", \"Arch\": \"ARM64\"},@
    --
    --     @ \"CompilerOptions\": {\'ANDROID_PLATFORM\': 29}@
    targetPlatform :: Prelude.Maybe TargetPlatform,
    -- | The Amazon Web Services Key Management Service (Amazon Web Services
    -- KMS) key that Amazon SageMaker uses to encrypt your output models with
    -- Amazon S3 server-side encryption after the compilation job. If you
    -- don\'t provide a KMS key ID, Amazon SageMaker uses the default KMS key
    -- for Amazon S3 for your role\'s account.
    -- For more information, see
    -- <https://docs.aws.amazon.com/AmazonS3/latest/userguide/UsingKMSEncryption.html KMS-Managed Encryption Keys>
    -- in the /Amazon Simple Storage Service Developer Guide/.
    --
    -- The KmsKeyId can be any of the following formats:
    --
    -- -   Key ID: @1234abcd-12ab-34cd-56ef-1234567890ab@
    --
    -- -   Key ARN:
    --     @arn:aws:kms:us-west-2:111122223333:key\/1234abcd-12ab-34cd-56ef-1234567890ab@
    --
    -- -   Alias name: @alias\/ExampleAlias@
    --
    -- -   Alias name ARN:
    --     @arn:aws:kms:us-west-2:111122223333:alias\/ExampleAlias@
    kmsKeyId :: Prelude.Maybe Prelude.Text,
    -- | Specifies additional parameters for compiler options in JSON format.
    -- The compiler options are @TargetPlatform@ specific. They are required
    -- for NVIDIA accelerators and highly recommended for CPU compilations;
    -- in all other cases, specifying @CompilerOptions@ is optional.
    --
    -- -   @DTYPE@: Specifies the data type for the input. When compiling for
    --     @ml_*@ (except for @ml_inf@) instances using the PyTorch
    --     framework, provide the data type (dtype) of the model\'s input.
    --     @\"float32\"@ is used if @\"DTYPE\"@ is not specified. Options
    --     for the data type are:
    --
    --     -   float32: Use either @\"float\"@ or @\"float32\"@.
    --
    --     -   int64: Use either @\"int64\"@ or @\"long\"@.
    --
    --     For example, @{\"dtype\" : \"float32\"}@.
    --
    -- -   @CPU@: Compilation for CPU supports the following compiler
    --     options:
    --
    --     -   @mcpu@: CPU micro-architecture. For example,
    --         @{\'mcpu\': \'skylake-avx512\'}@
    --
    --     -   @mattr@: CPU flags. For example,
    --         @{\'mattr\': [\'+neon\', \'+vfpv4\']}@
    --
    -- -   @ARM@: Details of ARM CPU compilations:
    --
    --     -   @NEON@: NEON is an implementation of the Advanced SIMD
    --         extension used in ARMv7 processors.
    --
    --         For example, add @{\'mattr\': [\'+neon\']}@ to the compiler
    --         options if compiling for an ARM 32-bit platform with NEON
    --         support.
    --
    -- -   @NVIDIA@: Compilation for NVIDIA GPU supports the following
    --     compiler options:
    --
    --     -   @gpu_code@: Specifies the targeted architecture.
    --
    --     -   @trt-ver@: Specifies the TensorRT version in x.y.z format.
    --
    --     -   @cuda-ver@: Specifies the CUDA version in x.y format.
    --
    --     For example,
    --     @{\'gpu-code\': \'sm_72\', \'trt-ver\': \'6.0.1\', \'cuda-ver\': \'10.1\'}@
    --
    -- -   @ANDROID@: Compilation for the Android OS supports the following
    --     compiler options:
    --
    --     -   @ANDROID_PLATFORM@: Specifies the Android API level.
    --         Available levels range from 21 to 29. For example,
    --         @{\'ANDROID_PLATFORM\': 28}@.
    --
    --     -   @mattr@: Add @{\'mattr\': [\'+neon\']}@ to the compiler
    --         options if compiling for an ARM 32-bit platform with NEON
    --         support.
    --
    -- -   @INFERENTIA@: Compilation for target ml_inf1 uses compiler
    --     options passed in as a JSON string. For example,
    --     @\"CompilerOptions\": \"\\\"--verbose 1 --num-neuroncores 2 -O2\\\"\"@.
    --
    --     For information about supported compiler options, see
    --     <https://github.com/aws/aws-neuron-sdk/blob/master/docs/neuron-cc/command-line-reference.md Neuron Compiler CLI>.
    --
    -- -   @CoreML@: Compilation for the CoreML OutputConfig$TargetDevice
    --     supports the following compiler options:
    --
    --     -   @class_labels@: Specifies the classification labels file name
    --         inside the input tar.gz file. For example,
    --         @{\"class_labels\": \"imagenet_labels_1000.txt\"}@. Labels
    --         inside the txt file should be separated by newlines.
    --
    -- -   @EIA@: Compilation for the Elastic Inference Accelerator supports
    --     the following compiler options:
    --
    --     -   @precision_mode@: Specifies the precision of compiled
    --         artifacts.
    --         Supported values are @\"FP16\"@ and @\"FP32\"@. Default is
    --         @\"FP32\"@.
    --
    --     -   @signature_def_key@: Specifies the signature to use for
    --         models in SavedModel format. The default is TensorFlow\'s
    --         default signature def key.
    --
    --     -   @output_names@: Specifies a list of output tensor names for
    --         models in FrozenGraph format. Set at most one of the two API
    --         fields: @signature_def_key@ or @output_names@.
    --
    --     For example:
    --     @{\"precision_mode\": \"FP32\", \"output_names\": [\"output:0\"]}@
    compilerOptions :: Prelude.Maybe Prelude.Text,
    -- | Identifies the target device or the machine learning instance that
    -- you want to run your model on after the compilation has completed.
    -- Alternatively, you can specify the OS, architecture, and accelerator
    -- using the @TargetPlatform@ fields instead.
    targetDevice :: Prelude.Maybe TargetDevice,
    -- | Identifies the S3 bucket where you want Amazon SageMaker to store the
    -- model artifacts. For example, @s3:\/\/bucket-name\/key-name-prefix@.
    s3OutputLocation :: Prelude.Text
  }
  deriving (Prelude.Eq, Prelude.Read, Prelude.Show, Prelude.Generic)

-- |
-- Create a value of 'OutputConfig' with all optional fields omitted.
--
-- Use <https://hackage.haskell.org/package/generic-lens generic-lens> or <https://hackage.haskell.org/package/optics optics> to modify other optional fields.
--
-- The following record fields are available, with the corresponding
-- lenses provided for backwards compatibility:
--
-- 'targetPlatform', 'outputConfig_targetPlatform' - The target platform
-- (OS, architecture, and accelerators) that you want your model to run
-- on; an alternative to 'targetDevice'. See the field documentation above
-- for @TargetPlatform@ and @CompilerOptions@ examples for popular
-- platforms.
--
-- 'kmsKeyId', 'outputConfig_kmsKeyId' - The Amazon Web Services KMS key
-- that Amazon SageMaker uses to encrypt your output models with Amazon S3
-- server-side encryption after the compilation job. See the field
-- documentation above for the accepted key formats.
--
-- 'compilerOptions', 'outputConfig_compilerOptions' - Additional compiler
-- options in JSON format, specific to the chosen @TargetPlatform@. See
-- the field documentation above for the options supported by each target.
--
-- 'targetDevice', 'outputConfig_targetDevice' - The target device or
-- machine learning instance to run the model on after compilation; an
-- alternative to 'targetPlatform'.
--
-- 's3OutputLocation', 'outputConfig_s3OutputLocation' - The S3 bucket
-- where Amazon SageMaker stores the model artifacts. For example,
-- @s3:\/\/bucket-name\/key-name-prefix@.
newOutputConfig ::
  -- | 's3OutputLocation'
  Prelude.Text ->
  OutputConfig
newOutputConfig pS3OutputLocation_ =
  OutputConfig'
    { targetPlatform = Prelude.Nothing,
      kmsKeyId = Prelude.Nothing,
      compilerOptions = Prelude.Nothing,
      targetDevice = Prelude.Nothing,
      s3OutputLocation = pS3OutputLocation_
    }
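-- A minimal usage sketch (editorial addition, not generated code): build
-- an 'OutputConfig' for a Jetson TX2 target with the smart constructor
-- and a plain record update, reusing the compiler-option string from the
-- field documentation above. The enum pattern 'TargetDevice_Jetson_tx2'
-- and the bucket name are assumptions; check the generated
-- Amazonka.SageMaker.Types.TargetDevice module for the exact names in
-- your amazonka version.
--
--   jetsonConfig :: OutputConfig
--   jetsonConfig =
--     (newOutputConfig "s3://my-bucket/compiled-models")
--       { targetDevice = Prelude.Just TargetDevice_Jetson_tx2,
--         compilerOptions =
--           Prelude.Just
--             "{'gpu-code': 'sm_62', 'trt-ver': '6.0.1', 'cuda-ver': '10.0'}"
--       }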
-- | The target platform (OS, architecture, and accelerators) that you
-- want your model to run on; an alternative to 'targetDevice'. See the
-- 'targetPlatform' field documentation for @TargetPlatform@ and
-- @CompilerOptions@ examples for popular platforms.
outputConfig_targetPlatform :: Lens.Lens' OutputConfig (Prelude.Maybe TargetPlatform)
outputConfig_targetPlatform = Lens.lens (\OutputConfig' {targetPlatform} -> targetPlatform) (\s@OutputConfig' {} a -> s {targetPlatform = a} :: OutputConfig)
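-- A minimal sketch of the @TargetPlatform@ alternative (editorial
-- addition, not generated code): the Raspberry Pi example from the field
-- documentation, expressed through this lens. 'newTargetPlatform' and the
-- enum patterns 'TargetPlatformOs_LINUX' and
-- 'TargetPlatformArch_ARM_EABIHF' are assumptions about the companion
-- generated modules; check Amazonka.SageMaker.Types.TargetPlatform for
-- the exact names. The (&) and (?~) operators come from any van
-- Laarhoven-compatible lens library, e.g. Control.Lens.
--
--   import Control.Lens ((&), (?~))
--
--   raspberryPiConfig :: OutputConfig
--   raspberryPiConfig =
--     newOutputConfig "s3://my-bucket/compiled-models"
--       & outputConfig_targetPlatform
--         ?~ newTargetPlatform TargetPlatformOs_LINUX TargetPlatformArch_ARM_EABIHF
--       & outputConfig_compilerOptions ?~ "{'mattr': ['+neon']}"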
-- | The Amazon Web Services KMS key that Amazon SageMaker uses to encrypt
-- your output models with Amazon S3 server-side encryption after the
-- compilation job. See the 'kmsKeyId' field documentation for the
-- accepted key formats.
outputConfig_kmsKeyId :: Lens.Lens' OutputConfig (Prelude.Maybe Prelude.Text)
outputConfig_kmsKeyId = Lens.lens (\OutputConfig' {kmsKeyId} -> kmsKeyId) (\s@OutputConfig' {} a -> s {kmsKeyId = a} :: OutputConfig)

-- | Additional compiler options in JSON format, specific to the chosen
-- @TargetPlatform@. Required for NVIDIA accelerators and highly
-- recommended for CPU compilations; otherwise optional. See the
-- 'compilerOptions' field documentation for the options supported by each
-- target.
outputConfig_compilerOptions :: Lens.Lens' OutputConfig (Prelude.Maybe Prelude.Text)
outputConfig_compilerOptions = Lens.lens (\OutputConfig' {compilerOptions} -> compilerOptions) (\s@OutputConfig' {} a -> s {compilerOptions = a} :: OutputConfig)

-- | Identifies the target device or the machine learning instance that
-- you want to run your model on after the compilation has completed.
-- Alternatively, you can specify the OS, architecture, and accelerator
-- using the @TargetPlatform@ fields instead.
outputConfig_targetDevice :: Lens.Lens' OutputConfig (Prelude.Maybe TargetDevice)
outputConfig_targetDevice = Lens.lens (\OutputConfig' {targetDevice} -> targetDevice) (\s@OutputConfig' {} a -> s {targetDevice = a} :: OutputConfig)

-- | Identifies the S3 bucket where you want Amazon SageMaker to store the
-- model artifacts. For example, @s3:\/\/bucket-name\/key-name-prefix@.
outputConfig_s3OutputLocation :: Lens.Lens' OutputConfig Prelude.Text
outputConfig_s3OutputLocation = Lens.lens (\OutputConfig' {s3OutputLocation} -> s3OutputLocation) (\s@OutputConfig' {} a -> s {s3OutputLocation = a} :: OutputConfig)
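-- A minimal lens sketch (editorial addition, not generated code): the
-- generated lenses above are plain van Laarhoven lenses, so the standard
-- operators from the lens package (or any compatible library) work on
-- them; using Control.Lens here is an assumption, since the docs above
-- suggest generic-lens or optics as alternatives.
--
--   import Control.Lens ((^.), (&), (?~))
--
--   -- Set the optional KMS key on an existing config.
--   withKms :: OutputConfig -> OutputConfig
--   withKms cfg = cfg & outputConfig_kmsKeyId ?~ "alias/ExampleAlias"
--
--   -- Read the required output location back out.
--   outputLocation :: OutputConfig -> Prelude.Text
--   outputLocation cfg = cfg ^. outputConfig_s3OutputLocation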
instance Core.FromJSON OutputConfig where
  parseJSON =
    Core.withObject
      "OutputConfig"
      ( \x ->
          OutputConfig'
            Prelude.<$> (x Core..:? "TargetPlatform")
            Prelude.<*> (x Core..:? "KmsKeyId")
            Prelude.<*> (x Core..:? "CompilerOptions")
            Prelude.<*> (x Core..:? "TargetDevice")
            Prelude.<*> (x Core..: "S3OutputLocation")
      )

instance Prelude.Hashable OutputConfig

instance Prelude.NFData OutputConfig
instance Core.ToJSON OutputConfig where
  toJSON OutputConfig' {..} =
    Core.object
      ( Prelude.catMaybes
          [ ("TargetPlatform" Core..=) Prelude.<$> targetPlatform,
            ("KmsKeyId" Core..=) Prelude.<$> kmsKeyId,
            ("CompilerOptions" Core..=) Prelude.<$> compilerOptions,
            ("TargetDevice" Core..=) Prelude.<$> targetDevice,
            Prelude.Just ("S3OutputLocation" Core..= s3OutputLocation)
          ]
      )
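-- A minimal wire-format sketch (editorial addition, not generated code):
-- because 'Prelude.catMaybes' above drops unset optional fields, a bare
-- config serializes to a single-key JSON object. This assumes
-- 'Core.toJSON' and 'Core.object' align with aeson's encoding, which
-- holds when Amazonka.Core re-exports Data.Aeson's classes.
--
--   >>> import qualified Data.Aeson as Aeson
--   >>> Aeson.encode (Core.toJSON (newOutputConfig "s3://my-bucket/prefix"))
--   "{\"S3OutputLocation\":\"s3://my-bucket/prefix\"}"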