{-# LANGUAGE DeriveGeneric #-}
{-# LANGUAGE DuplicateRecordFields #-}
{-# LANGUAGE NamedFieldPuns #-}
{-# LANGUAGE OverloadedStrings #-}
{-# LANGUAGE RecordWildCards #-}
{-# LANGUAGE StrictData #-}
{-# LANGUAGE NoImplicitPrelude #-}
{-# OPTIONS_GHC -fno-warn-unused-imports #-}
{-# OPTIONS_GHC -fno-warn-unused-matches #-}

-- Derived from AWS service descriptions, licensed under Apache 2.0.

-- |
-- Module      : Amazonka.SageMaker.Types.InputConfig
-- Copyright   : (c) 2013-2021 Brendan Hay
-- License     : Mozilla Public License, v. 2.0.
-- Maintainer  : Brendan Hay <brendan.g.hay+amazonka@gmail.com>
-- Stability   : auto-generated
-- Portability : non-portable (GHC extensions)
module Amazonka.SageMaker.Types.InputConfig where

import qualified Amazonka.Core as Core
import qualified Amazonka.Lens as Lens
import qualified Amazonka.Prelude as Prelude
import Amazonka.SageMaker.Types.Framework

-- | Contains information about the location of input model artifacts, the
-- name and shape of the expected data inputs, and the framework in which
-- the model was trained.
--
-- /See:/ 'newInputConfig' smart constructor.
data InputConfig = InputConfig'
  { -- | Specifies the framework version to use.
    --
    -- This API field is only supported for PyTorch framework versions @1.4@,
    -- @1.5@, and @1.6@ for cloud instance target devices: @ml_c4@, @ml_c5@,
    -- @ml_m4@, @ml_m5@, @ml_p2@, @ml_p3@, and @ml_g4dn@.
    frameworkVersion :: Prelude.Maybe Prelude.Text,
    -- | The S3 path where the model artifacts, which result from model training,
    -- are stored. This path must point to a single gzip compressed tar archive
    -- (.tar.gz suffix).
    s3Uri :: Prelude.Text,
    -- | Specifies the name and shape of the expected data inputs for your
    -- trained model with a JSON dictionary form. The data inputs are
    -- InputConfig$Framework specific.
    --
    -- -   @TensorFlow@: You must specify the name and shape (NHWC format) of
    --     the expected data inputs using a dictionary format for your trained
    --     model. The dictionary formats required for the console and CLI are
    --     different.
    --
    --     -   Examples for one input:
    --
    --         -   If using the console, @{\"input\":[1,1024,1024,3]}@
    --
    --         -   If using the CLI, @{\\\"input\\\":[1,1024,1024,3]}@
    --
    --     -   Examples for two inputs:
    --
    --         -   If using the console,
    --             @{\"data1\": [1,28,28,1], \"data2\":[1,28,28,1]}@
    --
    --         -   If using the CLI,
    --             @{\\\"data1\\\": [1,28,28,1], \\\"data2\\\":[1,28,28,1]}@
    --
    -- -   @KERAS@: You must specify the name and shape (NCHW format) of
    --     expected data inputs using a dictionary format for your trained
    --     model. Note that while Keras model artifacts should be uploaded in
    --     NHWC (channel-last) format, @DataInputConfig@ should be specified in
    --     NCHW (channel-first) format. The dictionary formats required for the
    --     console and CLI are different.
    --
    --     -   Examples for one input:
    --
    --         -   If using the console, @{\"input_1\":[1,3,224,224]}@
    --
    --         -   If using the CLI, @{\\\"input_1\\\":[1,3,224,224]}@
    --
    --     -   Examples for two inputs:
    --
    --         -   If using the console,
    --             @{\"input_1\": [1,3,224,224], \"input_2\":[1,3,224,224]} @
    --
    --         -   If using the CLI,
    --             @{\\\"input_1\\\": [1,3,224,224], \\\"input_2\\\":[1,3,224,224]}@
    --
    -- -   @MXNET\/ONNX\/DARKNET@: You must specify the name and shape (NCHW
    --     format) of the expected data inputs in order using a dictionary
    --     format for your trained model. The dictionary formats required for
    --     the console and CLI are different.
    --
    --     -   Examples for one input:
    --
    --         -   If using the console, @{\"data\":[1,3,1024,1024]}@
    --
    --         -   If using the CLI, @{\\\"data\\\":[1,3,1024,1024]}@
    --
    --     -   Examples for two inputs:
    --
    --         -   If using the console,
    --             @{\"var1\": [1,1,28,28], \"var2\":[1,1,28,28]} @
    --
    --         -   If using the CLI,
    --             @{\\\"var1\\\": [1,1,28,28], \\\"var2\\\":[1,1,28,28]}@
    --
    -- -   @PyTorch@: You can either specify the name and shape (NCHW format)
    --     of expected data inputs in order using a dictionary format for your
    --     trained model or you can specify the shape only using a list format.
    --     The dictionary formats required for the console and CLI are
    --     different. The list formats for the console and CLI are the same.
    --
    --     -   Examples for one input in dictionary format:
    --
    --         -   If using the console, @{\"input0\":[1,3,224,224]}@
    --
    --         -   If using the CLI, @{\\\"input0\\\":[1,3,224,224]}@
    --
    --     -   Example for one input in list format: @[[1,3,224,224]]@
    --
    --     -   Examples for two inputs in dictionary format:
    --
    --         -   If using the console,
    --             @{\"input0\":[1,3,224,224], \"input1\":[1,3,224,224]}@
    --
    --         -   If using the CLI,
    --             @{\\\"input0\\\":[1,3,224,224], \\\"input1\\\":[1,3,224,224]} @
    --
    --     -   Example for two inputs in list format:
    --         @[[1,3,224,224], [1,3,224,224]]@
    --
    -- -   @XGBOOST@: input data name and shape are not needed.
    --
    -- @DataInputConfig@ supports the following parameters for @CoreML@
    -- OutputConfig$TargetDevice (ML Model format):
    --
    -- -   @shape@: Input shape, for example
    --     @{\"input_1\": {\"shape\": [1,224,224,3]}}@. In addition to static
    --     input shapes, CoreML converter supports Flexible input shapes:
    --
    --     -   Range Dimension. You can use the Range Dimension feature if you
    --         know the input shape will be within some specific interval in
    --         that dimension, for example:
    --         @{\"input_1\": {\"shape\": [\"1..10\", 224, 224, 3]}}@
    --
    --     -   Enumerated shapes. Sometimes, the models are trained to work
    --         only on a select set of inputs. You can enumerate all supported
    --         input shapes, for example:
    --         @{\"input_1\": {\"shape\": [[1, 224, 224, 3], [1, 160, 160, 3]]}}@
    --
    -- -   @default_shape@: Default input shape. You can set a default shape
    --     during conversion for both Range Dimension and Enumerated Shapes.
    --     For example
    --     @{\"input_1\": {\"shape\": [\"1..10\", 224, 224, 3], \"default_shape\": [1, 224, 224, 3]}}@
    --
    -- -   @type@: Input type. Allowed values: @Image@ and @Tensor@. By
    --     default, the converter generates an ML Model with inputs of type
    --     Tensor (MultiArray). User can set input type to be Image. Image
    --     input type requires additional input parameters such as @bias@ and
    --     @scale@.
    --
    -- -   @bias@: If the input type is an Image, you need to provide the bias
    --     vector.
    --
    -- -   @scale@: If the input type is an Image, you need to provide a scale
    --     factor.
    --
    -- CoreML @ClassifierConfig@ parameters can be specified using
    -- OutputConfig$CompilerOptions. CoreML converter supports TensorFlow and
    -- PyTorch models. CoreML conversion examples:
    --
    -- -   Tensor type input:
    --
    --     -   @\"DataInputConfig\": {\"input_1\": {\"shape\": [[1,224,224,3], [1,160,160,3]], \"default_shape\": [1,224,224,3]}}@
    --
    -- -   Tensor type input without input name (PyTorch):
    --
    --     -   @\"DataInputConfig\": [{\"shape\": [[1,3,224,224], [1,3,160,160]], \"default_shape\": [1,3,224,224]}]@
    --
    -- -   Image type input:
    --
    --     -   @\"DataInputConfig\": {\"input_1\": {\"shape\": [[1,224,224,3], [1,160,160,3]], \"default_shape\": [1,224,224,3], \"type\": \"Image\", \"bias\": [-1,-1,-1], \"scale\": 0.007843137255}}@
    --
    --     -   @\"CompilerOptions\": {\"class_labels\": \"imagenet_labels_1000.txt\"}@
    --
    -- -   Image type input without input name (PyTorch):
    --
    --     -   @\"DataInputConfig\": [{\"shape\": [[1,3,224,224], [1,3,160,160]], \"default_shape\": [1,3,224,224], \"type\": \"Image\", \"bias\": [-1,-1,-1], \"scale\": 0.007843137255}]@
    --
    --     -   @\"CompilerOptions\": {\"class_labels\": \"imagenet_labels_1000.txt\"}@
    --
    -- Depending on the model format, @DataInputConfig@ requires the following
    -- parameters for @ml_eia2@
    -- <https://docs.aws.amazon.com/sagemaker/latest/APIReference/API_OutputConfig.html#sagemaker-Type-OutputConfig-TargetDevice OutputConfig:TargetDevice>.
    --
    -- -   For TensorFlow models saved in the SavedModel format, specify the
    --     input names from @signature_def_key@ and the input model shapes for
    --     @DataInputConfig@. Specify the @signature_def_key@ in
    --     <https://docs.aws.amazon.com/sagemaker/latest/APIReference/API_OutputConfig.html#sagemaker-Type-OutputConfig-CompilerOptions OutputConfig:CompilerOptions>
    --     if the model does not use TensorFlow\'s default signature def key.
    --     For example:
    --
    --     -   @\"DataInputConfig\": {\"inputs\": [1, 224, 224, 3]}@
    --
    --     -   @\"CompilerOptions\": {\"signature_def_key\": \"serving_custom\"}@
    --
    -- -   For TensorFlow models saved as a frozen graph, specify the input
    --     tensor names and shapes in @DataInputConfig@ and the output tensor
    --     names for @output_names@ in
    --     <https://docs.aws.amazon.com/sagemaker/latest/APIReference/API_OutputConfig.html#sagemaker-Type-OutputConfig-CompilerOptions OutputConfig:CompilerOptions>
    --     . For example:
    --
    --     -   @\"DataInputConfig\": {\"input_tensor:0\": [1, 224, 224, 3]}@
    --
    --     -   @\"CompilerOptions\": {\"output_names\": [\"output_tensor:0\"]}@
    dataInputConfig :: Prelude.Text,
    -- | Identifies the framework in which the model was trained. For example:
    -- TENSORFLOW.
    framework :: Framework
  }
  deriving (Prelude.Eq, Prelude.Read, Prelude.Show, Prelude.Generic)

-- |
-- Create a value of 'InputConfig' with all optional fields omitted.
--
-- Use <https://hackage.haskell.org/package/generic-lens generic-lens> or <https://hackage.haskell.org/package/optics optics> to modify other optional fields.
--
-- The following record fields are available, with the corresponding lenses provided
-- for backwards compatibility:
--
-- 'frameworkVersion', 'inputConfig_frameworkVersion' - Specifies the framework version to use.
--
-- This API field is only supported for PyTorch framework versions @1.4@,
-- @1.5@, and @1.6@ for cloud instance target devices: @ml_c4@, @ml_c5@,
-- @ml_m4@, @ml_m5@, @ml_p2@, @ml_p3@, and @ml_g4dn@.
--
-- 's3Uri', 'inputConfig_s3Uri' - The S3 path where the model artifacts, which result from model training,
-- are stored. This path must point to a single gzip compressed tar archive
-- (.tar.gz suffix).
--
-- 'dataInputConfig', 'inputConfig_dataInputConfig' - Specifies the name and shape of the expected data inputs for your
-- trained model with a JSON dictionary form. The data inputs are
-- InputConfig$Framework specific.
--
-- -   @TensorFlow@: You must specify the name and shape (NHWC format) of
--     the expected data inputs using a dictionary format for your trained
--     model. The dictionary formats required for the console and CLI are
--     different.
--
--     -   Examples for one input:
--
--         -   If using the console, @{\"input\":[1,1024,1024,3]}@
--
--         -   If using the CLI, @{\\\"input\\\":[1,1024,1024,3]}@
--
--     -   Examples for two inputs:
--
--         -   If using the console,
--             @{\"data1\": [1,28,28,1], \"data2\":[1,28,28,1]}@
--
--         -   If using the CLI,
--             @{\\\"data1\\\": [1,28,28,1], \\\"data2\\\":[1,28,28,1]}@
--
-- -   @KERAS@: You must specify the name and shape (NCHW format) of
--     expected data inputs using a dictionary format for your trained
--     model. Note that while Keras model artifacts should be uploaded in
--     NHWC (channel-last) format, @DataInputConfig@ should be specified in
--     NCHW (channel-first) format. The dictionary formats required for the
--     console and CLI are different.
--
--     -   Examples for one input:
--
--         -   If using the console, @{\"input_1\":[1,3,224,224]}@
--
--         -   If using the CLI, @{\\\"input_1\\\":[1,3,224,224]}@
--
--     -   Examples for two inputs:
--
--         -   If using the console,
--             @{\"input_1\": [1,3,224,224], \"input_2\":[1,3,224,224]} @
--
--         -   If using the CLI,
--             @{\\\"input_1\\\": [1,3,224,224], \\\"input_2\\\":[1,3,224,224]}@
--
-- -   @MXNET\/ONNX\/DARKNET@: You must specify the name and shape (NCHW
--     format) of the expected data inputs in order using a dictionary
--     format for your trained model. The dictionary formats required for
--     the console and CLI are different.
--
--     -   Examples for one input:
--
--         -   If using the console, @{\"data\":[1,3,1024,1024]}@
--
--         -   If using the CLI, @{\\\"data\\\":[1,3,1024,1024]}@
--
--     -   Examples for two inputs:
--
--         -   If using the console,
--             @{\"var1\": [1,1,28,28], \"var2\":[1,1,28,28]} @
--
--         -   If using the CLI,
--             @{\\\"var1\\\": [1,1,28,28], \\\"var2\\\":[1,1,28,28]}@
--
-- -   @PyTorch@: You can either specify the name and shape (NCHW format)
--     of expected data inputs in order using a dictionary format for your
--     trained model or you can specify the shape only using a list format.
--     The dictionary formats required for the console and CLI are
--     different. The list formats for the console and CLI are the same.
--
--     -   Examples for one input in dictionary format:
--
--         -   If using the console, @{\"input0\":[1,3,224,224]}@
--
--         -   If using the CLI, @{\\\"input0\\\":[1,3,224,224]}@
--
--     -   Example for one input in list format: @[[1,3,224,224]]@
--
--     -   Examples for two inputs in dictionary format:
--
--         -   If using the console,
--             @{\"input0\":[1,3,224,224], \"input1\":[1,3,224,224]}@
--
--         -   If using the CLI,
--             @{\\\"input0\\\":[1,3,224,224], \\\"input1\\\":[1,3,224,224]} @
--
--     -   Example for two inputs in list format:
--         @[[1,3,224,224], [1,3,224,224]]@
--
-- -   @XGBOOST@: input data name and shape are not needed.
--
-- @DataInputConfig@ supports the following parameters for @CoreML@
-- OutputConfig$TargetDevice (ML Model format):
--
-- -   @shape@: Input shape, for example
--     @{\"input_1\": {\"shape\": [1,224,224,3]}}@. In addition to static
--     input shapes, CoreML converter supports Flexible input shapes:
--
--     -   Range Dimension. You can use the Range Dimension feature if you
--         know the input shape will be within some specific interval in
--         that dimension, for example:
--         @{\"input_1\": {\"shape\": [\"1..10\", 224, 224, 3]}}@
--
--     -   Enumerated shapes. Sometimes, the models are trained to work
--         only on a select set of inputs. You can enumerate all supported
--         input shapes, for example:
--         @{\"input_1\": {\"shape\": [[1, 224, 224, 3], [1, 160, 160, 3]]}}@
--
-- -   @default_shape@: Default input shape. You can set a default shape
--     during conversion for both Range Dimension and Enumerated Shapes.
--     For example
--     @{\"input_1\": {\"shape\": [\"1..10\", 224, 224, 3], \"default_shape\": [1, 224, 224, 3]}}@
--
-- -   @type@: Input type. Allowed values: @Image@ and @Tensor@. By
--     default, the converter generates an ML Model with inputs of type
--     Tensor (MultiArray). User can set input type to be Image. Image
--     input type requires additional input parameters such as @bias@ and
--     @scale@.
--
-- -   @bias@: If the input type is an Image, you need to provide the bias
--     vector.
--
-- -   @scale@: If the input type is an Image, you need to provide a scale
--     factor.
--
-- CoreML @ClassifierConfig@ parameters can be specified using
-- OutputConfig$CompilerOptions. CoreML converter supports TensorFlow and
-- PyTorch models. CoreML conversion examples:
--
-- -   Tensor type input:
--
--     -   @\"DataInputConfig\": {\"input_1\": {\"shape\": [[1,224,224,3], [1,160,160,3]], \"default_shape\": [1,224,224,3]}}@
--
-- -   Tensor type input without input name (PyTorch):
--
--     -   @\"DataInputConfig\": [{\"shape\": [[1,3,224,224], [1,3,160,160]], \"default_shape\": [1,3,224,224]}]@
--
-- -   Image type input:
--
--     -   @\"DataInputConfig\": {\"input_1\": {\"shape\": [[1,224,224,3], [1,160,160,3]], \"default_shape\": [1,224,224,3], \"type\": \"Image\", \"bias\": [-1,-1,-1], \"scale\": 0.007843137255}}@
--
--     -   @\"CompilerOptions\": {\"class_labels\": \"imagenet_labels_1000.txt\"}@
--
-- -   Image type input without input name (PyTorch):
--
--     -   @\"DataInputConfig\": [{\"shape\": [[1,3,224,224], [1,3,160,160]], \"default_shape\": [1,3,224,224], \"type\": \"Image\", \"bias\": [-1,-1,-1], \"scale\": 0.007843137255}]@
--
--     -   @\"CompilerOptions\": {\"class_labels\": \"imagenet_labels_1000.txt\"}@
--
-- Depending on the model format, @DataInputConfig@ requires the following
-- parameters for @ml_eia2@
-- <https://docs.aws.amazon.com/sagemaker/latest/APIReference/API_OutputConfig.html#sagemaker-Type-OutputConfig-TargetDevice OutputConfig:TargetDevice>.
--
-- -   For TensorFlow models saved in the SavedModel format, specify the
--     input names from @signature_def_key@ and the input model shapes for
--     @DataInputConfig@. Specify the @signature_def_key@ in
--     <https://docs.aws.amazon.com/sagemaker/latest/APIReference/API_OutputConfig.html#sagemaker-Type-OutputConfig-CompilerOptions OutputConfig:CompilerOptions>
--     if the model does not use TensorFlow\'s default signature def key.
--     For example:
--
--     -   @\"DataInputConfig\": {\"inputs\": [1, 224, 224, 3]}@
--
--     -   @\"CompilerOptions\": {\"signature_def_key\": \"serving_custom\"}@
--
-- -   For TensorFlow models saved as a frozen graph, specify the input
--     tensor names and shapes in @DataInputConfig@ and the output tensor
--     names for @output_names@ in
--     <https://docs.aws.amazon.com/sagemaker/latest/APIReference/API_OutputConfig.html#sagemaker-Type-OutputConfig-CompilerOptions OutputConfig:CompilerOptions>
--     . For example:
--
--     -   @\"DataInputConfig\": {\"input_tensor:0\": [1, 224, 224, 3]}@
--
--     -   @\"CompilerOptions\": {\"output_names\": [\"output_tensor:0\"]}@
--
-- 'framework', 'inputConfig_framework' - Identifies the framework in which the model was trained. For example:
-- TENSORFLOW.
newInputConfig ::
  -- | 's3Uri'
  Prelude.Text ->
  -- | 'dataInputConfig'
  Prelude.Text ->
  -- | 'framework'
  Framework ->
  InputConfig
newInputConfig pS3Uri_ pDataInputConfig_ pFramework_ =
  InputConfig'
    { frameworkVersion = Prelude.Nothing,
      s3Uri = pS3Uri_,
      dataInputConfig = pDataInputConfig_,
      framework = pFramework_
    }
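
-- A minimal usage sketch (illustrative only, not emitted by the generator):
-- construct an 'InputConfig' for a PyTorch model, then set the optional
-- framework version through its lens. This assumes the 'Framework_PYTORCH'
-- pattern from "Amazonka.SageMaker.Types.Framework" and the ('&') and ('?~')
-- operators from the lens package.
--
-- > examplePyTorchConfig :: InputConfig
-- > examplePyTorchConfig =
-- >   newInputConfig
-- >     "s3://my-bucket/model.tar.gz"
-- >     "{\"input0\":[1,3,224,224]}"
-- >     Framework_PYTORCH
-- >     & inputConfig_frameworkVersion ?~ "1.6"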

-- | Specifies the framework version to use.
--
-- This API field is only supported for PyTorch framework versions @1.4@,
-- @1.5@, and @1.6@ for cloud instance target devices: @ml_c4@, @ml_c5@,
-- @ml_m4@, @ml_m5@, @ml_p2@, @ml_p3@, and @ml_g4dn@.
inputConfig_frameworkVersion :: Lens.Lens' InputConfig (Prelude.Maybe Prelude.Text)
inputConfig_frameworkVersion = Lens.lens (\InputConfig' {frameworkVersion} -> frameworkVersion) (\s@InputConfig' {} a -> s {frameworkVersion = a} :: InputConfig)

-- | The S3 path where the model artifacts, which result from model training,
-- are stored. This path must point to a single gzip compressed tar archive
-- (.tar.gz suffix).
inputConfig_s3Uri :: Lens.Lens' InputConfig Prelude.Text
inputConfig_s3Uri = Lens.lens (\InputConfig' {s3Uri} -> s3Uri) (\s@InputConfig' {} a -> s {s3Uri = a} :: InputConfig)

-- | Specifies the name and shape of the expected data inputs for your
-- trained model with a JSON dictionary form. The data inputs are
-- InputConfig$Framework specific.
--
-- -   @TensorFlow@: You must specify the name and shape (NHWC format) of
--     the expected data inputs using a dictionary format for your trained
--     model. The dictionary formats required for the console and CLI are
--     different.
--
--     -   Examples for one input:
--
--         -   If using the console, @{\"input\":[1,1024,1024,3]}@
--
--         -   If using the CLI, @{\\\"input\\\":[1,1024,1024,3]}@
--
--     -   Examples for two inputs:
--
--         -   If using the console,
--             @{\"data1\": [1,28,28,1], \"data2\":[1,28,28,1]}@
--
--         -   If using the CLI,
--             @{\\\"data1\\\": [1,28,28,1], \\\"data2\\\":[1,28,28,1]}@
--
-- -   @KERAS@: You must specify the name and shape (NCHW format) of
--     expected data inputs using a dictionary format for your trained
--     model. Note that while Keras model artifacts should be uploaded in
--     NHWC (channel-last) format, @DataInputConfig@ should be specified in
--     NCHW (channel-first) format. The dictionary formats required for the
--     console and CLI are different.
--
--     -   Examples for one input:
--
--         -   If using the console, @{\"input_1\":[1,3,224,224]}@
--
--         -   If using the CLI, @{\\\"input_1\\\":[1,3,224,224]}@
--
--     -   Examples for two inputs:
--
--         -   If using the console,
--             @{\"input_1\": [1,3,224,224], \"input_2\":[1,3,224,224]} @
--
--         -   If using the CLI,
--             @{\\\"input_1\\\": [1,3,224,224], \\\"input_2\\\":[1,3,224,224]}@
--
-- -   @MXNET\/ONNX\/DARKNET@: You must specify the name and shape (NCHW
--     format) of the expected data inputs in order using a dictionary
--     format for your trained model. The dictionary formats required for
--     the console and CLI are different.
--
--     -   Examples for one input:
--
--         -   If using the console, @{\"data\":[1,3,1024,1024]}@
--
--         -   If using the CLI, @{\\\"data\\\":[1,3,1024,1024]}@
--
--     -   Examples for two inputs:
--
--         -   If using the console,
--             @{\"var1\": [1,1,28,28], \"var2\":[1,1,28,28]} @
--
--         -   If using the CLI,
--             @{\\\"var1\\\": [1,1,28,28], \\\"var2\\\":[1,1,28,28]}@
--
-- -   @PyTorch@: You can either specify the name and shape (NCHW format)
--     of expected data inputs in order using a dictionary format for your
--     trained model or you can specify the shape only using a list format.
--     The dictionary formats required for the console and CLI are
--     different. The list formats for the console and CLI are the same.
--
--     -   Examples for one input in dictionary format:
--
--         -   If using the console, @{\"input0\":[1,3,224,224]}@
--
--         -   If using the CLI, @{\\\"input0\\\":[1,3,224,224]}@
--
--     -   Example for one input in list format: @[[1,3,224,224]]@
--
--     -   Examples for two inputs in dictionary format:
--
--         -   If using the console,
--             @{\"input0\":[1,3,224,224], \"input1\":[1,3,224,224]}@
--
--         -   If using the CLI,
--             @{\\\"input0\\\":[1,3,224,224], \\\"input1\\\":[1,3,224,224]} @
--
--     -   Example for two inputs in list format:
--         @[[1,3,224,224], [1,3,224,224]]@
--
-- -   @XGBOOST@: input data name and shape are not needed.
--
-- @DataInputConfig@ supports the following parameters for @CoreML@
-- OutputConfig$TargetDevice (ML Model format):
--
-- -   @shape@: Input shape, for example
--     @{\"input_1\": {\"shape\": [1,224,224,3]}}@. In addition to static
--     input shapes, CoreML converter supports Flexible input shapes:
--
--     -   Range Dimension. You can use the Range Dimension feature if you
--         know the input shape will be within some specific interval in
--         that dimension, for example:
--         @{\"input_1\": {\"shape\": [\"1..10\", 224, 224, 3]}}@
--
--     -   Enumerated shapes. Sometimes, the models are trained to work
--         only on a select set of inputs. You can enumerate all supported
--         input shapes, for example:
--         @{\"input_1\": {\"shape\": [[1, 224, 224, 3], [1, 160, 160, 3]]}}@
--
-- -   @default_shape@: Default input shape. You can set a default shape
--     during conversion for both Range Dimension and Enumerated Shapes.
--     For example
--     @{\"input_1\": {\"shape\": [\"1..10\", 224, 224, 3], \"default_shape\": [1, 224, 224, 3]}}@
--
-- -   @type@: Input type. Allowed values: @Image@ and @Tensor@. By
--     default, the converter generates an ML Model with inputs of type
--     Tensor (MultiArray). User can set input type to be Image. Image
--     input type requires additional input parameters such as @bias@ and
--     @scale@.
--
-- -   @bias@: If the input type is an Image, you need to provide the bias
--     vector.
--
-- -   @scale@: If the input type is an Image, you need to provide a scale
--     factor.
--
-- CoreML @ClassifierConfig@ parameters can be specified using
-- OutputConfig$CompilerOptions. CoreML converter supports TensorFlow and
-- PyTorch models. CoreML conversion examples:
--
-- -   Tensor type input:
--
--     -   @\"DataInputConfig\": {\"input_1\": {\"shape\": [[1,224,224,3], [1,160,160,3]], \"default_shape\": [1,224,224,3]}}@
--
-- -   Tensor type input without input name (PyTorch):
--
--     -   @\"DataInputConfig\": [{\"shape\": [[1,3,224,224], [1,3,160,160]], \"default_shape\": [1,3,224,224]}]@
--
-- -   Image type input:
--
--     -   @\"DataInputConfig\": {\"input_1\": {\"shape\": [[1,224,224,3], [1,160,160,3]], \"default_shape\": [1,224,224,3], \"type\": \"Image\", \"bias\": [-1,-1,-1], \"scale\": 0.007843137255}}@
--
--     -   @\"CompilerOptions\": {\"class_labels\": \"imagenet_labels_1000.txt\"}@
--
-- -   Image type input without input name (PyTorch):
--
--     -   @\"DataInputConfig\": [{\"shape\": [[1,3,224,224], [1,3,160,160]], \"default_shape\": [1,3,224,224], \"type\": \"Image\", \"bias\": [-1,-1,-1], \"scale\": 0.007843137255}]@
--
--     -   @\"CompilerOptions\": {\"class_labels\": \"imagenet_labels_1000.txt\"}@
--
-- Depending on the model format, @DataInputConfig@ requires the following
-- parameters for @ml_eia2@
-- <https://docs.aws.amazon.com/sagemaker/latest/APIReference/API_OutputConfig.html#sagemaker-Type-OutputConfig-TargetDevice OutputConfig:TargetDevice>.
--
-- -   For TensorFlow models saved in the SavedModel format, specify the
--     input names from @signature_def_key@ and the input model shapes for
--     @DataInputConfig@. Specify the @signature_def_key@ in
--     <https://docs.aws.amazon.com/sagemaker/latest/APIReference/API_OutputConfig.html#sagemaker-Type-OutputConfig-CompilerOptions OutputConfig:CompilerOptions>
--     if the model does not use TensorFlow\'s default signature def key.
--     For example:
--
--     -   @\"DataInputConfig\": {\"inputs\": [1, 224, 224, 3]}@
--
--     -   @\"CompilerOptions\": {\"signature_def_key\": \"serving_custom\"}@
--
-- -   For TensorFlow models saved as a frozen graph, specify the input
--     tensor names and shapes in @DataInputConfig@ and the output tensor
--     names for @output_names@ in
--     <https://docs.aws.amazon.com/sagemaker/latest/APIReference/API_OutputConfig.html#sagemaker-Type-OutputConfig-CompilerOptions OutputConfig:CompilerOptions>
--     . For example:
--
--     -   @\"DataInputConfig\": {\"input_tensor:0\": [1, 224, 224, 3]}@
--
--     -   @\"CompilerOptions\": {\"output_names\": [\"output_tensor:0\"]}@
inputConfig_dataInputConfig :: Lens.Lens' InputConfig Prelude.Text
inputConfig_dataInputConfig = Lens.lens (\InputConfig' {dataInputConfig} -> dataInputConfig) (\s@InputConfig' {} a -> s {dataInputConfig = a} :: InputConfig)

-- | Identifies the framework in which the model was trained. For example:
-- TENSORFLOW.
inputConfig_framework :: Lens.Lens' InputConfig Framework
inputConfig_framework = Lens.lens (\InputConfig' {framework} -> framework) (\s@InputConfig' {} a -> s {framework = a} :: InputConfig)
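
-- Reading fields back out goes through the same lenses, for example with
-- ('^.') from the lens package. A sketch, assuming some value
-- @cfg :: InputConfig@:
--
-- > cfg ^. inputConfig_s3Uri              -- the model artifact S3 path
-- > cfg ^. inputConfig_framework          -- the training framework
-- > cfg ^. inputConfig_frameworkVersion   -- Maybe Text; Nothing unless set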

instance Core.FromJSON InputConfig where
  parseJSON =
    Core.withObject
      "InputConfig"
      ( \x ->
          InputConfig'
            Prelude.<$> (x Core..:? "FrameworkVersion")
            Prelude.<*> (x Core..: "S3Uri")
            Prelude.<*> (x Core..: "DataInputConfig")
            Prelude.<*> (x Core..: "Framework")
      )

instance Prelude.Hashable InputConfig

instance Prelude.NFData InputConfig

instance Core.ToJSON InputConfig where
  toJSON InputConfig' {..} =
    Core.object
      ( Prelude.catMaybes
          [ ("FrameworkVersion" Core..=)
              Prelude.<$> frameworkVersion,
            Prelude.Just ("S3Uri" Core..= s3Uri),
            Prelude.Just
              ("DataInputConfig" Core..= dataInputConfig),
            Prelude.Just ("Framework" Core..= framework)
          ]
      )
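
-- For reference, the JSON exchanged by these instances uses the keys wired in
-- above; a sketch of one serialised value follows, assuming the framework is
-- rendered as its upper-case API name and noting that "FrameworkVersion" is
-- omitted entirely when 'frameworkVersion' is 'Prelude.Nothing':
--
-- > {
-- >   "S3Uri": "s3://my-bucket/model.tar.gz",
-- >   "DataInputConfig": "{\"input0\":[1,3,224,224]}",
-- >   "Framework": "PYTORCH",
-- >   "FrameworkVersion": "1.6"
-- > }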