{-# LANGUAGE DeriveGeneric #-}
{-# LANGUAGE DuplicateRecordFields #-}
{-# LANGUAGE NamedFieldPuns #-}
{-# LANGUAGE OverloadedStrings #-}
{-# LANGUAGE RecordWildCards #-}
{-# LANGUAGE StrictData #-}
{-# LANGUAGE TypeFamilies #-}
{-# LANGUAGE NoImplicitPrelude #-}
{-# OPTIONS_GHC -fno-warn-unused-binds #-}
{-# OPTIONS_GHC -fno-warn-unused-imports #-}
{-# OPTIONS_GHC -fno-warn-unused-matches #-}

-- Derived from AWS service descriptions, licensed under Apache 2.0.

-- |
-- Module      : Amazonka.Rekognition.IndexFaces
-- Copyright   : (c) 2013-2021 Brendan Hay
-- License     : Mozilla Public License, v. 2.0.
-- Maintainer  : Brendan Hay <brendan.g.hay+amazonka@gmail.com>
-- Stability   : auto-generated
-- Portability : non-portable (GHC extensions)
--
-- Detects faces in the input image and adds them to the specified
-- collection.
--
-- Amazon Rekognition doesn\'t save the actual faces that are detected.
-- Instead, the underlying detection algorithm first detects the faces in
-- the input image. For each face, the algorithm extracts facial features
-- into a feature vector, and stores it in the backend database. Amazon
-- Rekognition uses feature vectors when it performs face match and search
-- operations using the SearchFaces and SearchFacesByImage operations.
--
-- For more information, see Adding Faces to a Collection in the Amazon
-- Rekognition Developer Guide.
--
-- To get the number of faces in a collection, call DescribeCollection.
--
-- If you\'re using version 1.0 of the face detection model, @IndexFaces@
-- indexes the 15 largest faces in the input image. Later versions of the
-- face detection model index the 100 largest faces in the input image.
--
-- If you\'re using version 4 or later of the face model, image orientation
-- information is not returned in the @OrientationCorrection@ field.
--
-- To determine which version of the model you\'re using, call
-- DescribeCollection and supply the collection ID. You can also get the
-- model version from the value of @FaceModelVersion@ in the response from
-- @IndexFaces@
--
-- For more information, see Model Versioning in the Amazon Rekognition
-- Developer Guide.
--
-- If you provide the optional @ExternalImageId@ for the input image you
-- provided, Amazon Rekognition associates this ID with all faces that it
-- detects. When you call the ListFaces operation, the response returns the
-- external ID. You can use this external image ID to create a client-side
-- index to associate the faces with each image. You can then use the index
-- to find all faces in an image.
--
-- You can specify the maximum number of faces to index with the @MaxFaces@
-- input parameter. This is useful when you want to index the largest faces
-- in an image and don\'t want to index smaller faces, such as those
-- belonging to people standing in the background.
--
-- The @QualityFilter@ input parameter allows you to filter out detected
-- faces that don’t meet a required quality bar. The quality bar is based
-- on a variety of common use cases. By default, @IndexFaces@ chooses the
-- quality bar that\'s used to filter faces. You can also explicitly choose
-- the quality bar. Use @QualityFilter@, to set the quality bar by
-- specifying @LOW@, @MEDIUM@, or @HIGH@. If you do not want to filter
-- detected faces, specify @NONE@.
--
-- To use quality filtering, you need a collection associated with version
-- 3 of the face model or higher. To get the version of the face model
-- associated with a collection, call DescribeCollection.
--
-- Information about faces detected in an image, but not indexed, is
-- returned in an array of UnindexedFace objects, @UnindexedFaces@. Faces
-- aren\'t indexed for reasons such as:
--
-- -   The number of faces detected exceeds the value of the @MaxFaces@
--     request parameter.
--
-- -   The face is too small compared to the image dimensions.
--
-- -   The face is too blurry.
--
-- -   The image is too dark.
--
-- -   The face has an extreme pose.
--
-- -   The face doesn’t have enough detail to be suitable for face search.
--
-- In response, the @IndexFaces@ operation returns an array of metadata for
-- all detected faces, @FaceRecords@. This includes:
--
-- -   The bounding box, @BoundingBox@, of the detected face.
--
-- -   A confidence value, @Confidence@, which indicates the confidence
--     that the bounding box contains a face.
--
-- -   A face ID, @FaceId@, assigned by the service for each face that\'s
--     detected and stored.
--
-- -   An image ID, @ImageId@, assigned by the service for the input image.
--
-- If you request all facial attributes (by using the @detectionAttributes@
-- parameter), Amazon Rekognition returns detailed facial attributes, such
-- as facial landmarks (for example, location of eye and mouth) and other
-- facial attributes. If you provide the same image, specify the same
-- collection, and use the same external ID in the @IndexFaces@ operation,
-- Amazon Rekognition doesn\'t save duplicate face metadata.
--
-- The input image is passed either as base64-encoded image bytes, or as a
-- reference to an image in an Amazon S3 bucket. If you use the AWS CLI to
-- call Amazon Rekognition operations, passing image bytes isn\'t
-- supported. The image must be formatted as a PNG or JPEG file.
--
-- This operation requires permissions to perform the
-- @rekognition:IndexFaces@ action.
module Amazonka.Rekognition.IndexFaces
  ( -- * Creating a Request
    IndexFaces (..),
    newIndexFaces,

    -- * Request Lenses
    indexFaces_externalImageId,
    indexFaces_qualityFilter,
    indexFaces_maxFaces,
    indexFaces_detectionAttributes,
    indexFaces_collectionId,
    indexFaces_image,

    -- * Destructuring the Response
    IndexFacesResponse (..),
    newIndexFacesResponse,

    -- * Response Lenses
    indexFacesResponse_faceModelVersion,
    indexFacesResponse_faceRecords,
    indexFacesResponse_orientationCorrection,
    indexFacesResponse_unindexedFaces,
    indexFacesResponse_httpStatus,
  )
where

import qualified Amazonka.Core as Core
import qualified Amazonka.Lens as Lens
import qualified Amazonka.Prelude as Prelude
import Amazonka.Rekognition.Types
import qualified Amazonka.Request as Request
import qualified Amazonka.Response as Response

-- | /See:/ 'newIndexFaces' smart constructor.
data IndexFaces = IndexFaces'
  { -- | The ID you want to assign to all the faces detected in the image.
    externalImageId :: Prelude.Maybe Prelude.Text,
    -- | A filter that specifies a quality bar for how much filtering is done to
    -- identify faces. Filtered faces aren\'t indexed. If you specify @AUTO@,
    -- Amazon Rekognition chooses the quality bar. If you specify @LOW@,
    -- @MEDIUM@, or @HIGH@, filtering removes all faces that don’t meet the
    -- chosen quality bar. The default value is @AUTO@. The quality bar is
    -- based on a variety of common use cases. Low-quality detections can occur
    -- for a number of reasons. Some examples are an object that\'s
    -- misidentified as a face, a face that\'s too blurry, or a face with a
    -- pose that\'s too extreme to use. If you specify @NONE@, no filtering is
    -- performed.
    --
    -- To use quality filtering, the collection you are using must be
    -- associated with version 3 of the face model or higher.
    qualityFilter :: Prelude.Maybe QualityFilter,
    -- | The maximum number of faces to index. The value of @MaxFaces@ must be
    -- greater than or equal to 1. @IndexFaces@ returns no more than 100
    -- detected faces in an image, even if you specify a larger value for
    -- @MaxFaces@.
    --
    -- If @IndexFaces@ detects more faces than the value of @MaxFaces@, the
    -- faces with the lowest quality are filtered out first. If there are still
    -- more faces than the value of @MaxFaces@, the faces with the smallest
    -- bounding boxes are filtered out (up to the number that\'s needed to
    -- satisfy the value of @MaxFaces@). Information about the unindexed faces
    -- is available in the @UnindexedFaces@ array.
    --
    -- The faces that are returned by @IndexFaces@ are sorted by the largest
    -- face bounding box size to the smallest size, in descending order.
    --
    -- @MaxFaces@ can be used with a collection associated with any version of
    -- the face model.
    maxFaces :: Prelude.Maybe Prelude.Natural,
    -- | An array of facial attributes that you want to be returned. This can be
    -- the default list of attributes or all attributes. If you don\'t specify
    -- a value for @Attributes@ or if you specify @[\"DEFAULT\"]@, the API
    -- returns the following subset of facial attributes: @BoundingBox@,
    -- @Confidence@, @Pose@, @Quality@, and @Landmarks@. If you provide
    -- @[\"ALL\"]@, all facial attributes are returned, but the operation takes
    -- longer to complete.
    --
    -- If you provide both, @[\"ALL\", \"DEFAULT\"]@, the service uses a
    -- logical AND operator to determine which attributes to return (in this
    -- case, all attributes).
    detectionAttributes :: Prelude.Maybe [Attribute],
    -- | The ID of an existing collection to which you want to add the faces that
    -- are detected in the input images.
    collectionId :: Prelude.Text,
    -- | The input image as base64-encoded bytes or an S3 object. If you use the
    -- AWS CLI to call Amazon Rekognition operations, passing base64-encoded
    -- image bytes isn\'t supported.
    --
    -- If you are using an AWS SDK to call Amazon Rekognition, you might not
    -- need to base64-encode image bytes passed using the @Bytes@ field. For
    -- more information, see Images in the Amazon Rekognition developer guide.
    image :: Image
  }
  deriving (Prelude.Eq, Prelude.Read, Prelude.Show, Prelude.Generic)

-- |
-- Create a value of 'IndexFaces' with all optional fields omitted.
--
-- Use <https://hackage.haskell.org/package/generic-lens generic-lens> or <https://hackage.haskell.org/package/optics optics> to modify other optional fields.
--
-- The following record fields are available, with the corresponding lenses provided
-- for backwards compatibility:
--
-- 'externalImageId', 'indexFaces_externalImageId' - The ID you want to assign to all the faces detected in the image.
--
-- 'qualityFilter', 'indexFaces_qualityFilter' - A filter that specifies a quality bar for how much filtering is done to
-- identify faces. Filtered faces aren\'t indexed. If you specify @AUTO@,
-- Amazon Rekognition chooses the quality bar. If you specify @LOW@,
-- @MEDIUM@, or @HIGH@, filtering removes all faces that don’t meet the
-- chosen quality bar. The default value is @AUTO@. The quality bar is
-- based on a variety of common use cases. Low-quality detections can occur
-- for a number of reasons. Some examples are an object that\'s
-- misidentified as a face, a face that\'s too blurry, or a face with a
-- pose that\'s too extreme to use. If you specify @NONE@, no filtering is
-- performed.
--
-- To use quality filtering, the collection you are using must be
-- associated with version 3 of the face model or higher.
--
-- 'maxFaces', 'indexFaces_maxFaces' - The maximum number of faces to index. The value of @MaxFaces@ must be
-- greater than or equal to 1. @IndexFaces@ returns no more than 100
-- detected faces in an image, even if you specify a larger value for
-- @MaxFaces@.
--
-- If @IndexFaces@ detects more faces than the value of @MaxFaces@, the
-- faces with the lowest quality are filtered out first. If there are still
-- more faces than the value of @MaxFaces@, the faces with the smallest
-- bounding boxes are filtered out (up to the number that\'s needed to
-- satisfy the value of @MaxFaces@). Information about the unindexed faces
-- is available in the @UnindexedFaces@ array.
--
-- The faces that are returned by @IndexFaces@ are sorted by the largest
-- face bounding box size to the smallest size, in descending order.
--
-- @MaxFaces@ can be used with a collection associated with any version of
-- the face model.
--
-- 'detectionAttributes', 'indexFaces_detectionAttributes' - An array of facial attributes that you want to be returned. This can be
-- the default list of attributes or all attributes. If you don\'t specify
-- a value for @Attributes@ or if you specify @[\"DEFAULT\"]@, the API
-- returns the following subset of facial attributes: @BoundingBox@,
-- @Confidence@, @Pose@, @Quality@, and @Landmarks@. If you provide
-- @[\"ALL\"]@, all facial attributes are returned, but the operation takes
-- longer to complete.
--
-- If you provide both, @[\"ALL\", \"DEFAULT\"]@, the service uses a
-- logical AND operator to determine which attributes to return (in this
-- case, all attributes).
--
-- 'collectionId', 'indexFaces_collectionId' - The ID of an existing collection to which you want to add the faces that
-- are detected in the input images.
--
-- 'image', 'indexFaces_image' - The input image as base64-encoded bytes or an S3 object. If you use the
-- AWS CLI to call Amazon Rekognition operations, passing base64-encoded
-- image bytes isn\'t supported.
--
-- If you are using an AWS SDK to call Amazon Rekognition, you might not
-- need to base64-encode image bytes passed using the @Bytes@ field. For
-- more information, see Images in the Amazon Rekognition developer guide.
newIndexFaces ::
  -- | 'collectionId'
  Prelude.Text ->
  -- | 'image'
  Image ->
  IndexFaces
newIndexFaces pCollectionId_ pImage_ =
  IndexFaces'
    { externalImageId = Prelude.Nothing,
      qualityFilter = Prelude.Nothing,
      maxFaces = Prelude.Nothing,
      detectionAttributes = Prelude.Nothing,
      collectionId = pCollectionId_,
      image = pImage_
    }

-- | The ID you want to assign to all the faces detected in the image.
indexFaces_externalImageId :: Lens.Lens' IndexFaces (Prelude.Maybe Prelude.Text)
indexFaces_externalImageId = Lens.lens (\IndexFaces' {externalImageId} -> externalImageId) (\s@IndexFaces' {} a -> s {externalImageId = a} :: IndexFaces)

-- | A filter that specifies a quality bar for how much filtering is done to
-- identify faces. Filtered faces aren\'t indexed. If you specify @AUTO@,
-- Amazon Rekognition chooses the quality bar. If you specify @LOW@,
-- @MEDIUM@, or @HIGH@, filtering removes all faces that don’t meet the
-- chosen quality bar. The default value is @AUTO@. The quality bar is
-- based on a variety of common use cases. Low-quality detections can occur
-- for a number of reasons. Some examples are an object that\'s
-- misidentified as a face, a face that\'s too blurry, or a face with a
-- pose that\'s too extreme to use. If you specify @NONE@, no filtering is
-- performed.
--
-- To use quality filtering, the collection you are using must be
-- associated with version 3 of the face model or higher.
indexFaces_qualityFilter :: Lens.Lens' IndexFaces (Prelude.Maybe QualityFilter)
indexFaces_qualityFilter = Lens.lens (\IndexFaces' {qualityFilter} -> qualityFilter) (\s@IndexFaces' {} a -> s {qualityFilter = a} :: IndexFaces)

-- | The maximum number of faces to index. The value of @MaxFaces@ must be
-- greater than or equal to 1. @IndexFaces@ returns no more than 100
-- detected faces in an image, even if you specify a larger value for
-- @MaxFaces@.
--
-- If @IndexFaces@ detects more faces than the value of @MaxFaces@, the
-- faces with the lowest quality are filtered out first. If there are still
-- more faces than the value of @MaxFaces@, the faces with the smallest
-- bounding boxes are filtered out (up to the number that\'s needed to
-- satisfy the value of @MaxFaces@). Information about the unindexed faces
-- is available in the @UnindexedFaces@ array.
--
-- The faces that are returned by @IndexFaces@ are sorted by the largest
-- face bounding box size to the smallest size, in descending order.
--
-- @MaxFaces@ can be used with a collection associated with any version of
-- the face model.
indexFaces_maxFaces :: Lens.Lens' IndexFaces (Prelude.Maybe Prelude.Natural)
indexFaces_maxFaces = Lens.lens (\IndexFaces' {maxFaces} -> maxFaces) (\s@IndexFaces' {} a -> s {maxFaces = a} :: IndexFaces)

-- | An array of facial attributes that you want to be returned. This can be
-- the default list of attributes or all attributes. If you don\'t specify
-- a value for @Attributes@ or if you specify @[\"DEFAULT\"]@, the API
-- returns the following subset of facial attributes: @BoundingBox@,
-- @Confidence@, @Pose@, @Quality@, and @Landmarks@. If you provide
-- @[\"ALL\"]@, all facial attributes are returned, but the operation takes
-- longer to complete.
--
-- If you provide both, @[\"ALL\", \"DEFAULT\"]@, the service uses a
-- logical AND operator to determine which attributes to return (in this
-- case, all attributes).
indexFaces_detectionAttributes :: Lens.Lens' IndexFaces (Prelude.Maybe [Attribute])
indexFaces_detectionAttributes = Lens.lens (\IndexFaces' {detectionAttributes} -> detectionAttributes) (\s@IndexFaces' {} a -> s {detectionAttributes = a} :: IndexFaces) Prelude.. Lens.mapping Lens.coerced

-- | The ID of an existing collection to which you want to add the faces that
-- are detected in the input images.
indexFaces_collectionId :: Lens.Lens' IndexFaces Prelude.Text
indexFaces_collectionId = Lens.lens (\IndexFaces' {collectionId} -> collectionId) (\s@IndexFaces' {} a -> s {collectionId = a} :: IndexFaces)

-- | The input image as base64-encoded bytes or an S3 object. If you use the
-- AWS CLI to call Amazon Rekognition operations, passing base64-encoded
-- image bytes isn\'t supported.
--
-- If you are using an AWS SDK to call Amazon Rekognition, you might not
-- need to base64-encode image bytes passed using the @Bytes@ field. For
-- more information, see Images in the Amazon Rekognition developer guide.
indexFaces_image :: Lens.Lens' IndexFaces Image
indexFaces_image = Lens.lens (\IndexFaces' {image} -> image) (\s@IndexFaces' {} a -> s {image = a} :: IndexFaces)

instance Core.AWSRequest IndexFaces where
  type AWSResponse IndexFaces = IndexFacesResponse

  -- IndexFaces is a JSON POST against the service endpoint.
  request = Request.postJSON defaultService

  -- Decode the JSON response body; list-valued fields default to 'mempty'
  -- when absent, and the HTTP status code is folded into the response record.
  response =
    Response.receiveJSON
      ( \s h x ->
          IndexFacesResponse'
            Prelude.<$> (x Core..?> "FaceModelVersion")
            Prelude.<*> (x Core..?> "FaceRecords" Core..!@ Prelude.mempty)
            Prelude.<*> (x Core..?> "OrientationCorrection")
            Prelude.<*> ( x Core..?> "UnindexedFaces"
                            Core..!@ Prelude.mempty
                        )
            Prelude.<*> (Prelude.pure (Prelude.fromEnum s))
      )

-- Generic-derived hashing for request memoisation/retry bookkeeping.
instance Prelude.Hashable IndexFaces

-- Generic-derived full evaluation (StrictData makes the fields strict already).
instance Prelude.NFData IndexFaces

instance Core.ToHeaders IndexFaces where
  -- Headers are constant for this operation: the X-Amz-Target header names
  -- the JSON-RPC action, and the content type is AWS JSON 1.1.
  toHeaders =
    Prelude.const
      ( Prelude.mconcat
          [ "X-Amz-Target"
              Core.=# ( "RekognitionService.IndexFaces" ::
                          Prelude.ByteString
                      ),
            "Content-Type"
              Core.=# ( "application/x-amz-json-1.1" ::
                          Prelude.ByteString
                      )
          ]
      )

instance Core.ToJSON IndexFaces where
  -- Serialise the request body; optional fields are omitted (catMaybes)
  -- rather than sent as null, while CollectionId and Image are required.
  toJSON IndexFaces' {..} =
    Core.object
      ( Prelude.catMaybes
          [ ("ExternalImageId" Core..=)
              Prelude.<$> externalImageId,
            ("QualityFilter" Core..=) Prelude.<$> qualityFilter,
            ("MaxFaces" Core..=) Prelude.<$> maxFaces,
            ("DetectionAttributes" Core..=)
              Prelude.<$> detectionAttributes,
            Prelude.Just ("CollectionId" Core..= collectionId),
            Prelude.Just ("Image" Core..= image)
          ]
      )

instance Core.ToPath IndexFaces where
  -- JSON-RPC style API: every operation posts to the service root path.
  toPath = Prelude.const "/"

instance Core.ToQuery IndexFaces where
  -- All parameters travel in the JSON body; the query string is empty.
  toQuery = Prelude.const Prelude.mempty

-- | /See:/ 'newIndexFacesResponse' smart constructor.
data IndexFacesResponse = IndexFacesResponse'
  { -- | The version number of the face detection model that\'s associated with
    -- the input collection (@CollectionId@).
    faceModelVersion :: Prelude.Maybe Prelude.Text,
    -- | An array of faces detected and added to the collection. For more
    -- information, see Searching Faces in a Collection in the Amazon
    -- Rekognition Developer Guide.
    faceRecords :: Prelude.Maybe [FaceRecord],
    -- | If your collection is associated with a face detection model that\'s
    -- later than version 3.0, the value of @OrientationCorrection@ is always
    -- null and no orientation information is returned.
    --
    -- If your collection is associated with a face detection model that\'s
    -- version 3.0 or earlier, the following applies:
    --
    -- -   If the input image is in .jpeg format, it might contain exchangeable
    --     image file format (Exif) metadata that includes the image\'s
    --     orientation. Amazon Rekognition uses this orientation information to
    --     perform image correction - the bounding box coordinates are
    --     translated to represent object locations after the orientation
    --     information in the Exif metadata is used to correct the image
    --     orientation. Images in .png format don\'t contain Exif metadata. The
    --     value of @OrientationCorrection@ is null.
    --
    -- -   If the image doesn\'t contain orientation information in its Exif
    --     metadata, Amazon Rekognition returns an estimated orientation
    --     (ROTATE_0, ROTATE_90, ROTATE_180, ROTATE_270). Amazon Rekognition
    --     doesn’t perform image correction for images. The bounding box
    --     coordinates aren\'t translated and represent the object locations
    --     before the image is rotated.
    --
    -- Bounding box information is returned in the @FaceRecords@ array. You can
    -- get the version of the face detection model by calling
    -- DescribeCollection.
    orientationCorrection :: Prelude.Maybe OrientationCorrection,
    -- | An array of faces that were detected in the image but weren\'t indexed.
    -- They weren\'t indexed because the quality filter identified them as low
    -- quality, or the @MaxFaces@ request parameter filtered them out. To use
    -- the quality filter, you specify the @QualityFilter@ request parameter.
    unindexedFaces :: Prelude.Maybe [UnindexedFace],
    -- | The response's http status code.
    httpStatus :: Prelude.Int
  }
  deriving (Prelude.Eq, Prelude.Read, Prelude.Show, Prelude.Generic)

-- |
-- Create a value of 'IndexFacesResponse' with all optional fields omitted.
--
-- Use <https://hackage.haskell.org/package/generic-lens generic-lens> or <https://hackage.haskell.org/package/optics optics> to modify other optional fields.
--
-- The following record fields are available, with the corresponding lenses provided
-- for backwards compatibility:
--
-- 'faceModelVersion', 'indexFacesResponse_faceModelVersion' - The version number of the face detection model that\'s associated with
-- the input collection (@CollectionId@).
--
-- 'faceRecords', 'indexFacesResponse_faceRecords' - An array of faces detected and added to the collection. For more
-- information, see Searching Faces in a Collection in the Amazon
-- Rekognition Developer Guide.
--
-- 'orientationCorrection', 'indexFacesResponse_orientationCorrection' - If your collection is associated with a face detection model that\'s
-- later than version 3.0, the value of @OrientationCorrection@ is always
-- null and no orientation information is returned.
--
-- If your collection is associated with a face detection model that\'s
-- version 3.0 or earlier, the following applies:
--
-- -   If the input image is in .jpeg format, it might contain exchangeable
--     image file format (Exif) metadata that includes the image\'s
--     orientation. Amazon Rekognition uses this orientation information to
--     perform image correction - the bounding box coordinates are
--     translated to represent object locations after the orientation
--     information in the Exif metadata is used to correct the image
--     orientation. Images in .png format don\'t contain Exif metadata. The
--     value of @OrientationCorrection@ is null.
--
-- -   If the image doesn\'t contain orientation information in its Exif
--     metadata, Amazon Rekognition returns an estimated orientation
--     (ROTATE_0, ROTATE_90, ROTATE_180, ROTATE_270). Amazon Rekognition
--     doesn’t perform image correction for images. The bounding box
--     coordinates aren\'t translated and represent the object locations
--     before the image is rotated.
--
-- Bounding box information is returned in the @FaceRecords@ array. You can
-- get the version of the face detection model by calling
-- DescribeCollection.
--
-- 'unindexedFaces', 'indexFacesResponse_unindexedFaces' - An array of faces that were detected in the image but weren\'t indexed.
-- They weren\'t indexed because the quality filter identified them as low
-- quality, or the @MaxFaces@ request parameter filtered them out. To use
-- the quality filter, you specify the @QualityFilter@ request parameter.
--
-- 'httpStatus', 'indexFacesResponse_httpStatus' - The response's http status code.
newIndexFacesResponse ::
  -- | 'httpStatus'
  Prelude.Int ->
  IndexFacesResponse
newIndexFacesResponse pHttpStatus_ =
  IndexFacesResponse'
    { faceModelVersion = Prelude.Nothing,
      faceRecords = Prelude.Nothing,
      orientationCorrection = Prelude.Nothing,
      unindexedFaces = Prelude.Nothing,
      httpStatus = pHttpStatus_
    }

-- | The version number of the face detection model that\'s associated with
-- the input collection (@CollectionId@).
indexFacesResponse_faceModelVersion :: Lens.Lens' IndexFacesResponse (Prelude.Maybe Prelude.Text)
indexFacesResponse_faceModelVersion = Lens.lens (\IndexFacesResponse' {faceModelVersion} -> faceModelVersion) (\s@IndexFacesResponse' {} a -> s {faceModelVersion = a} :: IndexFacesResponse)

-- | An array of faces detected and added to the collection. For more
-- information, see Searching Faces in a Collection in the Amazon
-- Rekognition Developer Guide.
indexFacesResponse_faceRecords :: Lens.Lens' IndexFacesResponse (Prelude.Maybe [FaceRecord])
-- The trailing 'Lens.mapping' 'Lens.coerced' maps a coercion isomorphism
-- over the 'Prelude.Maybe', letting the stored list be viewed through any
-- representationally-equal (newtype-wrapped) list type.
indexFacesResponse_faceRecords = Lens.lens (\IndexFacesResponse' {faceRecords} -> faceRecords) (\s@IndexFacesResponse' {} a -> s {faceRecords = a} :: IndexFacesResponse) Prelude.. Lens.mapping Lens.coerced

-- | If your collection is associated with a face detection model that\'s
-- later than version 3.0, the value of @OrientationCorrection@ is always
-- null and no orientation information is returned.
--
-- If your collection is associated with a face detection model that\'s
-- version 3.0 or earlier, the following applies:
--
-- -   If the input image is in .jpeg format, it might contain exchangeable
--     image file format (Exif) metadata that includes the image\'s
--     orientation. Amazon Rekognition uses this orientation information to
--     perform image correction - the bounding box coordinates are
--     translated to represent object locations after the orientation
--     information in the Exif metadata is used to correct the image
--     orientation. Images in .png format don\'t contain Exif metadata. The
--     value of @OrientationCorrection@ is null.
--
-- -   If the image doesn\'t contain orientation information in its Exif
--     metadata, Amazon Rekognition returns an estimated orientation
--     (ROTATE_0, ROTATE_90, ROTATE_180, ROTATE_270). Amazon Rekognition
--     doesn’t perform image correction for images. The bounding box
--     coordinates aren\'t translated and represent the object locations
--     before the image is rotated.
--
-- Bounding box information is returned in the @FaceRecords@ array. You can
-- get the version of the face detection model by calling
-- DescribeCollection.
indexFacesResponse_orientationCorrection :: Lens.Lens' IndexFacesResponse (Prelude.Maybe OrientationCorrection)
indexFacesResponse_orientationCorrection = Lens.lens (\IndexFacesResponse' {orientationCorrection} -> orientationCorrection) (\s@IndexFacesResponse' {} a -> s {orientationCorrection = a} :: IndexFacesResponse)

-- | An array of faces that were detected in the image but weren\'t indexed.
-- They weren\'t indexed because the quality filter identified them as low
-- quality, or the @MaxFaces@ request parameter filtered them out. To use
-- the quality filter, you specify the @QualityFilter@ request parameter.
indexFacesResponse_unindexedFaces :: Lens.Lens' IndexFacesResponse (Prelude.Maybe [UnindexedFace])
-- The trailing 'Lens.mapping' 'Lens.coerced' maps a coercion isomorphism
-- over the 'Prelude.Maybe', letting the stored list be viewed through any
-- representationally-equal (newtype-wrapped) list type.
indexFacesResponse_unindexedFaces = Lens.lens (\IndexFacesResponse' {unindexedFaces} -> unindexedFaces) (\s@IndexFacesResponse' {} a -> s {unindexedFaces = a} :: IndexFacesResponse) Prelude.. Lens.mapping Lens.coerced

-- | The response's http status code.
indexFacesResponse_httpStatus :: Lens.Lens' IndexFacesResponse Prelude.Int
indexFacesResponse_httpStatus = Lens.lens (\IndexFacesResponse' {httpStatus} -> httpStatus) (\s@IndexFacesResponse' {} a -> s {httpStatus = a} :: IndexFacesResponse)

-- Empty instance: relies on the class's default 'rnf' method
-- (presumably derived via 'Prelude.Generic' — the type derives Generic
-- above; confirm against the re-exported 'Prelude.NFData' definition).
instance Prelude.NFData IndexFacesResponse