| Copyright    | (c) 2013-2021 Brendan Hay |
|--------------|---------------------------|
| License      | Mozilla Public License, v. 2.0. |
| Maintainer   | Brendan Hay <brendan.g.hay+amazonka@gmail.com> |
| Stability    | auto-generated |
| Portability  | non-portable (GHC extensions) |
| Safe Haskell | None |
- Service Configuration
- Errors
- Attribute
- BodyPart
- CelebrityRecognitionSortBy
- ContentClassifier
- ContentModerationSortBy
- EmotionName
- FaceAttributes
- FaceSearchSortBy
- GenderType
- KnownGenderType
- LabelDetectionSortBy
- LandmarkType
- OrientationCorrection
- PersonTrackingSortBy
- ProjectStatus
- ProjectVersionStatus
- ProtectiveEquipmentType
- QualityFilter
- Reason
- SegmentType
- StreamProcessorStatus
- TechnicalCueType
- TextTypes
- VideoColorRange
- VideoJobStatus
- AgeRange
- Asset
- AudioMetadata
- Beard
- BlackFrame
- BoundingBox
- Celebrity
- CelebrityDetail
- CelebrityRecognition
- CompareFacesMatch
- ComparedFace
- ComparedSourceImageFace
- ContentModerationDetection
- CoversBodyPart
- CustomLabel
- DetectTextFilters
- DetectionFilter
- Emotion
- EquipmentDetection
- EvaluationResult
- EyeOpen
- Eyeglasses
- Face
- FaceDetail
- FaceDetection
- FaceMatch
- FaceRecord
- FaceSearchSettings
- Gender
- Geometry
- GroundTruthManifest
- HumanLoopActivationOutput
- HumanLoopConfig
- HumanLoopDataAttributes
- Image
- ImageQuality
- Instance
- KinesisDataStream
- KinesisVideoStream
- KnownGender
- Label
- LabelDetection
- Landmark
- ModerationLabel
- MouthOpen
- Mustache
- NotificationChannel
- OutputConfig
- Parent
- PersonDetail
- PersonDetection
- PersonMatch
- Point
- Pose
- ProjectDescription
- ProjectVersionDescription
- ProtectiveEquipmentBodyPart
- ProtectiveEquipmentPerson
- ProtectiveEquipmentSummarizationAttributes
- ProtectiveEquipmentSummary
- RegionOfInterest
- S3Object
- SegmentDetection
- SegmentTypeInfo
- ShotSegment
- Smile
- StartSegmentDetectionFilters
- StartShotDetectionFilter
- StartTechnicalCueDetectionFilter
- StartTextDetectionFilters
- StreamProcessor
- StreamProcessorInput
- StreamProcessorOutput
- StreamProcessorSettings
- Summary
- Sunglasses
- TechnicalCueSegment
- TestingData
- TestingDataResult
- TextDetection
- TextDetectionResult
- TrainingData
- TrainingDataResult
- UnindexedFace
- ValidationData
- Video
- VideoMetadata
Synopsis
- defaultService :: Service
- _AccessDeniedException :: AsError a => Getting (First ServiceError) a ServiceError
- _VideoTooLargeException :: AsError a => Getting (First ServiceError) a ServiceError
- _InvalidParameterException :: AsError a => Getting (First ServiceError) a ServiceError
- _InvalidImageFormatException :: AsError a => Getting (First ServiceError) a ServiceError
- _ResourceAlreadyExistsException :: AsError a => Getting (First ServiceError) a ServiceError
- _InvalidS3ObjectException :: AsError a => Getting (First ServiceError) a ServiceError
- _ProvisionedThroughputExceededException :: AsError a => Getting (First ServiceError) a ServiceError
- _ImageTooLargeException :: AsError a => Getting (First ServiceError) a ServiceError
- _ServiceQuotaExceededException :: AsError a => Getting (First ServiceError) a ServiceError
- _ThrottlingException :: AsError a => Getting (First ServiceError) a ServiceError
- _InternalServerError :: AsError a => Getting (First ServiceError) a ServiceError
- _IdempotentParameterMismatchException :: AsError a => Getting (First ServiceError) a ServiceError
- _ResourceNotReadyException :: AsError a => Getting (First ServiceError) a ServiceError
- _ResourceNotFoundException :: AsError a => Getting (First ServiceError) a ServiceError
- _HumanLoopQuotaExceededException :: AsError a => Getting (First ServiceError) a ServiceError
- _InvalidPaginationTokenException :: AsError a => Getting (First ServiceError) a ServiceError
- _LimitExceededException :: AsError a => Getting (First ServiceError) a ServiceError
- _ResourceInUseException :: AsError a => Getting (First ServiceError) a ServiceError
- newtype Attribute where
- Attribute' { }
- pattern Attribute_ALL :: Attribute
- pattern Attribute_DEFAULT :: Attribute
- newtype BodyPart where
- BodyPart' { fromBodyPart :: Text }
- pattern BodyPart_FACE :: BodyPart
- pattern BodyPart_HEAD :: BodyPart
- pattern BodyPart_LEFT_HAND :: BodyPart
- pattern BodyPart_RIGHT_HAND :: BodyPart
- newtype CelebrityRecognitionSortBy where
- newtype ContentClassifier where
- newtype ContentModerationSortBy where
- newtype EmotionName where
- EmotionName' { }
- pattern EmotionName_ANGRY :: EmotionName
- pattern EmotionName_CALM :: EmotionName
- pattern EmotionName_CONFUSED :: EmotionName
- pattern EmotionName_DISGUSTED :: EmotionName
- pattern EmotionName_FEAR :: EmotionName
- pattern EmotionName_HAPPY :: EmotionName
- pattern EmotionName_SAD :: EmotionName
- pattern EmotionName_SURPRISED :: EmotionName
- pattern EmotionName_UNKNOWN :: EmotionName
- newtype FaceAttributes where
- FaceAttributes' { }
- pattern FaceAttributes_ALL :: FaceAttributes
- pattern FaceAttributes_DEFAULT :: FaceAttributes
- newtype FaceSearchSortBy where
- FaceSearchSortBy' { }
- pattern FaceSearchSortBy_INDEX :: FaceSearchSortBy
- pattern FaceSearchSortBy_TIMESTAMP :: FaceSearchSortBy
- newtype GenderType where
- GenderType' { }
- pattern GenderType_Female :: GenderType
- pattern GenderType_Male :: GenderType
- newtype KnownGenderType where
- KnownGenderType' { }
- pattern KnownGenderType_Female :: KnownGenderType
- pattern KnownGenderType_Male :: KnownGenderType
- newtype LabelDetectionSortBy where
- newtype LandmarkType where
- LandmarkType' { }
- pattern LandmarkType_ChinBottom :: LandmarkType
- pattern LandmarkType_EyeLeft :: LandmarkType
- pattern LandmarkType_EyeRight :: LandmarkType
- pattern LandmarkType_LeftEyeBrowLeft :: LandmarkType
- pattern LandmarkType_LeftEyeBrowRight :: LandmarkType
- pattern LandmarkType_LeftEyeBrowUp :: LandmarkType
- pattern LandmarkType_LeftEyeDown :: LandmarkType
- pattern LandmarkType_LeftEyeLeft :: LandmarkType
- pattern LandmarkType_LeftEyeRight :: LandmarkType
- pattern LandmarkType_LeftEyeUp :: LandmarkType
- pattern LandmarkType_LeftPupil :: LandmarkType
- pattern LandmarkType_MidJawlineLeft :: LandmarkType
- pattern LandmarkType_MidJawlineRight :: LandmarkType
- pattern LandmarkType_MouthDown :: LandmarkType
- pattern LandmarkType_MouthLeft :: LandmarkType
- pattern LandmarkType_MouthRight :: LandmarkType
- pattern LandmarkType_MouthUp :: LandmarkType
- pattern LandmarkType_Nose :: LandmarkType
- pattern LandmarkType_NoseLeft :: LandmarkType
- pattern LandmarkType_NoseRight :: LandmarkType
- pattern LandmarkType_RightEyeBrowLeft :: LandmarkType
- pattern LandmarkType_RightEyeBrowRight :: LandmarkType
- pattern LandmarkType_RightEyeBrowUp :: LandmarkType
- pattern LandmarkType_RightEyeDown :: LandmarkType
- pattern LandmarkType_RightEyeLeft :: LandmarkType
- pattern LandmarkType_RightEyeRight :: LandmarkType
- pattern LandmarkType_RightEyeUp :: LandmarkType
- pattern LandmarkType_RightPupil :: LandmarkType
- pattern LandmarkType_UpperJawlineLeft :: LandmarkType
- pattern LandmarkType_UpperJawlineRight :: LandmarkType
- newtype OrientationCorrection where
- newtype PersonTrackingSortBy where
- newtype ProjectStatus where
- ProjectStatus' { }
- pattern ProjectStatus_CREATED :: ProjectStatus
- pattern ProjectStatus_CREATING :: ProjectStatus
- pattern ProjectStatus_DELETING :: ProjectStatus
- newtype ProjectVersionStatus where
- ProjectVersionStatus' { }
- pattern ProjectVersionStatus_DELETING :: ProjectVersionStatus
- pattern ProjectVersionStatus_FAILED :: ProjectVersionStatus
- pattern ProjectVersionStatus_RUNNING :: ProjectVersionStatus
- pattern ProjectVersionStatus_STARTING :: ProjectVersionStatus
- pattern ProjectVersionStatus_STOPPED :: ProjectVersionStatus
- pattern ProjectVersionStatus_STOPPING :: ProjectVersionStatus
- pattern ProjectVersionStatus_TRAINING_COMPLETED :: ProjectVersionStatus
- pattern ProjectVersionStatus_TRAINING_FAILED :: ProjectVersionStatus
- pattern ProjectVersionStatus_TRAINING_IN_PROGRESS :: ProjectVersionStatus
- newtype ProtectiveEquipmentType where
- newtype QualityFilter where
- QualityFilter' { }
- pattern QualityFilter_AUTO :: QualityFilter
- pattern QualityFilter_HIGH :: QualityFilter
- pattern QualityFilter_LOW :: QualityFilter
- pattern QualityFilter_MEDIUM :: QualityFilter
- pattern QualityFilter_NONE :: QualityFilter
- newtype Reason where
- Reason' { fromReason :: Text }
- pattern Reason_EXCEEDS_MAX_FACES :: Reason
- pattern Reason_EXTREME_POSE :: Reason
- pattern Reason_LOW_BRIGHTNESS :: Reason
- pattern Reason_LOW_CONFIDENCE :: Reason
- pattern Reason_LOW_FACE_QUALITY :: Reason
- pattern Reason_LOW_SHARPNESS :: Reason
- pattern Reason_SMALL_BOUNDING_BOX :: Reason
- newtype SegmentType where
- SegmentType' { }
- pattern SegmentType_SHOT :: SegmentType
- pattern SegmentType_TECHNICAL_CUE :: SegmentType
- newtype StreamProcessorStatus where
- StreamProcessorStatus' { }
- pattern StreamProcessorStatus_FAILED :: StreamProcessorStatus
- pattern StreamProcessorStatus_RUNNING :: StreamProcessorStatus
- pattern StreamProcessorStatus_STARTING :: StreamProcessorStatus
- pattern StreamProcessorStatus_STOPPED :: StreamProcessorStatus
- pattern StreamProcessorStatus_STOPPING :: StreamProcessorStatus
- newtype TechnicalCueType where
- TechnicalCueType' { }
- pattern TechnicalCueType_BlackFrames :: TechnicalCueType
- pattern TechnicalCueType_ColorBars :: TechnicalCueType
- pattern TechnicalCueType_Content :: TechnicalCueType
- pattern TechnicalCueType_EndCredits :: TechnicalCueType
- pattern TechnicalCueType_OpeningCredits :: TechnicalCueType
- pattern TechnicalCueType_Slate :: TechnicalCueType
- pattern TechnicalCueType_StudioLogo :: TechnicalCueType
- newtype TextTypes where
- TextTypes' { }
- pattern TextTypes_LINE :: TextTypes
- pattern TextTypes_WORD :: TextTypes
- newtype VideoColorRange where
- VideoColorRange' { }
- pattern VideoColorRange_FULL :: VideoColorRange
- pattern VideoColorRange_LIMITED :: VideoColorRange
- newtype VideoJobStatus where
- VideoJobStatus' { }
- pattern VideoJobStatus_FAILED :: VideoJobStatus
- pattern VideoJobStatus_IN_PROGRESS :: VideoJobStatus
- pattern VideoJobStatus_SUCCEEDED :: VideoJobStatus
- data AgeRange = AgeRange' {}
- newAgeRange :: AgeRange
- ageRange_low :: Lens' AgeRange (Maybe Natural)
- ageRange_high :: Lens' AgeRange (Maybe Natural)
- data Asset = Asset' {}
- newAsset :: Asset
- asset_groundTruthManifest :: Lens' Asset (Maybe GroundTruthManifest)
- data AudioMetadata = AudioMetadata' {}
- newAudioMetadata :: AudioMetadata
- audioMetadata_codec :: Lens' AudioMetadata (Maybe Text)
- audioMetadata_sampleRate :: Lens' AudioMetadata (Maybe Natural)
- audioMetadata_numberOfChannels :: Lens' AudioMetadata (Maybe Natural)
- audioMetadata_durationMillis :: Lens' AudioMetadata (Maybe Natural)
- data Beard = Beard' {}
- newBeard :: Beard
- beard_value :: Lens' Beard (Maybe Bool)
- beard_confidence :: Lens' Beard (Maybe Double)
- data BlackFrame = BlackFrame' {}
- newBlackFrame :: BlackFrame
- blackFrame_maxPixelThreshold :: Lens' BlackFrame (Maybe Double)
- blackFrame_minCoveragePercentage :: Lens' BlackFrame (Maybe Double)
- data BoundingBox = BoundingBox' {}
- newBoundingBox :: BoundingBox
- boundingBox_height :: Lens' BoundingBox (Maybe Double)
- boundingBox_left :: Lens' BoundingBox (Maybe Double)
- boundingBox_width :: Lens' BoundingBox (Maybe Double)
- boundingBox_top :: Lens' BoundingBox (Maybe Double)
- data Celebrity = Celebrity' {
- matchConfidence :: Maybe Double
- urls :: Maybe [Text]
- knownGender :: Maybe KnownGender
- name :: Maybe Text
- id :: Maybe Text
- face :: Maybe ComparedFace
- newCelebrity :: Celebrity
- celebrity_matchConfidence :: Lens' Celebrity (Maybe Double)
- celebrity_urls :: Lens' Celebrity (Maybe [Text])
- celebrity_knownGender :: Lens' Celebrity (Maybe KnownGender)
- celebrity_name :: Lens' Celebrity (Maybe Text)
- celebrity_id :: Lens' Celebrity (Maybe Text)
- celebrity_face :: Lens' Celebrity (Maybe ComparedFace)
- data CelebrityDetail = CelebrityDetail' {
- boundingBox :: Maybe BoundingBox
- urls :: Maybe [Text]
- confidence :: Maybe Double
- name :: Maybe Text
- id :: Maybe Text
- face :: Maybe FaceDetail
- newCelebrityDetail :: CelebrityDetail
- celebrityDetail_boundingBox :: Lens' CelebrityDetail (Maybe BoundingBox)
- celebrityDetail_urls :: Lens' CelebrityDetail (Maybe [Text])
- celebrityDetail_confidence :: Lens' CelebrityDetail (Maybe Double)
- celebrityDetail_name :: Lens' CelebrityDetail (Maybe Text)
- celebrityDetail_id :: Lens' CelebrityDetail (Maybe Text)
- celebrityDetail_face :: Lens' CelebrityDetail (Maybe FaceDetail)
- data CelebrityRecognition = CelebrityRecognition' {}
- newCelebrityRecognition :: CelebrityRecognition
- celebrityRecognition_celebrity :: Lens' CelebrityRecognition (Maybe CelebrityDetail)
- celebrityRecognition_timestamp :: Lens' CelebrityRecognition (Maybe Integer)
- data CompareFacesMatch = CompareFacesMatch' {}
- newCompareFacesMatch :: CompareFacesMatch
- compareFacesMatch_similarity :: Lens' CompareFacesMatch (Maybe Double)
- compareFacesMatch_face :: Lens' CompareFacesMatch (Maybe ComparedFace)
- data ComparedFace = ComparedFace' {}
- newComparedFace :: ComparedFace
- comparedFace_boundingBox :: Lens' ComparedFace (Maybe BoundingBox)
- comparedFace_emotions :: Lens' ComparedFace (Maybe [Emotion])
- comparedFace_pose :: Lens' ComparedFace (Maybe Pose)
- comparedFace_confidence :: Lens' ComparedFace (Maybe Double)
- comparedFace_quality :: Lens' ComparedFace (Maybe ImageQuality)
- comparedFace_smile :: Lens' ComparedFace (Maybe Smile)
- comparedFace_landmarks :: Lens' ComparedFace (Maybe [Landmark])
- data ComparedSourceImageFace = ComparedSourceImageFace' {}
- newComparedSourceImageFace :: ComparedSourceImageFace
- comparedSourceImageFace_boundingBox :: Lens' ComparedSourceImageFace (Maybe BoundingBox)
- comparedSourceImageFace_confidence :: Lens' ComparedSourceImageFace (Maybe Double)
- data ContentModerationDetection = ContentModerationDetection' {}
- newContentModerationDetection :: ContentModerationDetection
- contentModerationDetection_moderationLabel :: Lens' ContentModerationDetection (Maybe ModerationLabel)
- contentModerationDetection_timestamp :: Lens' ContentModerationDetection (Maybe Integer)
- data CoversBodyPart = CoversBodyPart' {}
- newCoversBodyPart :: CoversBodyPart
- coversBodyPart_value :: Lens' CoversBodyPart (Maybe Bool)
- coversBodyPart_confidence :: Lens' CoversBodyPart (Maybe Double)
- data CustomLabel = CustomLabel' {}
- newCustomLabel :: CustomLabel
- customLabel_confidence :: Lens' CustomLabel (Maybe Double)
- customLabel_name :: Lens' CustomLabel (Maybe Text)
- customLabel_geometry :: Lens' CustomLabel (Maybe Geometry)
- data DetectTextFilters = DetectTextFilters' {}
- newDetectTextFilters :: DetectTextFilters
- detectTextFilters_regionsOfInterest :: Lens' DetectTextFilters (Maybe [RegionOfInterest])
- detectTextFilters_wordFilter :: Lens' DetectTextFilters (Maybe DetectionFilter)
- data DetectionFilter = DetectionFilter' {}
- newDetectionFilter :: DetectionFilter
- detectionFilter_minBoundingBoxHeight :: Lens' DetectionFilter (Maybe Double)
- detectionFilter_minBoundingBoxWidth :: Lens' DetectionFilter (Maybe Double)
- detectionFilter_minConfidence :: Lens' DetectionFilter (Maybe Double)
- data Emotion = Emotion' {}
- newEmotion :: Emotion
- emotion_confidence :: Lens' Emotion (Maybe Double)
- emotion_type :: Lens' Emotion (Maybe EmotionName)
- data EquipmentDetection = EquipmentDetection' {}
- newEquipmentDetection :: EquipmentDetection
- equipmentDetection_boundingBox :: Lens' EquipmentDetection (Maybe BoundingBox)
- equipmentDetection_coversBodyPart :: Lens' EquipmentDetection (Maybe CoversBodyPart)
- equipmentDetection_confidence :: Lens' EquipmentDetection (Maybe Double)
- equipmentDetection_type :: Lens' EquipmentDetection (Maybe ProtectiveEquipmentType)
- data EvaluationResult = EvaluationResult' {}
- newEvaluationResult :: EvaluationResult
- evaluationResult_summary :: Lens' EvaluationResult (Maybe Summary)
- evaluationResult_f1Score :: Lens' EvaluationResult (Maybe Double)
- data EyeOpen = EyeOpen' {}
- newEyeOpen :: EyeOpen
- eyeOpen_value :: Lens' EyeOpen (Maybe Bool)
- eyeOpen_confidence :: Lens' EyeOpen (Maybe Double)
- data Eyeglasses = Eyeglasses' {}
- newEyeglasses :: Eyeglasses
- eyeglasses_value :: Lens' Eyeglasses (Maybe Bool)
- eyeglasses_confidence :: Lens' Eyeglasses (Maybe Double)
- data Face = Face' {}
- newFace :: Face
- face_faceId :: Lens' Face (Maybe Text)
- face_boundingBox :: Lens' Face (Maybe BoundingBox)
- face_externalImageId :: Lens' Face (Maybe Text)
- face_confidence :: Lens' Face (Maybe Double)
- face_imageId :: Lens' Face (Maybe Text)
- data FaceDetail = FaceDetail' {
- ageRange :: Maybe AgeRange
- sunglasses :: Maybe Sunglasses
- mouthOpen :: Maybe MouthOpen
- boundingBox :: Maybe BoundingBox
- emotions :: Maybe [Emotion]
- eyesOpen :: Maybe EyeOpen
- pose :: Maybe Pose
- confidence :: Maybe Double
- gender :: Maybe Gender
- quality :: Maybe ImageQuality
- eyeglasses :: Maybe Eyeglasses
- beard :: Maybe Beard
- mustache :: Maybe Mustache
- smile :: Maybe Smile
- landmarks :: Maybe [Landmark]
- newFaceDetail :: FaceDetail
- faceDetail_ageRange :: Lens' FaceDetail (Maybe AgeRange)
- faceDetail_sunglasses :: Lens' FaceDetail (Maybe Sunglasses)
- faceDetail_mouthOpen :: Lens' FaceDetail (Maybe MouthOpen)
- faceDetail_boundingBox :: Lens' FaceDetail (Maybe BoundingBox)
- faceDetail_emotions :: Lens' FaceDetail (Maybe [Emotion])
- faceDetail_eyesOpen :: Lens' FaceDetail (Maybe EyeOpen)
- faceDetail_pose :: Lens' FaceDetail (Maybe Pose)
- faceDetail_confidence :: Lens' FaceDetail (Maybe Double)
- faceDetail_gender :: Lens' FaceDetail (Maybe Gender)
- faceDetail_quality :: Lens' FaceDetail (Maybe ImageQuality)
- faceDetail_eyeglasses :: Lens' FaceDetail (Maybe Eyeglasses)
- faceDetail_beard :: Lens' FaceDetail (Maybe Beard)
- faceDetail_mustache :: Lens' FaceDetail (Maybe Mustache)
- faceDetail_smile :: Lens' FaceDetail (Maybe Smile)
- faceDetail_landmarks :: Lens' FaceDetail (Maybe [Landmark])
- data FaceDetection = FaceDetection' {}
- newFaceDetection :: FaceDetection
- faceDetection_timestamp :: Lens' FaceDetection (Maybe Integer)
- faceDetection_face :: Lens' FaceDetection (Maybe FaceDetail)
- data FaceMatch = FaceMatch' {}
- newFaceMatch :: FaceMatch
- faceMatch_similarity :: Lens' FaceMatch (Maybe Double)
- faceMatch_face :: Lens' FaceMatch (Maybe Face)
- data FaceRecord = FaceRecord' {
- faceDetail :: Maybe FaceDetail
- face :: Maybe Face
- newFaceRecord :: FaceRecord
- faceRecord_faceDetail :: Lens' FaceRecord (Maybe FaceDetail)
- faceRecord_face :: Lens' FaceRecord (Maybe Face)
- data FaceSearchSettings = FaceSearchSettings' {}
- newFaceSearchSettings :: FaceSearchSettings
- faceSearchSettings_faceMatchThreshold :: Lens' FaceSearchSettings (Maybe Double)
- faceSearchSettings_collectionId :: Lens' FaceSearchSettings (Maybe Text)
- data Gender = Gender' {}
- newGender :: Gender
- gender_value :: Lens' Gender (Maybe GenderType)
- gender_confidence :: Lens' Gender (Maybe Double)
- data Geometry = Geometry' {
- boundingBox :: Maybe BoundingBox
- polygon :: Maybe [Point]
- newGeometry :: Geometry
- geometry_boundingBox :: Lens' Geometry (Maybe BoundingBox)
- geometry_polygon :: Lens' Geometry (Maybe [Point])
- data GroundTruthManifest = GroundTruthManifest' {}
- newGroundTruthManifest :: GroundTruthManifest
- groundTruthManifest_s3Object :: Lens' GroundTruthManifest (Maybe S3Object)
- data HumanLoopActivationOutput = HumanLoopActivationOutput' {}
- newHumanLoopActivationOutput :: HumanLoopActivationOutput
- humanLoopActivationOutput_humanLoopActivationReasons :: Lens' HumanLoopActivationOutput (Maybe (NonEmpty Text))
- humanLoopActivationOutput_humanLoopArn :: Lens' HumanLoopActivationOutput (Maybe Text)
- humanLoopActivationOutput_humanLoopActivationConditionsEvaluationResults :: Lens' HumanLoopActivationOutput (Maybe Text)
- data HumanLoopConfig = HumanLoopConfig' {}
- newHumanLoopConfig :: Text -> Text -> HumanLoopConfig
- humanLoopConfig_dataAttributes :: Lens' HumanLoopConfig (Maybe HumanLoopDataAttributes)
- humanLoopConfig_humanLoopName :: Lens' HumanLoopConfig Text
- humanLoopConfig_flowDefinitionArn :: Lens' HumanLoopConfig Text
- data HumanLoopDataAttributes = HumanLoopDataAttributes' {}
- newHumanLoopDataAttributes :: HumanLoopDataAttributes
- humanLoopDataAttributes_contentClassifiers :: Lens' HumanLoopDataAttributes (Maybe [ContentClassifier])
- data Image = Image' {}
- newImage :: Image
- image_s3Object :: Lens' Image (Maybe S3Object)
- image_bytes :: Lens' Image (Maybe ByteString)
- data ImageQuality = ImageQuality' {}
- newImageQuality :: ImageQuality
- imageQuality_sharpness :: Lens' ImageQuality (Maybe Double)
- imageQuality_brightness :: Lens' ImageQuality (Maybe Double)
- data Instance = Instance' {}
- newInstance :: Instance
- instance_boundingBox :: Lens' Instance (Maybe BoundingBox)
- instance_confidence :: Lens' Instance (Maybe Double)
- data KinesisDataStream = KinesisDataStream' {}
- newKinesisDataStream :: KinesisDataStream
- kinesisDataStream_arn :: Lens' KinesisDataStream (Maybe Text)
- data KinesisVideoStream = KinesisVideoStream' {}
- newKinesisVideoStream :: KinesisVideoStream
- kinesisVideoStream_arn :: Lens' KinesisVideoStream (Maybe Text)
- data KnownGender = KnownGender' {}
- newKnownGender :: KnownGender
- knownGender_type :: Lens' KnownGender (Maybe KnownGenderType)
- data Label = Label' {}
- newLabel :: Label
- label_confidence :: Lens' Label (Maybe Double)
- label_parents :: Lens' Label (Maybe [Parent])
- label_name :: Lens' Label (Maybe Text)
- label_instances :: Lens' Label (Maybe [Instance])
- data LabelDetection = LabelDetection' {}
- newLabelDetection :: LabelDetection
- labelDetection_label :: Lens' LabelDetection (Maybe Label)
- labelDetection_timestamp :: Lens' LabelDetection (Maybe Integer)
- data Landmark = Landmark' {}
- newLandmark :: Landmark
- landmark_type :: Lens' Landmark (Maybe LandmarkType)
- landmark_x :: Lens' Landmark (Maybe Double)
- landmark_y :: Lens' Landmark (Maybe Double)
- data ModerationLabel = ModerationLabel' {
- confidence :: Maybe Double
- name :: Maybe Text
- parentName :: Maybe Text
- newModerationLabel :: ModerationLabel
- moderationLabel_confidence :: Lens' ModerationLabel (Maybe Double)
- moderationLabel_name :: Lens' ModerationLabel (Maybe Text)
- moderationLabel_parentName :: Lens' ModerationLabel (Maybe Text)
- data MouthOpen = MouthOpen' {}
- newMouthOpen :: MouthOpen
- mouthOpen_value :: Lens' MouthOpen (Maybe Bool)
- mouthOpen_confidence :: Lens' MouthOpen (Maybe Double)
- data Mustache = Mustache' {}
- newMustache :: Mustache
- mustache_value :: Lens' Mustache (Maybe Bool)
- mustache_confidence :: Lens' Mustache (Maybe Double)
- data NotificationChannel = NotificationChannel' {
- sNSTopicArn :: Text
- roleArn :: Text
- newNotificationChannel :: Text -> Text -> NotificationChannel
- notificationChannel_sNSTopicArn :: Lens' NotificationChannel Text
- notificationChannel_roleArn :: Lens' NotificationChannel Text
- data OutputConfig = OutputConfig' {}
- newOutputConfig :: OutputConfig
- outputConfig_s3KeyPrefix :: Lens' OutputConfig (Maybe Text)
- outputConfig_s3Bucket :: Lens' OutputConfig (Maybe Text)
- data Parent = Parent' {}
- newParent :: Parent
- parent_name :: Lens' Parent (Maybe Text)
- data PersonDetail = PersonDetail' {}
- newPersonDetail :: PersonDetail
- personDetail_boundingBox :: Lens' PersonDetail (Maybe BoundingBox)
- personDetail_index :: Lens' PersonDetail (Maybe Integer)
- personDetail_face :: Lens' PersonDetail (Maybe FaceDetail)
- data PersonDetection = PersonDetection' {}
- newPersonDetection :: PersonDetection
- personDetection_person :: Lens' PersonDetection (Maybe PersonDetail)
- personDetection_timestamp :: Lens' PersonDetection (Maybe Integer)
- data PersonMatch = PersonMatch' {}
- newPersonMatch :: PersonMatch
- personMatch_faceMatches :: Lens' PersonMatch (Maybe [FaceMatch])
- personMatch_person :: Lens' PersonMatch (Maybe PersonDetail)
- personMatch_timestamp :: Lens' PersonMatch (Maybe Integer)
- data Point = Point' {}
- newPoint :: Point
- point_x :: Lens' Point (Maybe Double)
- point_y :: Lens' Point (Maybe Double)
- data Pose = Pose' {}
- newPose :: Pose
- pose_yaw :: Lens' Pose (Maybe Double)
- pose_roll :: Lens' Pose (Maybe Double)
- pose_pitch :: Lens' Pose (Maybe Double)
- data ProjectDescription = ProjectDescription' {}
- newProjectDescription :: ProjectDescription
- projectDescription_status :: Lens' ProjectDescription (Maybe ProjectStatus)
- projectDescription_creationTimestamp :: Lens' ProjectDescription (Maybe UTCTime)
- projectDescription_projectArn :: Lens' ProjectDescription (Maybe Text)
- data ProjectVersionDescription = ProjectVersionDescription' {
- minInferenceUnits :: Maybe Natural
- status :: Maybe ProjectVersionStatus
- evaluationResult :: Maybe EvaluationResult
- manifestSummary :: Maybe GroundTruthManifest
- kmsKeyId :: Maybe Text
- testingDataResult :: Maybe TestingDataResult
- statusMessage :: Maybe Text
- creationTimestamp :: Maybe POSIX
- projectVersionArn :: Maybe Text
- outputConfig :: Maybe OutputConfig
- billableTrainingTimeInSeconds :: Maybe Natural
- trainingEndTimestamp :: Maybe POSIX
- trainingDataResult :: Maybe TrainingDataResult
- newProjectVersionDescription :: ProjectVersionDescription
- projectVersionDescription_minInferenceUnits :: Lens' ProjectVersionDescription (Maybe Natural)
- projectVersionDescription_status :: Lens' ProjectVersionDescription (Maybe ProjectVersionStatus)
- projectVersionDescription_evaluationResult :: Lens' ProjectVersionDescription (Maybe EvaluationResult)
- projectVersionDescription_manifestSummary :: Lens' ProjectVersionDescription (Maybe GroundTruthManifest)
- projectVersionDescription_kmsKeyId :: Lens' ProjectVersionDescription (Maybe Text)
- projectVersionDescription_testingDataResult :: Lens' ProjectVersionDescription (Maybe TestingDataResult)
- projectVersionDescription_statusMessage :: Lens' ProjectVersionDescription (Maybe Text)
- projectVersionDescription_creationTimestamp :: Lens' ProjectVersionDescription (Maybe UTCTime)
- projectVersionDescription_projectVersionArn :: Lens' ProjectVersionDescription (Maybe Text)
- projectVersionDescription_outputConfig :: Lens' ProjectVersionDescription (Maybe OutputConfig)
- projectVersionDescription_billableTrainingTimeInSeconds :: Lens' ProjectVersionDescription (Maybe Natural)
- projectVersionDescription_trainingEndTimestamp :: Lens' ProjectVersionDescription (Maybe UTCTime)
- projectVersionDescription_trainingDataResult :: Lens' ProjectVersionDescription (Maybe TrainingDataResult)
- data ProtectiveEquipmentBodyPart = ProtectiveEquipmentBodyPart' {}
- newProtectiveEquipmentBodyPart :: ProtectiveEquipmentBodyPart
- protectiveEquipmentBodyPart_equipmentDetections :: Lens' ProtectiveEquipmentBodyPart (Maybe [EquipmentDetection])
- protectiveEquipmentBodyPart_confidence :: Lens' ProtectiveEquipmentBodyPart (Maybe Double)
- protectiveEquipmentBodyPart_name :: Lens' ProtectiveEquipmentBodyPart (Maybe BodyPart)
- data ProtectiveEquipmentPerson = ProtectiveEquipmentPerson' {}
- newProtectiveEquipmentPerson :: ProtectiveEquipmentPerson
- protectiveEquipmentPerson_bodyParts :: Lens' ProtectiveEquipmentPerson (Maybe [ProtectiveEquipmentBodyPart])
- protectiveEquipmentPerson_boundingBox :: Lens' ProtectiveEquipmentPerson (Maybe BoundingBox)
- protectiveEquipmentPerson_confidence :: Lens' ProtectiveEquipmentPerson (Maybe Double)
- protectiveEquipmentPerson_id :: Lens' ProtectiveEquipmentPerson (Maybe Natural)
- data ProtectiveEquipmentSummarizationAttributes = ProtectiveEquipmentSummarizationAttributes' {}
- newProtectiveEquipmentSummarizationAttributes :: Double -> ProtectiveEquipmentSummarizationAttributes
- protectiveEquipmentSummarizationAttributes_minConfidence :: Lens' ProtectiveEquipmentSummarizationAttributes Double
- protectiveEquipmentSummarizationAttributes_requiredEquipmentTypes :: Lens' ProtectiveEquipmentSummarizationAttributes [ProtectiveEquipmentType]
- data ProtectiveEquipmentSummary = ProtectiveEquipmentSummary' {}
- newProtectiveEquipmentSummary :: ProtectiveEquipmentSummary
- protectiveEquipmentSummary_personsWithRequiredEquipment :: Lens' ProtectiveEquipmentSummary (Maybe [Natural])
- protectiveEquipmentSummary_personsWithoutRequiredEquipment :: Lens' ProtectiveEquipmentSummary (Maybe [Natural])
- protectiveEquipmentSummary_personsIndeterminate :: Lens' ProtectiveEquipmentSummary (Maybe [Natural])
- data RegionOfInterest = RegionOfInterest' {}
- newRegionOfInterest :: RegionOfInterest
- regionOfInterest_boundingBox :: Lens' RegionOfInterest (Maybe BoundingBox)
- data S3Object = S3Object' {}
- newS3Object :: S3Object
- s3Object_bucket :: Lens' S3Object (Maybe Text)
- s3Object_name :: Lens' S3Object (Maybe Text)
- s3Object_version :: Lens' S3Object (Maybe Text)
- data SegmentDetection = SegmentDetection' {
- technicalCueSegment :: Maybe TechnicalCueSegment
- endFrameNumber :: Maybe Natural
- durationSMPTE :: Maybe Text
- endTimestampMillis :: Maybe Integer
- startTimecodeSMPTE :: Maybe Text
- endTimecodeSMPTE :: Maybe Text
- durationMillis :: Maybe Natural
- durationFrames :: Maybe Natural
- startTimestampMillis :: Maybe Integer
- type' :: Maybe SegmentType
- shotSegment :: Maybe ShotSegment
- startFrameNumber :: Maybe Natural
- newSegmentDetection :: SegmentDetection
- segmentDetection_technicalCueSegment :: Lens' SegmentDetection (Maybe TechnicalCueSegment)
- segmentDetection_endFrameNumber :: Lens' SegmentDetection (Maybe Natural)
- segmentDetection_durationSMPTE :: Lens' SegmentDetection (Maybe Text)
- segmentDetection_endTimestampMillis :: Lens' SegmentDetection (Maybe Integer)
- segmentDetection_startTimecodeSMPTE :: Lens' SegmentDetection (Maybe Text)
- segmentDetection_endTimecodeSMPTE :: Lens' SegmentDetection (Maybe Text)
- segmentDetection_durationMillis :: Lens' SegmentDetection (Maybe Natural)
- segmentDetection_durationFrames :: Lens' SegmentDetection (Maybe Natural)
- segmentDetection_startTimestampMillis :: Lens' SegmentDetection (Maybe Integer)
- segmentDetection_type :: Lens' SegmentDetection (Maybe SegmentType)
- segmentDetection_shotSegment :: Lens' SegmentDetection (Maybe ShotSegment)
- segmentDetection_startFrameNumber :: Lens' SegmentDetection (Maybe Natural)
- data SegmentTypeInfo = SegmentTypeInfo' {}
- newSegmentTypeInfo :: SegmentTypeInfo
- segmentTypeInfo_modelVersion :: Lens' SegmentTypeInfo (Maybe Text)
- segmentTypeInfo_type :: Lens' SegmentTypeInfo (Maybe SegmentType)
- data ShotSegment = ShotSegment' {}
- newShotSegment :: ShotSegment
- shotSegment_confidence :: Lens' ShotSegment (Maybe Double)
- shotSegment_index :: Lens' ShotSegment (Maybe Natural)
- data Smile = Smile' {}
- newSmile :: Smile
- smile_value :: Lens' Smile (Maybe Bool)
- smile_confidence :: Lens' Smile (Maybe Double)
- data StartSegmentDetectionFilters = StartSegmentDetectionFilters' {}
- newStartSegmentDetectionFilters :: StartSegmentDetectionFilters
- startSegmentDetectionFilters_technicalCueFilter :: Lens' StartSegmentDetectionFilters (Maybe StartTechnicalCueDetectionFilter)
- startSegmentDetectionFilters_shotFilter :: Lens' StartSegmentDetectionFilters (Maybe StartShotDetectionFilter)
- data StartShotDetectionFilter = StartShotDetectionFilter' {}
- newStartShotDetectionFilter :: StartShotDetectionFilter
- startShotDetectionFilter_minSegmentConfidence :: Lens' StartShotDetectionFilter (Maybe Double)
- data StartTechnicalCueDetectionFilter = StartTechnicalCueDetectionFilter' {}
- newStartTechnicalCueDetectionFilter :: StartTechnicalCueDetectionFilter
- startTechnicalCueDetectionFilter_blackFrame :: Lens' StartTechnicalCueDetectionFilter (Maybe BlackFrame)
- startTechnicalCueDetectionFilter_minSegmentConfidence :: Lens' StartTechnicalCueDetectionFilter (Maybe Double)
- data StartTextDetectionFilters = StartTextDetectionFilters' {}
- newStartTextDetectionFilters :: StartTextDetectionFilters
- startTextDetectionFilters_regionsOfInterest :: Lens' StartTextDetectionFilters (Maybe [RegionOfInterest])
- startTextDetectionFilters_wordFilter :: Lens' StartTextDetectionFilters (Maybe DetectionFilter)
- data StreamProcessor = StreamProcessor' {}
- newStreamProcessor :: StreamProcessor
- streamProcessor_status :: Lens' StreamProcessor (Maybe StreamProcessorStatus)
- streamProcessor_name :: Lens' StreamProcessor (Maybe Text)
- data StreamProcessorInput = StreamProcessorInput' {}
- newStreamProcessorInput :: StreamProcessorInput
- streamProcessorInput_kinesisVideoStream :: Lens' StreamProcessorInput (Maybe KinesisVideoStream)
- data StreamProcessorOutput = StreamProcessorOutput' {}
- newStreamProcessorOutput :: StreamProcessorOutput
- streamProcessorOutput_kinesisDataStream :: Lens' StreamProcessorOutput (Maybe KinesisDataStream)
- data StreamProcessorSettings = StreamProcessorSettings' {}
- newStreamProcessorSettings :: StreamProcessorSettings
- streamProcessorSettings_faceSearch :: Lens' StreamProcessorSettings (Maybe FaceSearchSettings)
- data Summary = Summary' {}
- newSummary :: Summary
- summary_s3Object :: Lens' Summary (Maybe S3Object)
- data Sunglasses = Sunglasses' {}
- newSunglasses :: Sunglasses
- sunglasses_value :: Lens' Sunglasses (Maybe Bool)
- sunglasses_confidence :: Lens' Sunglasses (Maybe Double)
- data TechnicalCueSegment = TechnicalCueSegment' {}
- newTechnicalCueSegment :: TechnicalCueSegment
- technicalCueSegment_confidence :: Lens' TechnicalCueSegment (Maybe Double)
- technicalCueSegment_type :: Lens' TechnicalCueSegment (Maybe TechnicalCueType)
- data TestingData = TestingData' {}
- newTestingData :: TestingData
- testingData_assets :: Lens' TestingData (Maybe [Asset])
- testingData_autoCreate :: Lens' TestingData (Maybe Bool)
- data TestingDataResult = TestingDataResult' {}
- newTestingDataResult :: TestingDataResult
- testingDataResult_input :: Lens' TestingDataResult (Maybe TestingData)
- testingDataResult_output :: Lens' TestingDataResult (Maybe TestingData)
- testingDataResult_validation :: Lens' TestingDataResult (Maybe ValidationData)
- data TextDetection = TextDetection' {}
- newTextDetection :: TextDetection
- textDetection_detectedText :: Lens' TextDetection (Maybe Text)
- textDetection_confidence :: Lens' TextDetection (Maybe Double)
- textDetection_geometry :: Lens' TextDetection (Maybe Geometry)
- textDetection_id :: Lens' TextDetection (Maybe Natural)
- textDetection_type :: Lens' TextDetection (Maybe TextTypes)
- textDetection_parentId :: Lens' TextDetection (Maybe Natural)
- data TextDetectionResult = TextDetectionResult' {}
- newTextDetectionResult :: TextDetectionResult
- textDetectionResult_textDetection :: Lens' TextDetectionResult (Maybe TextDetection)
- textDetectionResult_timestamp :: Lens' TextDetectionResult (Maybe Integer)
- data TrainingData = TrainingData' {}
- newTrainingData :: TrainingData
- trainingData_assets :: Lens' TrainingData (Maybe [Asset])
- data TrainingDataResult = TrainingDataResult' {}
- newTrainingDataResult :: TrainingDataResult
- trainingDataResult_input :: Lens' TrainingDataResult (Maybe TrainingData)
- trainingDataResult_output :: Lens' TrainingDataResult (Maybe TrainingData)
- trainingDataResult_validation :: Lens' TrainingDataResult (Maybe ValidationData)
- data UnindexedFace = UnindexedFace' {}
-   reasons :: Maybe [Reason]
-   faceDetail :: Maybe FaceDetail
- newUnindexedFace :: UnindexedFace
- unindexedFace_reasons :: Lens' UnindexedFace (Maybe [Reason])
- unindexedFace_faceDetail :: Lens' UnindexedFace (Maybe FaceDetail)
- data ValidationData = ValidationData' {}
- newValidationData :: ValidationData
- validationData_assets :: Lens' ValidationData (Maybe [Asset])
- data Video = Video' {}
- newVideo :: Video
- video_s3Object :: Lens' Video (Maybe S3Object)
- data VideoMetadata = VideoMetadata' {}
- newVideoMetadata :: VideoMetadata
- videoMetadata_frameRate :: Lens' VideoMetadata (Maybe Double)
- videoMetadata_colorRange :: Lens' VideoMetadata (Maybe VideoColorRange)
- videoMetadata_format :: Lens' VideoMetadata (Maybe Text)
- videoMetadata_codec :: Lens' VideoMetadata (Maybe Text)
- videoMetadata_frameHeight :: Lens' VideoMetadata (Maybe Natural)
- videoMetadata_durationMillis :: Lens' VideoMetadata (Maybe Natural)
- videoMetadata_frameWidth :: Lens' VideoMetadata (Maybe Natural)
Service Configuration
defaultService :: Service Source #
API version 2016-06-27 of the Amazon Rekognition SDK configuration.
Errors
_AccessDeniedException :: AsError a => Getting (First ServiceError) a ServiceError Source #
You are not authorized to perform the action.
_VideoTooLargeException :: AsError a => Getting (First ServiceError) a ServiceError Source #
The file size or duration of the supplied media is too large. The maximum file size is 10GB. The maximum duration is 6 hours.
_InvalidParameterException :: AsError a => Getting (First ServiceError) a ServiceError Source #
Input parameter violated a constraint. Validate your parameter before calling the API operation again.
_InvalidImageFormatException :: AsError a => Getting (First ServiceError) a ServiceError Source #
The provided image format is not supported.
_ResourceAlreadyExistsException :: AsError a => Getting (First ServiceError) a ServiceError Source #
A resource with the specified ID already exists.
_InvalidS3ObjectException :: AsError a => Getting (First ServiceError) a ServiceError Source #
Amazon Rekognition is unable to access the S3 object specified in the request.
_ProvisionedThroughputExceededException :: AsError a => Getting (First ServiceError) a ServiceError Source #
The number of requests exceeded your throughput limit. If you want to increase this limit, contact Amazon Rekognition.
_ImageTooLargeException :: AsError a => Getting (First ServiceError) a ServiceError Source #
The input image size exceeds the allowed limit. If you are calling DetectProtectiveEquipment, the image size or resolution exceeds the allowed limit. For more information, see Limits in Amazon Rekognition in the Amazon Rekognition Developer Guide.
_ServiceQuotaExceededException :: AsError a => Getting (First ServiceError) a ServiceError Source #
The size of the collection exceeds the allowed limit. For more information, see Limits in Amazon Rekognition in the Amazon Rekognition Developer Guide.
_ThrottlingException :: AsError a => Getting (First ServiceError) a ServiceError Source #
Amazon Rekognition is temporarily unable to process the request. Try your call again.
_InternalServerError :: AsError a => Getting (First ServiceError) a ServiceError Source #
Amazon Rekognition experienced a service issue. Try your call again.
_IdempotentParameterMismatchException :: AsError a => Getting (First ServiceError) a ServiceError Source #
A ClientRequestToken
input parameter was reused with an operation, but
at least one of the other input parameters is different from the
previous call to the operation.
_ResourceNotReadyException :: AsError a => Getting (First ServiceError) a ServiceError Source #
The requested resource isn't ready. For example, this exception occurs
when you call DetectCustomLabels
with a model version that isn't
deployed.
_ResourceNotFoundException :: AsError a => Getting (First ServiceError) a ServiceError Source #
The resource specified in the request cannot be found.
_HumanLoopQuotaExceededException :: AsError a => Getting (First ServiceError) a ServiceError Source #
The number of in-progress human reviews you have has exceeded the number allowed.
_InvalidPaginationTokenException :: AsError a => Getting (First ServiceError) a ServiceError Source #
Pagination token in the request is not valid.
_LimitExceededException :: AsError a => Getting (First ServiceError) a ServiceError Source #
An Amazon Rekognition service limit was exceeded. For example, if you
start too many Amazon Rekognition Video jobs concurrently, calls to
start operations (StartLabelDetection
, for example) will raise a
LimitExceededException
exception (HTTP status code: 400) until the
number of concurrently running jobs is below the Amazon Rekognition
service limit.
_ResourceInUseException :: AsError a => Getting (First ServiceError) a ServiceError Source #
The specified resource is already being used.
Attribute
newtype Attribute Source #
pattern Attribute_ALL :: Attribute | |
pattern Attribute_DEFAULT :: Attribute |
Instances
BodyPart
newtype BodyPart Source #
pattern BodyPart_FACE :: BodyPart | |
pattern BodyPart_HEAD :: BodyPart | |
pattern BodyPart_LEFT_HAND :: BodyPart | |
pattern BodyPart_RIGHT_HAND :: BodyPart |
Instances
CelebrityRecognitionSortBy
newtype CelebrityRecognitionSortBy Source #
pattern CelebrityRecognitionSortBy_ID :: CelebrityRecognitionSortBy | |
pattern CelebrityRecognitionSortBy_TIMESTAMP :: CelebrityRecognitionSortBy |
Instances
ContentClassifier
newtype ContentClassifier Source #
pattern ContentClassifier_FreeOfAdultContent :: ContentClassifier | |
pattern ContentClassifier_FreeOfPersonallyIdentifiableInformation :: ContentClassifier |
Instances
ContentModerationSortBy
newtype ContentModerationSortBy Source #
pattern ContentModerationSortBy_NAME :: ContentModerationSortBy | |
pattern ContentModerationSortBy_TIMESTAMP :: ContentModerationSortBy |
Instances
EmotionName
newtype EmotionName Source #
pattern EmotionName_ANGRY :: EmotionName | |
pattern EmotionName_CALM :: EmotionName | |
pattern EmotionName_CONFUSED :: EmotionName | |
pattern EmotionName_DISGUSTED :: EmotionName | |
pattern EmotionName_FEAR :: EmotionName | |
pattern EmotionName_HAPPY :: EmotionName | |
pattern EmotionName_SAD :: EmotionName | |
pattern EmotionName_SURPRISED :: EmotionName | |
pattern EmotionName_UNKNOWN :: EmotionName |
Instances
FaceAttributes
newtype FaceAttributes Source #
pattern FaceAttributes_ALL :: FaceAttributes | |
pattern FaceAttributes_DEFAULT :: FaceAttributes |
Instances
FaceSearchSortBy
newtype FaceSearchSortBy Source #
pattern FaceSearchSortBy_INDEX :: FaceSearchSortBy | |
pattern FaceSearchSortBy_TIMESTAMP :: FaceSearchSortBy |
Instances
GenderType
newtype GenderType Source #
pattern GenderType_Female :: GenderType | |
pattern GenderType_Male :: GenderType |
Instances
KnownGenderType
newtype KnownGenderType Source #
A list of enum strings of possible gender values that Celebrity returns.
pattern KnownGenderType_Female :: KnownGenderType | |
pattern KnownGenderType_Male :: KnownGenderType |
Instances
LabelDetectionSortBy
newtype LabelDetectionSortBy Source #
pattern LabelDetectionSortBy_NAME :: LabelDetectionSortBy | |
pattern LabelDetectionSortBy_TIMESTAMP :: LabelDetectionSortBy |
Instances
LandmarkType
newtype LandmarkType Source #
Instances
OrientationCorrection
newtype OrientationCorrection Source #
Instances
PersonTrackingSortBy
newtype PersonTrackingSortBy Source #
pattern PersonTrackingSortBy_INDEX :: PersonTrackingSortBy | |
pattern PersonTrackingSortBy_TIMESTAMP :: PersonTrackingSortBy |
Instances
ProjectStatus
newtype ProjectStatus Source #
pattern ProjectStatus_CREATED :: ProjectStatus | |
pattern ProjectStatus_CREATING :: ProjectStatus | |
pattern ProjectStatus_DELETING :: ProjectStatus |
Instances
ProjectVersionStatus
newtype ProjectVersionStatus Source #
Instances
ProtectiveEquipmentType
newtype ProtectiveEquipmentType Source #
Instances
QualityFilter
newtype QualityFilter Source #
pattern QualityFilter_AUTO :: QualityFilter | |
pattern QualityFilter_HIGH :: QualityFilter | |
pattern QualityFilter_LOW :: QualityFilter | |
pattern QualityFilter_MEDIUM :: QualityFilter | |
pattern QualityFilter_NONE :: QualityFilter |
Instances
Reason
newtype Reason Source #
pattern Reason_EXCEEDS_MAX_FACES :: Reason | |
pattern Reason_EXTREME_POSE :: Reason | |
pattern Reason_LOW_BRIGHTNESS :: Reason | |
pattern Reason_LOW_CONFIDENCE :: Reason | |
pattern Reason_LOW_FACE_QUALITY :: Reason | |
pattern Reason_LOW_SHARPNESS :: Reason | |
pattern Reason_SMALL_BOUNDING_BOX :: Reason |
Instances
SegmentType
newtype SegmentType Source #
pattern SegmentType_SHOT :: SegmentType | |
pattern SegmentType_TECHNICAL_CUE :: SegmentType |
Instances
StreamProcessorStatus
newtype StreamProcessorStatus Source #
pattern StreamProcessorStatus_FAILED :: StreamProcessorStatus | |
pattern StreamProcessorStatus_RUNNING :: StreamProcessorStatus | |
pattern StreamProcessorStatus_STARTING :: StreamProcessorStatus | |
pattern StreamProcessorStatus_STOPPED :: StreamProcessorStatus | |
pattern StreamProcessorStatus_STOPPING :: StreamProcessorStatus |
Instances
TechnicalCueType
newtype TechnicalCueType Source #
pattern TechnicalCueType_BlackFrames :: TechnicalCueType | |
pattern TechnicalCueType_ColorBars :: TechnicalCueType | |
pattern TechnicalCueType_Content :: TechnicalCueType | |
pattern TechnicalCueType_EndCredits :: TechnicalCueType | |
pattern TechnicalCueType_OpeningCredits :: TechnicalCueType | |
pattern TechnicalCueType_Slate :: TechnicalCueType | |
pattern TechnicalCueType_StudioLogo :: TechnicalCueType |
Instances
TextTypes
newtype TextTypes Source #
pattern TextTypes_LINE :: TextTypes | |
pattern TextTypes_WORD :: TextTypes |
Instances
VideoColorRange
newtype VideoColorRange Source #
pattern VideoColorRange_FULL :: VideoColorRange | |
pattern VideoColorRange_LIMITED :: VideoColorRange |
Instances
VideoJobStatus
newtype VideoJobStatus Source #
pattern VideoJobStatus_FAILED :: VideoJobStatus | |
pattern VideoJobStatus_IN_PROGRESS :: VideoJobStatus | |
pattern VideoJobStatus_SUCCEEDED :: VideoJobStatus |
Instances
AgeRange
Structure containing the estimated age range, in years, for a face.
Amazon Rekognition estimates an age range for faces detected in the input image. Estimated age ranges can overlap. A face of a 5-year-old might have an estimated range of 4-6, while the face of a 6-year-old might have an estimated range of 4-8.
See: newAgeRange
smart constructor.
Instances
Eq AgeRange Source # | |
Read AgeRange Source # | |
Show AgeRange Source # | |
Generic AgeRange Source # | |
NFData AgeRange Source # | |
Defined in Amazonka.Rekognition.Types.AgeRange | |
Hashable AgeRange Source # | |
Defined in Amazonka.Rekognition.Types.AgeRange | |
FromJSON AgeRange Source # | |
type Rep AgeRange Source # | |
Defined in Amazonka.Rekognition.Types.AgeRange type Rep AgeRange = D1 ('MetaData "AgeRange" "Amazonka.Rekognition.Types.AgeRange" "libZSservicesZSamazonka-rekognitionZSamazonka-rekognition" 'False) (C1 ('MetaCons "AgeRange'" 'PrefixI 'True) (S1 ('MetaSel ('Just "low") 'NoSourceUnpackedness 'NoSourceStrictness 'DecidedStrict) (Rec0 (Maybe Natural)) :*: S1 ('MetaSel ('Just "high") 'NoSourceUnpackedness 'NoSourceStrictness 'DecidedStrict) (Rec0 (Maybe Natural)))) |
newAgeRange :: AgeRange Source #
Create a value of AgeRange
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:low:AgeRange'
, ageRange_low
- The lowest estimated age.
$sel:high:AgeRange'
, ageRange_high
- The highest estimated age.
Asset
Assets are the images that you use to train and evaluate a model version. Assets can also contain validation information that you use to debug a failed model training.
See: newAsset
smart constructor.
Instances
Eq Asset Source # | |
Read Asset Source # | |
Show Asset Source # | |
Generic Asset Source # | |
NFData Asset Source # | |
Defined in Amazonka.Rekognition.Types.Asset | |
Hashable Asset Source # | |
Defined in Amazonka.Rekognition.Types.Asset | |
ToJSON Asset Source # | |
Defined in Amazonka.Rekognition.Types.Asset | |
FromJSON Asset Source # | |
type Rep Asset Source # | |
Defined in Amazonka.Rekognition.Types.Asset type Rep Asset = D1 ('MetaData "Asset" "Amazonka.Rekognition.Types.Asset" "libZSservicesZSamazonka-rekognitionZSamazonka-rekognition" 'False) (C1 ('MetaCons "Asset'" 'PrefixI 'True) (S1 ('MetaSel ('Just "groundTruthManifest") 'NoSourceUnpackedness 'NoSourceStrictness 'DecidedStrict) (Rec0 (Maybe GroundTruthManifest)))) |
newAsset :: Asset Source #
Create a value of Asset
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:groundTruthManifest:Asset'
, asset_groundTruthManifest
- Undocumented member.
asset_groundTruthManifest :: Lens' Asset (Maybe GroundTruthManifest) Source #
Undocumented member.
AudioMetadata
data AudioMetadata Source #
Metadata information about an audio stream. An array of AudioMetadata
objects for the audio streams found in a stored video is returned by
GetSegmentDetection.
See: newAudioMetadata
smart constructor.
AudioMetadata' | |
|
Instances
newAudioMetadata :: AudioMetadata Source #
Create a value of AudioMetadata
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:codec:AudioMetadata'
, audioMetadata_codec
- The audio codec used to encode or decode the audio stream.
$sel:sampleRate:AudioMetadata'
, audioMetadata_sampleRate
- The sample rate for the audio stream.
$sel:numberOfChannels:AudioMetadata'
, audioMetadata_numberOfChannels
- The number of audio channels in the segment.
$sel:durationMillis:AudioMetadata'
, audioMetadata_durationMillis
- The duration of the audio stream in milliseconds.
audioMetadata_codec :: Lens' AudioMetadata (Maybe Text) Source #
The audio codec used to encode or decode the audio stream.
audioMetadata_sampleRate :: Lens' AudioMetadata (Maybe Natural) Source #
The sample rate for the audio stream.
audioMetadata_numberOfChannels :: Lens' AudioMetadata (Maybe Natural) Source #
The number of audio channels in the segment.
audioMetadata_durationMillis :: Lens' AudioMetadata (Maybe Natural) Source #
The duration of the audio stream in milliseconds.
Beard
Indicates whether or not the face has a beard, and the confidence level in the determination.
See: newBeard
smart constructor.
Instances
Eq Beard Source # | |
Read Beard Source # | |
Show Beard Source # | |
Generic Beard Source # | |
NFData Beard Source # | |
Defined in Amazonka.Rekognition.Types.Beard | |
Hashable Beard Source # | |
Defined in Amazonka.Rekognition.Types.Beard | |
FromJSON Beard Source # | |
type Rep Beard Source # | |
Defined in Amazonka.Rekognition.Types.Beard type Rep Beard = D1 ('MetaData "Beard" "Amazonka.Rekognition.Types.Beard" "libZSservicesZSamazonka-rekognitionZSamazonka-rekognition" 'False) (C1 ('MetaCons "Beard'" 'PrefixI 'True) (S1 ('MetaSel ('Just "value") 'NoSourceUnpackedness 'NoSourceStrictness 'DecidedStrict) (Rec0 (Maybe Bool)) :*: S1 ('MetaSel ('Just "confidence") 'NoSourceUnpackedness 'NoSourceStrictness 'DecidedStrict) (Rec0 (Maybe Double)))) |
newBeard :: Beard Source #
Create a value of Beard
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:value:Beard'
, beard_value
- Boolean value that indicates whether the face has beard or not.
$sel:confidence:Beard'
, beard_confidence
- Level of confidence in the determination.
beard_value :: Lens' Beard (Maybe Bool) Source #
Boolean value that indicates whether the face has beard or not.
BlackFrame
data BlackFrame Source #
A filter that allows you to control the black frame detection by specifying the black levels and pixel coverage of black pixels in a frame. As videos can come from multiple sources, formats, and time periods, they may contain different standards and varying noise levels for black frames that need to be accounted for. For more information, see StartSegmentDetection.
See: newBlackFrame
smart constructor.
BlackFrame' | |
|
Instances
newBlackFrame :: BlackFrame Source #
Create a value of BlackFrame
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:maxPixelThreshold:BlackFrame'
, blackFrame_maxPixelThreshold
- A threshold used to determine the maximum luminance value for a pixel to
be considered black. In a full color range video, luminance values range
from 0-255. A pixel value of 0 is pure black, and the most strict
filter. The maximum black pixel value is computed as follows:
max_black_pixel_value = minimum_luminance + MaxPixelThreshold * luminance_range.
For example, for a full range video with MaxPixelThreshold = 0.1, max_black_pixel_value is 0 + 0.1 * (255-0) = 25.5.
The default value of MaxPixelThreshold is 0.2, which maps to a max_black_pixel_value of 51 for a full range video. You can lower this threshold to be more strict on black levels.
$sel:minCoveragePercentage:BlackFrame'
, blackFrame_minCoveragePercentage
- The minimum percentage of pixels in a frame that need to have a
luminance below the max_black_pixel_value for a frame to be considered a
black frame. Luminance is calculated using the BT.709 matrix.
The default value is 99, which means at least 99% of all pixels in the
frame are black pixels as per the MaxPixelThreshold
set. You can
reduce this value to allow more noise on the black frame.
blackFrame_maxPixelThreshold :: Lens' BlackFrame (Maybe Double) Source #
A threshold used to determine the maximum luminance value for a pixel to be considered black. In a full color range video, luminance values range from 0-255. A pixel value of 0 is pure black, and the most strict filter. The maximum black pixel value is computed as follows: max_black_pixel_value = minimum_luminance + MaxPixelThreshold * luminance_range.
For example, for a full range video with MaxPixelThreshold = 0.1, max_black_pixel_value is 0 + 0.1 * (255-0) = 25.5.
The default value of MaxPixelThreshold is 0.2, which maps to a max_black_pixel_value of 51 for a full range video. You can lower this threshold to be more strict on black levels.
blackFrame_minCoveragePercentage :: Lens' BlackFrame (Maybe Double) Source #
The minimum percentage of pixels in a frame that need to have a luminance below the max_black_pixel_value for a frame to be considered a black frame. Luminance is calculated using the BT.709 matrix.
The default value is 99, which means at least 99% of all pixels in the
frame are black pixels as per the MaxPixelThreshold
set. You can
reduce this value to allow more noise on the black frame.
BoundingBox
data BoundingBox Source #
Identifies the bounding box around the label, face, text or personal
protective equipment. The left
(x-coordinate) and top
(y-coordinate)
are coordinates representing the top and left sides of the bounding box.
Note that the upper-left corner of the image is the origin (0,0).
The top
and left
values returned are ratios of the overall image
size. For example, if the input image is 700x200 pixels, and the
top-left coordinate of the bounding box is 350x50 pixels, the API
returns a left
value of 0.5 (350/700) and a top
value of 0.25
(50/200).
The width
and height
values represent the dimensions of the bounding
box as a ratio of the overall image dimension. For example, if the input
image is 700x200 pixels, and the bounding box width is 70 pixels, the
width returned is 0.1.
The bounding box coordinates can have negative values. For example, if
Amazon Rekognition is able to detect a face that is at the image edge
and is only partially visible, the service can return coordinates that
are outside the image bounds and, depending on the image edge, you might
get negative values or values greater than 1 for the left
or top
values.
See: newBoundingBox
smart constructor.
BoundingBox' | |
|
Instances
newBoundingBox :: BoundingBox Source #
Create a value of BoundingBox
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:height:BoundingBox'
, boundingBox_height
- Height of the bounding box as a ratio of the overall image height.
$sel:left:BoundingBox'
, boundingBox_left
- Left coordinate of the bounding box as a ratio of overall image width.
$sel:width:BoundingBox'
, boundingBox_width
- Width of the bounding box as a ratio of the overall image width.
$sel:top:BoundingBox'
, boundingBox_top
- Top coordinate of the bounding box as a ratio of overall image height.
boundingBox_height :: Lens' BoundingBox (Maybe Double) Source #
Height of the bounding box as a ratio of the overall image height.
boundingBox_left :: Lens' BoundingBox (Maybe Double) Source #
Left coordinate of the bounding box as a ratio of overall image width.
boundingBox_width :: Lens' BoundingBox (Maybe Double) Source #
Width of the bounding box as a ratio of the overall image width.
boundingBox_top :: Lens' BoundingBox (Maybe Double) Source #
Top coordinate of the bounding box as a ratio of overall image height.
Celebrity
Provides information about a celebrity recognized by the RecognizeCelebrities operation.
See: newCelebrity
smart constructor.
Celebrity' | |
|
Instances
newCelebrity :: Celebrity Source #
Create a value of Celebrity
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:matchConfidence:Celebrity'
, celebrity_matchConfidence
- The confidence, in percentage, that Amazon Rekognition has that the
recognized face is the celebrity.
$sel:urls:Celebrity'
, celebrity_urls
- An array of URLs pointing to additional information about the celebrity.
If there is no additional information about the celebrity, this list is
empty.
$sel:knownGender:Celebrity'
, celebrity_knownGender
- Undocumented member.
$sel:name:Celebrity'
, celebrity_name
- The name of the celebrity.
$sel:id:Celebrity'
, celebrity_id
- A unique identifier for the celebrity.
$sel:face:Celebrity'
, celebrity_face
- Provides information about the celebrity's face, such as its location
on the image.
celebrity_matchConfidence :: Lens' Celebrity (Maybe Double) Source #
The confidence, in percentage, that Amazon Rekognition has that the recognized face is the celebrity.
celebrity_urls :: Lens' Celebrity (Maybe [Text]) Source #
An array of URLs pointing to additional information about the celebrity. If there is no additional information about the celebrity, this list is empty.
celebrity_knownGender :: Lens' Celebrity (Maybe KnownGender) Source #
Undocumented member.
celebrity_face :: Lens' Celebrity (Maybe ComparedFace) Source #
Provides information about the celebrity's face, such as its location on the image.
CelebrityDetail
data CelebrityDetail Source #
Information about a recognized celebrity.
See: newCelebrityDetail
smart constructor.
CelebrityDetail' | |
|
Instances
newCelebrityDetail :: CelebrityDetail Source #
Create a value of CelebrityDetail
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:boundingBox:CelebrityDetail'
, celebrityDetail_boundingBox
- Bounding box around the body of a celebrity.
$sel:urls:CelebrityDetail'
, celebrityDetail_urls
- An array of URLs pointing to additional celebrity information.
$sel:confidence:CelebrityDetail'
, celebrityDetail_confidence
- The confidence, in percentage, that Amazon Rekognition has that the
recognized face is the celebrity.
$sel:name:CelebrityDetail'
, celebrityDetail_name
- The name of the celebrity.
$sel:id:CelebrityDetail'
, celebrityDetail_id
- The unique identifier for the celebrity.
$sel:face:CelebrityDetail'
, celebrityDetail_face
- Face details for the recognized celebrity.
celebrityDetail_boundingBox :: Lens' CelebrityDetail (Maybe BoundingBox) Source #
Bounding box around the body of a celebrity.
celebrityDetail_urls :: Lens' CelebrityDetail (Maybe [Text]) Source #
An array of URLs pointing to additional celebrity information.
celebrityDetail_confidence :: Lens' CelebrityDetail (Maybe Double) Source #
The confidence, in percentage, that Amazon Rekognition has that the recognized face is the celebrity.
celebrityDetail_name :: Lens' CelebrityDetail (Maybe Text) Source #
The name of the celebrity.
celebrityDetail_id :: Lens' CelebrityDetail (Maybe Text) Source #
The unique identifier for the celebrity.
celebrityDetail_face :: Lens' CelebrityDetail (Maybe FaceDetail) Source #
Face details for the recognized celebrity.
CelebrityRecognition
data CelebrityRecognition Source #
Information about a detected celebrity and the time the celebrity was detected in a stored video. For more information, see GetCelebrityRecognition in the Amazon Rekognition Developer Guide.
See: newCelebrityRecognition
smart constructor.
Instances
newCelebrityRecognition :: CelebrityRecognition Source #
Create a value of CelebrityRecognition
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:celebrity:CelebrityRecognition'
, celebrityRecognition_celebrity
- Information about a recognized celebrity.
$sel:timestamp:CelebrityRecognition'
, celebrityRecognition_timestamp
- The time, in milliseconds from the start of the video, that the
celebrity was recognized.
celebrityRecognition_celebrity :: Lens' CelebrityRecognition (Maybe CelebrityDetail) Source #
Information about a recognized celebrity.
celebrityRecognition_timestamp :: Lens' CelebrityRecognition (Maybe Integer) Source #
The time, in milliseconds from the start of the video, that the celebrity was recognized.
CompareFacesMatch
data CompareFacesMatch Source #
Provides information about a face in a target image that matches the
source image face analyzed by CompareFaces
. The Face
property
contains the bounding box of the face in the target image. The
Similarity
property is the confidence that the source image face
matches the face in the bounding box.
See: newCompareFacesMatch
smart constructor.
CompareFacesMatch' | |
|
Instances
newCompareFacesMatch :: CompareFacesMatch Source #
Create a value of CompareFacesMatch
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:similarity:CompareFacesMatch'
, compareFacesMatch_similarity
- Level of confidence that the faces match.
$sel:face:CompareFacesMatch'
, compareFacesMatch_face
- Provides face metadata (bounding box and confidence that the bounding
box actually contains a face).
compareFacesMatch_similarity :: Lens' CompareFacesMatch (Maybe Double) Source #
Level of confidence that the faces match.
compareFacesMatch_face :: Lens' CompareFacesMatch (Maybe ComparedFace) Source #
Provides face metadata (bounding box and confidence that the bounding box actually contains a face).
ComparedFace
data ComparedFace Source #
Provides face metadata for target image faces that are analyzed by
CompareFaces
and RecognizeCelebrities
.
See: newComparedFace
smart constructor.
ComparedFace' | |
|
Instances
newComparedFace :: ComparedFace Source #
Create a value of ComparedFace
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:boundingBox:ComparedFace'
, comparedFace_boundingBox
- Bounding box of the face.
$sel:emotions:ComparedFace'
, comparedFace_emotions
- The emotions that appear to be expressed on the face, and the confidence
level in the determination. Valid values include "Happy", "Sad",
"Angry", "Confused", "Disgusted", "Surprised", "Calm",
"Unknown", and "Fear".
$sel:pose:ComparedFace'
, comparedFace_pose
- Indicates the pose of the face as determined by its pitch, roll, and
yaw.
$sel:confidence:ComparedFace'
, comparedFace_confidence
- Level of confidence that what the bounding box contains is a face.
$sel:quality:ComparedFace'
, comparedFace_quality
- Identifies face image brightness and sharpness.
$sel:smile:ComparedFace'
, comparedFace_smile
- Indicates whether or not the face is smiling, and the confidence level
in the determination.
$sel:landmarks:ComparedFace'
, comparedFace_landmarks
- An array of facial landmarks.
comparedFace_boundingBox :: Lens' ComparedFace (Maybe BoundingBox) Source #
Bounding box of the face.
comparedFace_emotions :: Lens' ComparedFace (Maybe [Emotion]) Source #
The emotions that appear to be expressed on the face, and the confidence level in the determination. Valid values include "Happy", "Sad", "Angry", "Confused", "Disgusted", "Surprised", "Calm", "Unknown", and "Fear".
comparedFace_pose :: Lens' ComparedFace (Maybe Pose) Source #
Indicates the pose of the face as determined by its pitch, roll, and yaw.
comparedFace_confidence :: Lens' ComparedFace (Maybe Double) Source #
Level of confidence that what the bounding box contains is a face.
comparedFace_quality :: Lens' ComparedFace (Maybe ImageQuality) Source #
Identifies face image brightness and sharpness.
comparedFace_smile :: Lens' ComparedFace (Maybe Smile) Source #
Indicates whether or not the face is smiling, and the confidence level in the determination.
comparedFace_landmarks :: Lens' ComparedFace (Maybe [Landmark]) Source #
An array of facial landmarks.
ComparedSourceImageFace
data ComparedSourceImageFace Source #
Type that describes the face Amazon Rekognition chose to compare with the faces in the target. This contains a bounding box for the selected face and confidence level that the bounding box contains a face. Note that Amazon Rekognition selects the largest face in the source image for this comparison.
See: newComparedSourceImageFace
smart constructor.
ComparedSourceImageFace' | |
|
Instances
newComparedSourceImageFace :: ComparedSourceImageFace Source #
Create a value of ComparedSourceImageFace
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:boundingBox:ComparedSourceImageFace'
, comparedSourceImageFace_boundingBox
- Bounding box of the face.
$sel:confidence:ComparedSourceImageFace'
, comparedSourceImageFace_confidence
- Confidence level that the selected bounding box contains a face.
comparedSourceImageFace_boundingBox :: Lens' ComparedSourceImageFace (Maybe BoundingBox) Source #
Bounding box of the face.
comparedSourceImageFace_confidence :: Lens' ComparedSourceImageFace (Maybe Double) Source #
Confidence level that the selected bounding box contains a face.
ContentModerationDetection
data ContentModerationDetection Source #
Information about an inappropriate, unwanted, or offensive content label detection in a stored video.
See: newContentModerationDetection
smart constructor.
ContentModerationDetection' | |
|
Instances
newContentModerationDetection :: ContentModerationDetection Source #
Create a value of ContentModerationDetection
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:moderationLabel:ContentModerationDetection'
, contentModerationDetection_moderationLabel
- The content moderation label detected in the stored video.
$sel:timestamp:ContentModerationDetection'
, contentModerationDetection_timestamp
- Time, in milliseconds from the beginning of the video, that the content
moderation label was detected.
contentModerationDetection_moderationLabel :: Lens' ContentModerationDetection (Maybe ModerationLabel) Source #
The content moderation label detected in the stored video.
contentModerationDetection_timestamp :: Lens' ContentModerationDetection (Maybe Integer) Source #
Time, in milliseconds from the beginning of the video, that the content moderation label was detected.
CoversBodyPart
data CoversBodyPart Source #
Information about an item of Personal Protective Equipment covering a corresponding body part. For more information, see DetectProtectiveEquipment.
See: newCoversBodyPart
smart constructor.
Instances
newCoversBodyPart :: CoversBodyPart Source #
Create a value of CoversBodyPart
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:value:CoversBodyPart'
, coversBodyPart_value
- True if the PPE covers the corresponding body part, otherwise false.
$sel:confidence:CoversBodyPart'
, coversBodyPart_confidence
- The confidence that Amazon Rekognition has in the value of Value
.
coversBodyPart_value :: Lens' CoversBodyPart (Maybe Bool) Source #
True if the PPE covers the corresponding body part, otherwise false.
coversBodyPart_confidence :: Lens' CoversBodyPart (Maybe Double) Source #
The confidence that Amazon Rekognition has in the value of Value
.
CustomLabel
data CustomLabel Source #
A custom label detected in an image by a call to DetectCustomLabels.
See: newCustomLabel
smart constructor.
CustomLabel' | |
|
Instances
newCustomLabel :: CustomLabel Source #
Create a value of CustomLabel
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:confidence:CustomLabel'
, customLabel_confidence
- The confidence that the model has in the detection of the custom label.
The range is 0-100. A higher value indicates a higher confidence.
$sel:name:CustomLabel'
, customLabel_name
- The name of the custom label.
$sel:geometry:CustomLabel'
, customLabel_geometry
- The location of the detected object on the image that corresponds to the
custom label. Includes an axis aligned coarse bounding box surrounding
the object and a finer grain polygon for more accurate spatial
information.
customLabel_confidence :: Lens' CustomLabel (Maybe Double) Source #
The confidence that the model has in the detection of the custom label. The range is 0-100. A higher value indicates a higher confidence.
customLabel_name :: Lens' CustomLabel (Maybe Text) Source #
The name of the custom label.
customLabel_geometry :: Lens' CustomLabel (Maybe Geometry) Source #
The location of the detected object on the image that corresponds to the custom label. Includes an axis aligned coarse bounding box surrounding the object and a finer grain polygon for more accurate spatial information.
DetectTextFilters
data DetectTextFilters Source #
A set of optional parameters that you can use to set the criteria that
the text must meet to be included in your response. WordFilter
looks
at a word’s height, width, and minimum confidence. RegionOfInterest
lets you set a specific region of the image to look for text in.
See: newDetectTextFilters
smart constructor.
DetectTextFilters' | |
|
Instances
newDetectTextFilters :: DetectTextFilters Source #
Create a value of DetectTextFilters
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:regionsOfInterest:DetectTextFilters'
, detectTextFilters_regionsOfInterest
- A Filter focusing on a certain area of the image. Uses a BoundingBox
object to set the region of the image.
$sel:wordFilter:DetectTextFilters'
, detectTextFilters_wordFilter
- Undocumented member.
detectTextFilters_regionsOfInterest :: Lens' DetectTextFilters (Maybe [RegionOfInterest]) Source #
A Filter focusing on a certain area of the image. Uses a BoundingBox
object to set the region of the image.
detectTextFilters_wordFilter :: Lens' DetectTextFilters (Maybe DetectionFilter) Source #
Undocumented member.
DetectionFilter
data DetectionFilter Source #
A set of parameters that allow you to filter out certain results from your returned results.
See: newDetectionFilter
smart constructor.
DetectionFilter' | |
|
Instances
newDetectionFilter :: DetectionFilter Source #
Create a value of DetectionFilter
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:minBoundingBoxHeight:DetectionFilter'
, detectionFilter_minBoundingBoxHeight
- Sets the minimum height of the word bounding box. Words with bounding
box heights less than this value will be excluded from the result.
Value is relative to the video frame height.
$sel:minBoundingBoxWidth:DetectionFilter'
, detectionFilter_minBoundingBoxWidth
- Sets the minimum width of the word bounding box. Words with bounding
box widths less than this value will be excluded from the result.
Value is relative to the video frame width.
$sel:minConfidence:DetectionFilter'
, detectionFilter_minConfidence
- Sets the confidence of word detection. Words with detection confidence
below this will be excluded from the result. Values should be between 50
and 100 as Text in Video will not return any result below 50.
detectionFilter_minBoundingBoxHeight :: Lens' DetectionFilter (Maybe Double) Source #
Sets the minimum height of the word bounding box. Words with bounding box heights less than this value will be excluded from the result. Value is relative to the video frame height.
detectionFilter_minBoundingBoxWidth :: Lens' DetectionFilter (Maybe Double) Source #
Sets the minimum width of the word bounding box. Words with bounding box widths less than this value will be excluded from the result. Value is relative to the video frame width.
detectionFilter_minConfidence :: Lens' DetectionFilter (Maybe Double) Source #
Sets the confidence of word detection. Words with detection confidence below this will be excluded from the result. Values should be between 50 and 100 as Text in Video will not return any result below 50.
Emotion
The emotions that appear to be expressed on the face, and the confidence level in the determination. The API is only making a determination of the physical appearance of a person's face. It is not a determination of the person’s internal emotional state and should not be used in such a way. For example, a person pretending to have a sad face might not be sad emotionally.
See: newEmotion
smart constructor.
Emotion' | |
|
Instances
Eq Emotion Source # | |
Read Emotion Source # | |
Show Emotion Source # | |
Generic Emotion Source # | |
NFData Emotion Source # | |
Defined in Amazonka.Rekognition.Types.Emotion | |
Hashable Emotion Source # | |
Defined in Amazonka.Rekognition.Types.Emotion | |
FromJSON Emotion Source # | |
type Rep Emotion Source # | |
Defined in Amazonka.Rekognition.Types.Emotion type Rep Emotion = D1 ('MetaData "Emotion" "Amazonka.Rekognition.Types.Emotion" "libZSservicesZSamazonka-rekognitionZSamazonka-rekognition" 'False) (C1 ('MetaCons "Emotion'" 'PrefixI 'True) (S1 ('MetaSel ('Just "confidence") 'NoSourceUnpackedness 'NoSourceStrictness 'DecidedStrict) (Rec0 (Maybe Double)) :*: S1 ('MetaSel ('Just "type'") 'NoSourceUnpackedness 'NoSourceStrictness 'DecidedStrict) (Rec0 (Maybe EmotionName)))) |
newEmotion :: Emotion Source #
Create a value of Emotion
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:confidence:Emotion'
, emotion_confidence
- Level of confidence in the determination.
$sel:type':Emotion'
, emotion_type
- Type of emotion detected.
emotion_confidence :: Lens' Emotion (Maybe Double) Source #
Level of confidence in the determination.
emotion_type :: Lens' Emotion (Maybe EmotionName) Source #
Type of emotion detected.
EquipmentDetection
data EquipmentDetection Source #
Information about an item of Personal Protective Equipment (PPE) detected by DetectProtectiveEquipment. For more information, see DetectProtectiveEquipment.
See: newEquipmentDetection
smart constructor.
EquipmentDetection' | |
|
Instances
newEquipmentDetection :: EquipmentDetection Source #
Create a value of EquipmentDetection
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:boundingBox:EquipmentDetection'
, equipmentDetection_boundingBox
- A bounding box surrounding the item of detected PPE.
$sel:coversBodyPart:EquipmentDetection'
, equipmentDetection_coversBodyPart
- Information about the body part covered by the detected PPE.
$sel:confidence:EquipmentDetection'
, equipmentDetection_confidence
- The confidence that Amazon Rekognition has that the bounding box
(BoundingBox
) contains an item of PPE.
$sel:type':EquipmentDetection'
, equipmentDetection_type
- The type of detected PPE.
equipmentDetection_boundingBox :: Lens' EquipmentDetection (Maybe BoundingBox) Source #
A bounding box surrounding the item of detected PPE.
equipmentDetection_coversBodyPart :: Lens' EquipmentDetection (Maybe CoversBodyPart) Source #
Information about the body part covered by the detected PPE.
equipmentDetection_confidence :: Lens' EquipmentDetection (Maybe Double) Source #
The confidence that Amazon Rekognition has that the bounding box
(BoundingBox
) contains an item of PPE.
equipmentDetection_type :: Lens' EquipmentDetection (Maybe ProtectiveEquipmentType) Source #
The type of detected PPE.
EvaluationResult
data EvaluationResult Source #
The evaluation results for the training of a model.
See: newEvaluationResult
smart constructor.
EvaluationResult' | |
|
Instances
newEvaluationResult :: EvaluationResult Source #
Create a value of EvaluationResult
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:summary:EvaluationResult'
, evaluationResult_summary
- The S3 bucket that contains the training summary.
$sel:f1Score:EvaluationResult'
, evaluationResult_f1Score
- The F1 score for the evaluation of all labels. The F1 score metric
evaluates the overall precision and recall performance of the model as a
single value. A higher value indicates better precision and recall
performance. A lower score indicates that precision, recall, or both are
performing poorly.
evaluationResult_summary :: Lens' EvaluationResult (Maybe Summary) Source #
The S3 bucket that contains the training summary.
evaluationResult_f1Score :: Lens' EvaluationResult (Maybe Double) Source #
The F1 score for the evaluation of all labels. The F1 score metric evaluates the overall precision and recall performance of the model as a single value. A higher value indicates better precision and recall performance. A lower score indicates that precision, recall, or both are performing poorly.
EyeOpen
Indicates whether or not the eyes on the face are open, and the confidence level in the determination.
See: newEyeOpen
smart constructor.
Instances
Eq EyeOpen Source # | |
Read EyeOpen Source # | |
Show EyeOpen Source # | |
Generic EyeOpen Source # | |
NFData EyeOpen Source # | |
Defined in Amazonka.Rekognition.Types.EyeOpen | |
Hashable EyeOpen Source # | |
Defined in Amazonka.Rekognition.Types.EyeOpen | |
FromJSON EyeOpen Source # | |
type Rep EyeOpen Source # | |
Defined in Amazonka.Rekognition.Types.EyeOpen type Rep EyeOpen = D1 ('MetaData "EyeOpen" "Amazonka.Rekognition.Types.EyeOpen" "libZSservicesZSamazonka-rekognitionZSamazonka-rekognition" 'False) (C1 ('MetaCons "EyeOpen'" 'PrefixI 'True) (S1 ('MetaSel ('Just "value") 'NoSourceUnpackedness 'NoSourceStrictness 'DecidedStrict) (Rec0 (Maybe Bool)) :*: S1 ('MetaSel ('Just "confidence") 'NoSourceUnpackedness 'NoSourceStrictness 'DecidedStrict) (Rec0 (Maybe Double)))) |
newEyeOpen :: EyeOpen Source #
Create a value of EyeOpen
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:value:EyeOpen'
, eyeOpen_value
- Boolean value that indicates whether the eyes on the face are open.
$sel:confidence:EyeOpen'
, eyeOpen_confidence
- Level of confidence in the determination.
eyeOpen_value :: Lens' EyeOpen (Maybe Bool) Source #
Boolean value that indicates whether the eyes on the face are open.
eyeOpen_confidence :: Lens' EyeOpen (Maybe Double) Source #
Level of confidence in the determination.
Eyeglasses
data Eyeglasses Source #
Indicates whether or not the face is wearing eye glasses, and the confidence level in the determination.
See: newEyeglasses
smart constructor.
Instances
newEyeglasses :: Eyeglasses Source #
Create a value of Eyeglasses
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:value:Eyeglasses'
, eyeglasses_value
- Boolean value that indicates whether the face is wearing eye glasses or
not.
$sel:confidence:Eyeglasses'
, eyeglasses_confidence
- Level of confidence in the determination.
eyeglasses_value :: Lens' Eyeglasses (Maybe Bool) Source #
Boolean value that indicates whether the face is wearing eye glasses or not.
eyeglasses_confidence :: Lens' Eyeglasses (Maybe Double) Source #
Level of confidence in the determination.
Face
Describes the face properties such as the bounding box, face ID, image ID of the input image, and external image ID that you assigned.
See: newFace
smart constructor.
Face' | |
|
Instances
Create a value of Face
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:faceId:Face'
, face_faceId
- Unique identifier that Amazon Rekognition assigns to the face.
$sel:boundingBox:Face'
, face_boundingBox
- Bounding box of the face.
$sel:externalImageId:Face'
, face_externalImageId
- Identifier that you assign to all the faces in the input image.
$sel:confidence:Face'
, face_confidence
- Confidence level that the bounding box contains a face (and not a
different object such as a tree).
$sel:imageId:Face'
, face_imageId
- Unique identifier that Amazon Rekognition assigns to the input image.
face_faceId :: Lens' Face (Maybe Text) Source #
Unique identifier that Amazon Rekognition assigns to the face.
face_boundingBox :: Lens' Face (Maybe BoundingBox) Source #
Bounding box of the face.
face_externalImageId :: Lens' Face (Maybe Text) Source #
Identifier that you assign to all the faces in the input image.
face_confidence :: Lens' Face (Maybe Double) Source #
Confidence level that the bounding box contains a face (and not a different object such as a tree).
face_imageId :: Lens' Face (Maybe Text) Source #
Unique identifier that Amazon Rekognition assigns to the input image.
FaceDetail
data FaceDetail Source #
Structure containing attributes of the face that the algorithm detected.
A FaceDetail
object contains either the default facial attributes or
all facial attributes. The default attributes are BoundingBox
,
Confidence
, Landmarks
, Pose
, and Quality
.
GetFaceDetection is the only Amazon Rekognition Video stored video
operation that can return a FaceDetail
object with all attributes. To
specify which attributes to return, use the FaceAttributes
input
parameter for StartFaceDetection. The following Amazon Rekognition Video
operations return only the default attributes. The corresponding Start
operations don't have a FaceAttributes
input parameter.
- GetCelebrityRecognition
- GetPersonTracking
- GetFaceSearch
The Amazon Rekognition Image DetectFaces and IndexFaces operations can
return all facial attributes. To specify which attributes to return, use
the Attributes
input parameter for DetectFaces
. For IndexFaces
,
use the DetectAttributes
input parameter.
See: newFaceDetail
smart constructor.
FaceDetail' | |
|
Instances
newFaceDetail :: FaceDetail Source #
Create a value of FaceDetail
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:ageRange:FaceDetail'
, faceDetail_ageRange
- The estimated age range, in years, for the face. Low represents the
lowest estimated age and High represents the highest estimated age.
$sel:sunglasses:FaceDetail'
, faceDetail_sunglasses
- Indicates whether or not the face is wearing sunglasses, and the
confidence level in the determination.
$sel:mouthOpen:FaceDetail'
, faceDetail_mouthOpen
- Indicates whether or not the mouth on the face is open, and the
confidence level in the determination.
$sel:boundingBox:FaceDetail'
, faceDetail_boundingBox
- Bounding box of the face. Default attribute.
$sel:emotions:FaceDetail'
, faceDetail_emotions
- The emotions that appear to be expressed on the face, and the confidence
level in the determination. The API is only making a determination of
the physical appearance of a person's face. It is not a determination
of the person’s internal emotional state and should not be used in such
a way. For example, a person pretending to have a sad face might not be
sad emotionally.
$sel:eyesOpen:FaceDetail'
, faceDetail_eyesOpen
- Indicates whether or not the eyes on the face are open, and the
confidence level in the determination.
$sel:pose:FaceDetail'
, faceDetail_pose
- Indicates the pose of the face as determined by its pitch, roll, and
yaw. Default attribute.
$sel:confidence:FaceDetail'
, faceDetail_confidence
- Confidence level that the bounding box contains a face (and not a
different object such as a tree). Default attribute.
$sel:gender:FaceDetail'
, faceDetail_gender
- The predicted gender of a detected face.
$sel:quality:FaceDetail'
, faceDetail_quality
- Identifies image brightness and sharpness. Default attribute.
$sel:eyeglasses:FaceDetail'
, faceDetail_eyeglasses
- Indicates whether or not the face is wearing eye glasses, and the
confidence level in the determination.
$sel:beard:FaceDetail'
, faceDetail_beard
- Indicates whether or not the face has a beard, and the confidence level
in the determination.
$sel:mustache:FaceDetail'
, faceDetail_mustache
- Indicates whether or not the face has a mustache, and the confidence
level in the determination.
$sel:smile:FaceDetail'
, faceDetail_smile
- Indicates whether or not the face is smiling, and the confidence level
in the determination.
$sel:landmarks:FaceDetail'
, faceDetail_landmarks
- Indicates the location of landmarks on the face. Default attribute.
faceDetail_ageRange :: Lens' FaceDetail (Maybe AgeRange) Source #
The estimated age range, in years, for the face. Low represents the lowest estimated age and High represents the highest estimated age.
faceDetail_sunglasses :: Lens' FaceDetail (Maybe Sunglasses) Source #
Indicates whether or not the face is wearing sunglasses, and the confidence level in the determination.
faceDetail_mouthOpen :: Lens' FaceDetail (Maybe MouthOpen) Source #
Indicates whether or not the mouth on the face is open, and the confidence level in the determination.
faceDetail_boundingBox :: Lens' FaceDetail (Maybe BoundingBox) Source #
Bounding box of the face. Default attribute.
faceDetail_emotions :: Lens' FaceDetail (Maybe [Emotion]) Source #
The emotions that appear to be expressed on the face, and the confidence level in the determination. The API is only making a determination of the physical appearance of a person's face. It is not a determination of the person’s internal emotional state and should not be used in such a way. For example, a person pretending to have a sad face might not be sad emotionally.
faceDetail_eyesOpen :: Lens' FaceDetail (Maybe EyeOpen) Source #
Indicates whether or not the eyes on the face are open, and the confidence level in the determination.
faceDetail_pose :: Lens' FaceDetail (Maybe Pose) Source #
Indicates the pose of the face as determined by its pitch, roll, and yaw. Default attribute.
faceDetail_confidence :: Lens' FaceDetail (Maybe Double) Source #
Confidence level that the bounding box contains a face (and not a different object such as a tree). Default attribute.
faceDetail_gender :: Lens' FaceDetail (Maybe Gender) Source #
The predicted gender of a detected face.
faceDetail_quality :: Lens' FaceDetail (Maybe ImageQuality) Source #
Identifies image brightness and sharpness. Default attribute.
faceDetail_eyeglasses :: Lens' FaceDetail (Maybe Eyeglasses) Source #
Indicates whether or not the face is wearing eye glasses, and the confidence level in the determination.
faceDetail_beard :: Lens' FaceDetail (Maybe Beard) Source #
Indicates whether or not the face has a beard, and the confidence level in the determination.
faceDetail_mustache :: Lens' FaceDetail (Maybe Mustache) Source #
Indicates whether or not the face has a mustache, and the confidence level in the determination.
faceDetail_smile :: Lens' FaceDetail (Maybe Smile) Source #
Indicates whether or not the face is smiling, and the confidence level in the determination.
faceDetail_landmarks :: Lens' FaceDetail (Maybe [Landmark]) Source #
Indicates the location of landmarks on the face. Default attribute.
FaceDetection
data FaceDetection Source #
Information about a face detected in a video analysis request and the time the face was detected in the video.
See: newFaceDetection
smart constructor.
Instances
newFaceDetection :: FaceDetection Source #
Create a value of FaceDetection
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:timestamp:FaceDetection'
, faceDetection_timestamp
- Time, in milliseconds from the start of the video, that the face was
detected.
$sel:face:FaceDetection'
, faceDetection_face
- The face properties for the detected face.
faceDetection_timestamp :: Lens' FaceDetection (Maybe Integer) Source #
Time, in milliseconds from the start of the video, that the face was detected.
faceDetection_face :: Lens' FaceDetection (Maybe FaceDetail) Source #
The face properties for the detected face.
FaceMatch
Provides face metadata. In addition, it also provides the confidence in the match of this face with the input face.
See: newFaceMatch
smart constructor.
Instances
Eq FaceMatch Source # | |
Read FaceMatch Source # | |
Show FaceMatch Source # | |
Generic FaceMatch Source # | |
NFData FaceMatch Source # | |
Defined in Amazonka.Rekognition.Types.FaceMatch | |
Hashable FaceMatch Source # | |
Defined in Amazonka.Rekognition.Types.FaceMatch | |
FromJSON FaceMatch Source # | |
type Rep FaceMatch Source # | |
Defined in Amazonka.Rekognition.Types.FaceMatch type Rep FaceMatch = D1 ('MetaData "FaceMatch" "Amazonka.Rekognition.Types.FaceMatch" "libZSservicesZSamazonka-rekognitionZSamazonka-rekognition" 'False) (C1 ('MetaCons "FaceMatch'" 'PrefixI 'True) (S1 ('MetaSel ('Just "similarity") 'NoSourceUnpackedness 'NoSourceStrictness 'DecidedStrict) (Rec0 (Maybe Double)) :*: S1 ('MetaSel ('Just "face") 'NoSourceUnpackedness 'NoSourceStrictness 'DecidedStrict) (Rec0 (Maybe Face)))) |
newFaceMatch :: FaceMatch Source #
Create a value of FaceMatch
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:similarity:FaceMatch'
, faceMatch_similarity
- Confidence in the match of this face with the input face.
$sel:face:FaceMatch'
, faceMatch_face
- Describes the face properties such as the bounding box, face ID, image
ID of the source image, and external image ID that you assigned.
faceMatch_similarity :: Lens' FaceMatch (Maybe Double) Source #
Confidence in the match of this face with the input face.
faceMatch_face :: Lens' FaceMatch (Maybe Face) Source #
Describes the face properties such as the bounding box, face ID, image ID of the source image, and external image ID that you assigned.
FaceRecord
data FaceRecord Source #
Object containing both the face metadata (stored in the backend database), and facial attributes that are detected but aren't stored in the database.
See: newFaceRecord
smart constructor.
FaceRecord' | |
|
Instances
newFaceRecord :: FaceRecord Source #
Create a value of FaceRecord
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:faceDetail:FaceRecord'
, faceRecord_faceDetail
- Structure containing attributes of the face that the algorithm detected.
$sel:face:FaceRecord'
, faceRecord_face
- Describes the face properties such as the bounding box, face ID, image
ID of the input image, and external image ID that you assigned.
faceRecord_faceDetail :: Lens' FaceRecord (Maybe FaceDetail) Source #
Structure containing attributes of the face that the algorithm detected.
faceRecord_face :: Lens' FaceRecord (Maybe Face) Source #
Describes the face properties such as the bounding box, face ID, image ID of the input image, and external image ID that you assigned.
FaceSearchSettings
data FaceSearchSettings Source #
Input face recognition parameters for an Amazon Rekognition stream
processor. FaceRecognitionSettings
is a request parameter for
CreateStreamProcessor.
See: newFaceSearchSettings
smart constructor.
FaceSearchSettings' | |
|
Instances
newFaceSearchSettings :: FaceSearchSettings Source #
Create a value of FaceSearchSettings
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:faceMatchThreshold:FaceSearchSettings'
, faceSearchSettings_faceMatchThreshold
- Minimum face match confidence score that must be met to return a result
for a recognized face. Default is 80. 0 is the lowest confidence. 100 is
the highest confidence.
$sel:collectionId:FaceSearchSettings'
, faceSearchSettings_collectionId
- The ID of a collection that contains faces that you want to search for.
faceSearchSettings_faceMatchThreshold :: Lens' FaceSearchSettings (Maybe Double) Source #
Minimum face match confidence score that must be met to return a result for a recognized face. Default is 80. 0 is the lowest confidence. 100 is the highest confidence.
faceSearchSettings_collectionId :: Lens' FaceSearchSettings (Maybe Text) Source #
The ID of a collection that contains faces that you want to search for.
Gender
The predicted gender of a detected face.
Amazon Rekognition makes gender binary (male/female) predictions based on the physical appearance of a face in a particular image. This kind of prediction is not designed to categorize a person’s gender identity, and you shouldn't use Amazon Rekognition to make such a determination. For example, a male actor wearing a long-haired wig and earrings for a role might be predicted as female.
Using Amazon Rekognition to make gender binary predictions is best suited for use cases where aggregate gender distribution statistics need to be analyzed without identifying specific users. For example, the percentage of female users compared to male users on a social media platform.
We don't recommend using gender binary predictions to make decisions that impact an individual's rights, privacy, or access to services.
See: newGender
smart constructor.
Gender' | |
|
Instances
Eq Gender Source # | |
Read Gender Source # | |
Show Gender Source # | |
Generic Gender Source # | |
NFData Gender Source # | |
Defined in Amazonka.Rekognition.Types.Gender | |
Hashable Gender Source # | |
Defined in Amazonka.Rekognition.Types.Gender | |
FromJSON Gender Source # | |
type Rep Gender Source # | |
Defined in Amazonka.Rekognition.Types.Gender type Rep Gender = D1 ('MetaData "Gender" "Amazonka.Rekognition.Types.Gender" "libZSservicesZSamazonka-rekognitionZSamazonka-rekognition" 'False) (C1 ('MetaCons "Gender'" 'PrefixI 'True) (S1 ('MetaSel ('Just "value") 'NoSourceUnpackedness 'NoSourceStrictness 'DecidedStrict) (Rec0 (Maybe GenderType)) :*: S1 ('MetaSel ('Just "confidence") 'NoSourceUnpackedness 'NoSourceStrictness 'DecidedStrict) (Rec0 (Maybe Double)))) |
Create a value of Gender
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:value:Gender'
, gender_value
- The predicted gender of the face.
$sel:confidence:Gender'
, gender_confidence
- Level of confidence in the prediction.
gender_value :: Lens' Gender (Maybe GenderType) Source #
The predicted gender of the face.
Geometry
Information about where an object (DetectCustomLabels) or text (DetectText) is located on an image.
See: newGeometry
smart constructor.
Geometry' | |
|
Instances
Eq Geometry Source # | |
Read Geometry Source # | |
Show Geometry Source # | |
Generic Geometry Source # | |
NFData Geometry Source # | |
Defined in Amazonka.Rekognition.Types.Geometry | |
Hashable Geometry Source # | |
Defined in Amazonka.Rekognition.Types.Geometry | |
FromJSON Geometry Source # | |
type Rep Geometry Source # | |
Defined in Amazonka.Rekognition.Types.Geometry type Rep Geometry = D1 ('MetaData "Geometry" "Amazonka.Rekognition.Types.Geometry" "libZSservicesZSamazonka-rekognitionZSamazonka-rekognition" 'False) (C1 ('MetaCons "Geometry'" 'PrefixI 'True) (S1 ('MetaSel ('Just "boundingBox") 'NoSourceUnpackedness 'NoSourceStrictness 'DecidedStrict) (Rec0 (Maybe BoundingBox)) :*: S1 ('MetaSel ('Just "polygon") 'NoSourceUnpackedness 'NoSourceStrictness 'DecidedStrict) (Rec0 (Maybe [Point])))) |
newGeometry :: Geometry Source #
Create a value of Geometry
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:boundingBox:Geometry'
, geometry_boundingBox
- An axis-aligned coarse representation of the detected item's location
on the image.
$sel:polygon:Geometry'
, geometry_polygon
- Within the bounding box, a fine-grained polygon around the detected
item.
geometry_boundingBox :: Lens' Geometry (Maybe BoundingBox) Source #
An axis-aligned coarse representation of the detected item's location on the image.
geometry_polygon :: Lens' Geometry (Maybe [Point]) Source #
Within the bounding box, a fine-grained polygon around the detected item.
GroundTruthManifest
data GroundTruthManifest Source #
The S3 bucket that contains an Amazon Sagemaker Ground Truth format manifest file.
See: newGroundTruthManifest
smart constructor.
Instances
newGroundTruthManifest :: GroundTruthManifest Source #
Create a value of GroundTruthManifest
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:s3Object:GroundTruthManifest'
, groundTruthManifest_s3Object
- Undocumented member.
groundTruthManifest_s3Object :: Lens' GroundTruthManifest (Maybe S3Object) Source #
Undocumented member.
HumanLoopActivationOutput
data HumanLoopActivationOutput Source #
Shows the results of the human in the loop evaluation. If there is no HumanLoopArn, the input did not trigger human review.
See: newHumanLoopActivationOutput
smart constructor.
HumanLoopActivationOutput' | |
|
Instances
newHumanLoopActivationOutput :: HumanLoopActivationOutput Source #
Create a value of HumanLoopActivationOutput
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:humanLoopActivationReasons:HumanLoopActivationOutput'
, humanLoopActivationOutput_humanLoopActivationReasons
- Shows if and why human review was needed.
$sel:humanLoopArn:HumanLoopActivationOutput'
, humanLoopActivationOutput_humanLoopArn
- The Amazon Resource Name (ARN) of the HumanLoop created.
$sel:humanLoopActivationConditionsEvaluationResults:HumanLoopActivationOutput'
, humanLoopActivationOutput_humanLoopActivationConditionsEvaluationResults
- Shows the result of condition evaluations, including those conditions
which activated a human review.
humanLoopActivationOutput_humanLoopActivationReasons :: Lens' HumanLoopActivationOutput (Maybe (NonEmpty Text)) Source #
Shows if and why human review was needed.
humanLoopActivationOutput_humanLoopArn :: Lens' HumanLoopActivationOutput (Maybe Text) Source #
The Amazon Resource Name (ARN) of the HumanLoop created.
humanLoopActivationOutput_humanLoopActivationConditionsEvaluationResults :: Lens' HumanLoopActivationOutput (Maybe Text) Source #
Shows the result of condition evaluations, including those conditions which activated a human review.
HumanLoopConfig
data HumanLoopConfig Source #
Sets up the flow definition the image will be sent to if one of the conditions is met. You can also set certain attributes of the image before review.
See: newHumanLoopConfig
smart constructor.
HumanLoopConfig' | |
|
Instances
:: Text | |
-> Text | |
-> HumanLoopConfig |
Create a value of HumanLoopConfig
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:dataAttributes:HumanLoopConfig'
, humanLoopConfig_dataAttributes
- Sets attributes of the input data.
$sel:humanLoopName:HumanLoopConfig'
, humanLoopConfig_humanLoopName
- The name of the human review used for this image. This should be kept
unique within a region.
$sel:flowDefinitionArn:HumanLoopConfig'
, humanLoopConfig_flowDefinitionArn
- The Amazon Resource Name (ARN) of the flow definition. You can create a
flow definition by using the Amazon Sagemaker
CreateFlowDefinition
Operation.
humanLoopConfig_dataAttributes :: Lens' HumanLoopConfig (Maybe HumanLoopDataAttributes) Source #
Sets attributes of the input data.
humanLoopConfig_humanLoopName :: Lens' HumanLoopConfig Text Source #
The name of the human review used for this image. This should be kept unique within a region.
humanLoopConfig_flowDefinitionArn :: Lens' HumanLoopConfig Text Source #
The Amazon Resource Name (ARN) of the flow definition. You can create a flow definition by using the Amazon Sagemaker CreateFlowDefinition Operation.
HumanLoopDataAttributes
data HumanLoopDataAttributes Source #
Allows you to set attributes of the image. Currently, you can declare an image as free of personally identifiable information.
See: newHumanLoopDataAttributes
smart constructor.
HumanLoopDataAttributes' | |
|
Instances
newHumanLoopDataAttributes :: HumanLoopDataAttributes Source #
Create a value of HumanLoopDataAttributes
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:contentClassifiers:HumanLoopDataAttributes'
, humanLoopDataAttributes_contentClassifiers
- Sets whether the input image is free of personally identifiable
information.
humanLoopDataAttributes_contentClassifiers :: Lens' HumanLoopDataAttributes (Maybe [ContentClassifier]) Source #
Sets whether the input image is free of personally identifiable information.
Image
Provides the input image either as bytes or an S3 object.
You pass image bytes to an Amazon Rekognition API operation by using the
Bytes
property. For example, you would use the Bytes
property to
pass an image loaded from a local file system. Image bytes passed by
using the Bytes
property must be base64-encoded. Your code may not
need to encode image bytes if you are using an AWS SDK to call Amazon
Rekognition API operations.
For more information, see Analyzing an Image Loaded from a Local File System in the Amazon Rekognition Developer Guide.
You pass images stored in an S3 bucket to an Amazon Rekognition API
operation by using the S3Object
property. Images stored in an S3
bucket do not need to be base64-encoded.
The region for the S3 bucket containing the S3 object must match the region you use for Amazon Rekognition operations.
If you use the AWS CLI to call Amazon Rekognition operations, passing image bytes using the Bytes property is not supported. You must first upload the image to an Amazon S3 bucket and then call the operation using the S3Object property.
For Amazon Rekognition to process an S3 object, the user must have permission to access the S3 object. For more information, see Resource Based Policies in the Amazon Rekognition Developer Guide.
See: newImage
smart constructor.
Instances
Eq Image Source # | |
Read Image Source # | |
Show Image Source # | |
Generic Image Source # | |
NFData Image Source # | |
Defined in Amazonka.Rekognition.Types.Image | |
Hashable Image Source # | |
Defined in Amazonka.Rekognition.Types.Image | |
ToJSON Image Source # | |
Defined in Amazonka.Rekognition.Types.Image | |
type Rep Image Source # | |
Defined in Amazonka.Rekognition.Types.Image type Rep Image = D1 ('MetaData "Image" "Amazonka.Rekognition.Types.Image" "libZSservicesZSamazonka-rekognitionZSamazonka-rekognition" 'False) (C1 ('MetaCons "Image'" 'PrefixI 'True) (S1 ('MetaSel ('Just "s3Object") 'NoSourceUnpackedness 'NoSourceStrictness 'DecidedStrict) (Rec0 (Maybe S3Object)) :*: S1 ('MetaSel ('Just "bytes") 'NoSourceUnpackedness 'NoSourceStrictness 'DecidedStrict) (Rec0 (Maybe Base64)))) |
Create a value of Image
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:s3Object:Image'
, image_s3Object
- Identifies an S3 object as the image source.
$sel:bytes:Image'
, image_bytes
- Blob of image bytes up to 5 MBs.

Note: This Lens automatically encodes and decodes Base64 data.
The underlying isomorphism will encode to Base64 representation during
serialisation, and decode from Base64 representation during deserialisation.
This Lens accepts and returns only raw unencoded data.
image_bytes :: Lens' Image (Maybe ByteString) Source #
Blob of image bytes up to 5 MBs.

Note: This Lens automatically encodes and decodes Base64 data. The underlying isomorphism will encode to Base64 representation during serialisation, and decode from Base64 representation during deserialisation. This Lens accepts and returns only raw unencoded data.
ImageQuality
data ImageQuality Source #
Identifies face image brightness and sharpness.
See: newImageQuality
smart constructor.
ImageQuality' | |
|
Instances
newImageQuality :: ImageQuality Source #
Create a value of ImageQuality
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:sharpness:ImageQuality'
, imageQuality_sharpness
- Value representing sharpness of the face. The service returns a value
between 0 and 100 (inclusive). A higher value indicates a sharper face
image.
$sel:brightness:ImageQuality'
, imageQuality_brightness
- Value representing brightness of the face. The service returns a value
between 0 and 100 (inclusive). A higher value indicates a brighter face
image.
imageQuality_sharpness :: Lens' ImageQuality (Maybe Double) Source #
Value representing sharpness of the face. The service returns a value between 0 and 100 (inclusive). A higher value indicates a sharper face image.
imageQuality_brightness :: Lens' ImageQuality (Maybe Double) Source #
Value representing brightness of the face. The service returns a value between 0 and 100 (inclusive). A higher value indicates a brighter face image.
Instance
An instance of a label returned by Amazon Rekognition Image (DetectLabels) or by Amazon Rekognition Video (GetLabelDetection).
See: newInstance
smart constructor.
Instance' | |
|
Instances
Eq Instance Source # | |
Read Instance Source # | |
Show Instance Source # | |
Generic Instance Source # | |
NFData Instance Source # | |
Defined in Amazonka.Rekognition.Types.Instance | |
Hashable Instance Source # | |
Defined in Amazonka.Rekognition.Types.Instance | |
FromJSON Instance Source # | |
type Rep Instance Source # | |
Defined in Amazonka.Rekognition.Types.Instance type Rep Instance = D1 ('MetaData "Instance" "Amazonka.Rekognition.Types.Instance" "libZSservicesZSamazonka-rekognitionZSamazonka-rekognition" 'False) (C1 ('MetaCons "Instance'" 'PrefixI 'True) (S1 ('MetaSel ('Just "boundingBox") 'NoSourceUnpackedness 'NoSourceStrictness 'DecidedStrict) (Rec0 (Maybe BoundingBox)) :*: S1 ('MetaSel ('Just "confidence") 'NoSourceUnpackedness 'NoSourceStrictness 'DecidedStrict) (Rec0 (Maybe Double)))) |
newInstance :: Instance Source #
Create a value of Instance
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:boundingBox:Instance'
, instance_boundingBox
- The position of the label instance on the image.
$sel:confidence:Instance'
, instance_confidence
- The confidence that Amazon Rekognition has in the accuracy of the
bounding box.
instance_boundingBox :: Lens' Instance (Maybe BoundingBox) Source #
The position of the label instance on the image.
instance_confidence :: Lens' Instance (Maybe Double) Source #
The confidence that Amazon Rekognition has in the accuracy of the bounding box.
KinesisDataStream
data KinesisDataStream Source #
The Kinesis data stream to which Amazon Rekognition streams the analysis results of an Amazon Rekognition stream processor. For more information, see CreateStreamProcessor in the Amazon Rekognition Developer Guide.
See: newKinesisDataStream
smart constructor.
Instances
newKinesisDataStream :: KinesisDataStream Source #
Create a value of KinesisDataStream
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:arn:KinesisDataStream'
, kinesisDataStream_arn
- ARN of the output Amazon Kinesis Data Streams stream.
kinesisDataStream_arn :: Lens' KinesisDataStream (Maybe Text) Source #
ARN of the output Amazon Kinesis Data Streams stream.
KinesisVideoStream
data KinesisVideoStream Source #
Kinesis video stream that provides the source streaming video for an Amazon Rekognition Video stream processor. For more information, see CreateStreamProcessor in the Amazon Rekognition Developer Guide.
See: newKinesisVideoStream
smart constructor.
Instances
newKinesisVideoStream :: KinesisVideoStream Source #
Create a value of KinesisVideoStream
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:arn:KinesisVideoStream'
, kinesisVideoStream_arn
- ARN of the Kinesis video stream that streams the source video.
kinesisVideoStream_arn :: Lens' KinesisVideoStream (Maybe Text) Source #
ARN of the Kinesis video stream that streams the source video.
KnownGender
data KnownGender Source #
The known gender identity for the celebrity that matches the provided ID.
See: newKnownGender
smart constructor.
KnownGender' | |
|
Instances
newKnownGender :: KnownGender Source #
Create a value of KnownGender
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:type':KnownGender'
, knownGender_type
- A string value of the KnownGender info about the Celebrity.
knownGender_type :: Lens' KnownGender (Maybe KnownGenderType) Source #
A string value of the KnownGender info about the Celebrity.
Label
Structure containing details about the detected label, including the name, detected instances, parent labels, and level of confidence.
See: newLabel
smart constructor.
Label' | |
|
Instances
Eq Label Source # | |
Read Label Source # | |
Show Label Source # | |
Generic Label Source # | |
NFData Label Source # | |
Defined in Amazonka.Rekognition.Types.Label | |
Hashable Label Source # | |
Defined in Amazonka.Rekognition.Types.Label | |
FromJSON Label Source # | |
type Rep Label Source # | |
Defined in Amazonka.Rekognition.Types.Label type Rep Label = D1 ('MetaData "Label" "Amazonka.Rekognition.Types.Label" "libZSservicesZSamazonka-rekognitionZSamazonka-rekognition" 'False) (C1 ('MetaCons "Label'" 'PrefixI 'True) ((S1 ('MetaSel ('Just "confidence") 'NoSourceUnpackedness 'NoSourceStrictness 'DecidedStrict) (Rec0 (Maybe Double)) :*: S1 ('MetaSel ('Just "parents") 'NoSourceUnpackedness 'NoSourceStrictness 'DecidedStrict) (Rec0 (Maybe [Parent]))) :*: (S1 ('MetaSel ('Just "name") 'NoSourceUnpackedness 'NoSourceStrictness 'DecidedStrict) (Rec0 (Maybe Text)) :*: S1 ('MetaSel ('Just "instances") 'NoSourceUnpackedness 'NoSourceStrictness 'DecidedStrict) (Rec0 (Maybe [Instance]))))) |
Create a value of Label
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:confidence:Label'
, label_confidence
- Level of confidence.
$sel:parents:Label'
, label_parents
- The parent labels for a label. The response includes all ancestor
labels.
$sel:name:Label'
, label_name
- The name (label) of the object or scene.
$sel:instances:Label'
, label_instances
- If Label
represents an object, Instances
contains the bounding boxes
for each instance of the detected object. Bounding boxes are returned
for common object labels such as people, cars, furniture, apparel or
pets.
label_parents :: Lens' Label (Maybe [Parent]) Source #
The parent labels for a label. The response includes all ancestor labels.
label_instances :: Lens' Label (Maybe [Instance]) Source #
If Label
represents an object, Instances
contains the bounding boxes
for each instance of the detected object. Bounding boxes are returned
for common object labels such as people, cars, furniture, apparel or
pets.
LabelDetection
data LabelDetection Source #
Information about a label detected in a video analysis request and the time the label was detected in the video.
See: newLabelDetection
smart constructor.
Instances
newLabelDetection :: LabelDetection Source #
Create a value of LabelDetection
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:label:LabelDetection'
, labelDetection_label
- Details about the detected label.
$sel:timestamp:LabelDetection'
, labelDetection_timestamp
- Time, in milliseconds from the start of the video, that the label was
detected.
labelDetection_label :: Lens' LabelDetection (Maybe Label) Source #
Details about the detected label.
labelDetection_timestamp :: Lens' LabelDetection (Maybe Integer) Source #
Time, in milliseconds from the start of the video, that the label was detected.
Landmark
Indicates the location of the landmark on the face.
See: newLandmark
smart constructor.
Landmark' | |
|
Instances
Eq Landmark Source # | |
Read Landmark Source # | |
Show Landmark Source # | |
Generic Landmark Source # | |
NFData Landmark Source # | |
Defined in Amazonka.Rekognition.Types.Landmark | |
Hashable Landmark Source # | |
Defined in Amazonka.Rekognition.Types.Landmark | |
FromJSON Landmark Source # | |
type Rep Landmark Source # | |
Defined in Amazonka.Rekognition.Types.Landmark type Rep Landmark = D1 ('MetaData "Landmark" "Amazonka.Rekognition.Types.Landmark" "libZSservicesZSamazonka-rekognitionZSamazonka-rekognition" 'False) (C1 ('MetaCons "Landmark'" 'PrefixI 'True) (S1 ('MetaSel ('Just "type'") 'NoSourceUnpackedness 'NoSourceStrictness 'DecidedStrict) (Rec0 (Maybe LandmarkType)) :*: (S1 ('MetaSel ('Just "x") 'NoSourceUnpackedness 'NoSourceStrictness 'DecidedStrict) (Rec0 (Maybe Double)) :*: S1 ('MetaSel ('Just "y") 'NoSourceUnpackedness 'NoSourceStrictness 'DecidedStrict) (Rec0 (Maybe Double))))) |
newLandmark :: Landmark Source #
Create a value of Landmark
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:type':Landmark'
, landmark_type
- Type of landmark.
$sel:x:Landmark'
, landmark_x
- The x-coordinate of the landmark expressed as a ratio of the width of
the image. The x-coordinate is measured from the left-side of the image.
For example, if the image is 700 pixels wide and the x-coordinate of the
landmark is at 350 pixels, this value is 0.5.
$sel:y:Landmark'
, landmark_y
- The y-coordinate of the landmark expressed as a ratio of the height of
the image. The y-coordinate is measured from the top of the image. For
example, if the image height is 200 pixels and the y-coordinate of the
landmark is at 50 pixels, this value is 0.25.
landmark_type :: Lens' Landmark (Maybe LandmarkType) Source #
Type of landmark.
landmark_x :: Lens' Landmark (Maybe Double) Source #
The x-coordinate of the landmark expressed as a ratio of the width of the image. The x-coordinate is measured from the left-side of the image. For example, if the image is 700 pixels wide and the x-coordinate of the landmark is at 350 pixels, this value is 0.5.
landmark_y :: Lens' Landmark (Maybe Double) Source #
The y-coordinate of the landmark expressed as a ratio of the height of the image. The y-coordinate is measured from the top of the image. For example, if the image height is 200 pixels and the y-coordinate of the landmark is at 50 pixels, this value is 0.25.
ModerationLabel
data ModerationLabel Source #
Provides information about a single type of inappropriate, unwanted, or offensive content found in an image or video. Each type of moderated content has a label within a hierarchical taxonomy. For more information, see Content moderation in the Amazon Rekognition Developer Guide.
See: newModerationLabel
smart constructor.
ModerationLabel' | |
|
Instances
newModerationLabel :: ModerationLabel Source #
Create a value of ModerationLabel
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:confidence:ModerationLabel'
, moderationLabel_confidence
- Specifies the confidence that Amazon Rekognition has that the label has
been correctly identified.
If you don't specify the MinConfidence
parameter in the call to
DetectModerationLabels
, the operation returns labels with a confidence
value greater than or equal to 50 percent.
$sel:name:ModerationLabel'
, moderationLabel_name
- The label name for the type of unsafe content detected in the image.
$sel:parentName:ModerationLabel'
, moderationLabel_parentName
- The name for the parent label. Labels at the top level of the hierarchy
have the parent label ""
.
moderationLabel_confidence :: Lens' ModerationLabel (Maybe Double) Source #
Specifies the confidence that Amazon Rekognition has that the label has been correctly identified.
If you don't specify the MinConfidence
parameter in the call to
DetectModerationLabels
, the operation returns labels with a confidence
value greater than or equal to 50 percent.
moderationLabel_name :: Lens' ModerationLabel (Maybe Text) Source #
The label name for the type of unsafe content detected in the image.
moderationLabel_parentName :: Lens' ModerationLabel (Maybe Text) Source #
The name for the parent label. Labels at the top level of the hierarchy
have the parent label ""
.
MouthOpen
Indicates whether or not the mouth on the face is open, and the confidence level in the determination.
See: newMouthOpen
smart constructor.
Instances
Eq MouthOpen Source # | |
Read MouthOpen Source # | |
Show MouthOpen Source # | |
Generic MouthOpen Source # | |
NFData MouthOpen Source # | |
Defined in Amazonka.Rekognition.Types.MouthOpen | |
Hashable MouthOpen Source # | |
Defined in Amazonka.Rekognition.Types.MouthOpen | |
FromJSON MouthOpen Source # | |
type Rep MouthOpen Source # | |
Defined in Amazonka.Rekognition.Types.MouthOpen type Rep MouthOpen = D1 ('MetaData "MouthOpen" "Amazonka.Rekognition.Types.MouthOpen" "libZSservicesZSamazonka-rekognitionZSamazonka-rekognition" 'False) (C1 ('MetaCons "MouthOpen'" 'PrefixI 'True) (S1 ('MetaSel ('Just "value") 'NoSourceUnpackedness 'NoSourceStrictness 'DecidedStrict) (Rec0 (Maybe Bool)) :*: S1 ('MetaSel ('Just "confidence") 'NoSourceUnpackedness 'NoSourceStrictness 'DecidedStrict) (Rec0 (Maybe Double)))) |
newMouthOpen :: MouthOpen Source #
Create a value of MouthOpen
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:value:MouthOpen'
, mouthOpen_value
- Boolean value that indicates whether the mouth on the face is open or
not.
$sel:confidence:MouthOpen'
, mouthOpen_confidence
- Level of confidence in the determination.
mouthOpen_value :: Lens' MouthOpen (Maybe Bool) Source #
Boolean value that indicates whether the mouth on the face is open or not.
mouthOpen_confidence :: Lens' MouthOpen (Maybe Double) Source #
Level of confidence in the determination.
Mustache
Indicates whether or not the face has a mustache, and the confidence level in the determination.
See: newMustache
smart constructor.
Instances
Eq Mustache Source # | |
Read Mustache Source # | |
Show Mustache Source # | |
Generic Mustache Source # | |
NFData Mustache Source # | |
Defined in Amazonka.Rekognition.Types.Mustache | |
Hashable Mustache Source # | |
Defined in Amazonka.Rekognition.Types.Mustache | |
FromJSON Mustache Source # | |
type Rep Mustache Source # | |
Defined in Amazonka.Rekognition.Types.Mustache type Rep Mustache = D1 ('MetaData "Mustache" "Amazonka.Rekognition.Types.Mustache" "libZSservicesZSamazonka-rekognitionZSamazonka-rekognition" 'False) (C1 ('MetaCons "Mustache'" 'PrefixI 'True) (S1 ('MetaSel ('Just "value") 'NoSourceUnpackedness 'NoSourceStrictness 'DecidedStrict) (Rec0 (Maybe Bool)) :*: S1 ('MetaSel ('Just "confidence") 'NoSourceUnpackedness 'NoSourceStrictness 'DecidedStrict) (Rec0 (Maybe Double)))) |
newMustache :: Mustache Source #
Create a value of Mustache
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:value:Mustache'
, mustache_value
- Boolean value that indicates whether the face has mustache or not.
$sel:confidence:Mustache'
, mustache_confidence
- Level of confidence in the determination.
mustache_value :: Lens' Mustache (Maybe Bool) Source #
Boolean value that indicates whether the face has mustache or not.
mustache_confidence :: Lens' Mustache (Maybe Double) Source #
Level of confidence in the determination.
NotificationChannel
data NotificationChannel Source #
The Amazon Simple Notification Service topic to which Amazon Rekognition publishes the completion status of a video analysis operation. For more information, see api-video. Note that the Amazon SNS topic must have a topic name that begins with AmazonRekognition if you are using the AmazonRekognitionServiceRole permissions policy to access the topic. For more information, see Giving access to multiple Amazon SNS topics.
See: newNotificationChannel
smart constructor.
NotificationChannel' | |
|
Instances
newNotificationChannel Source #
:: Text | |
-> Text | |
-> NotificationChannel |
Create a value of NotificationChannel
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:sNSTopicArn:NotificationChannel'
, notificationChannel_sNSTopicArn
- The Amazon SNS topic to which Amazon Rekognition posts the completion
status.
$sel:roleArn:NotificationChannel'
, notificationChannel_roleArn
- The ARN of an IAM role that gives Amazon Rekognition publishing
permissions to the Amazon SNS topic.
notificationChannel_sNSTopicArn :: Lens' NotificationChannel Text Source #
The Amazon SNS topic to which Amazon Rekognition posts the completion status.
notificationChannel_roleArn :: Lens' NotificationChannel Text Source #
The ARN of an IAM role that gives Amazon Rekognition publishing permissions to the Amazon SNS topic.
OutputConfig
data OutputConfig Source #
The S3 bucket and folder location where training output is placed.
See: newOutputConfig
smart constructor.
Instances
newOutputConfig :: OutputConfig Source #
Create a value of OutputConfig
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:s3KeyPrefix:OutputConfig'
, outputConfig_s3KeyPrefix
- The prefix applied to the training output files.
$sel:s3Bucket:OutputConfig'
, outputConfig_s3Bucket
- The S3 bucket where training output is placed.
outputConfig_s3KeyPrefix :: Lens' OutputConfig (Maybe Text) Source #
The prefix applied to the training output files.
outputConfig_s3Bucket :: Lens' OutputConfig (Maybe Text) Source #
The S3 bucket where training output is placed.
Parent
A parent label for a label. A label can have 0, 1, or more parents.
See: newParent
smart constructor.
Instances
Eq Parent Source # | |
Read Parent Source # | |
Show Parent Source # | |
Generic Parent Source # | |
NFData Parent Source # | |
Defined in Amazonka.Rekognition.Types.Parent | |
Hashable Parent Source # | |
Defined in Amazonka.Rekognition.Types.Parent | |
FromJSON Parent Source # | |
type Rep Parent Source # | |
Defined in Amazonka.Rekognition.Types.Parent |
Create a value of Parent
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:name:Parent'
, parent_name
- The name of the parent label.
PersonDetail
data PersonDetail Source #
Details about a person detected in a video analysis request.
See: newPersonDetail
smart constructor.
PersonDetail' | |
|
Instances
newPersonDetail :: PersonDetail Source #
Create a value of PersonDetail
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:boundingBox:PersonDetail'
, personDetail_boundingBox
- Bounding box around the detected person.
$sel:index:PersonDetail'
, personDetail_index
- Identifier for the person detected within a video. Use to keep
track of the person throughout the video. The identifier is not stored
by Amazon Rekognition.
$sel:face:PersonDetail'
, personDetail_face
- Face details for the detected person.
personDetail_boundingBox :: Lens' PersonDetail (Maybe BoundingBox) Source #
Bounding box around the detected person.
personDetail_index :: Lens' PersonDetail (Maybe Integer) Source #
Identifier for the person detected within a video. Use to keep track of the person throughout the video. The identifier is not stored by Amazon Rekognition.
personDetail_face :: Lens' PersonDetail (Maybe FaceDetail) Source #
Face details for the detected person.
PersonDetection
data PersonDetection Source #
Details and path tracking information for a single time a person's path
is tracked in a video. Amazon Rekognition operations that track
people's paths return an array of PersonDetection
objects with
elements for each time a person's path is tracked in a video.
For more information, see GetPersonTracking in the Amazon Rekognition Developer Guide.
See: newPersonDetection
smart constructor.
Instances
newPersonDetection :: PersonDetection Source #
Create a value of PersonDetection
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:person:PersonDetection'
, personDetection_person
- Details about a person whose path was tracked in a video.
$sel:timestamp:PersonDetection'
, personDetection_timestamp
- The time, in milliseconds from the start of the video, that the
person's path was tracked.
personDetection_person :: Lens' PersonDetection (Maybe PersonDetail) Source #
Details about a person whose path was tracked in a video.
personDetection_timestamp :: Lens' PersonDetection (Maybe Integer) Source #
The time, in milliseconds from the start of the video, that the person's path was tracked.
PersonMatch
data PersonMatch Source #
Information about a person whose face matches a face(s) in an Amazon
Rekognition collection. Includes information about the faces in the
Amazon Rekognition collection (FaceMatch), information about the person
(PersonDetail), and the time stamp for when the person was detected in a
video. An array of PersonMatch
objects is returned by GetFaceSearch.
See: newPersonMatch
smart constructor.
PersonMatch' | |
|
Instances
newPersonMatch :: PersonMatch Source #
Create a value of PersonMatch
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:faceMatches:PersonMatch'
, personMatch_faceMatches
- Information about the faces in the input collection that match the face
of a person in the video.
$sel:person:PersonMatch'
, personMatch_person
- Information about the matched person.
$sel:timestamp:PersonMatch'
, personMatch_timestamp
- The time, in milliseconds from the beginning of the video, that the
person was matched in the video.
personMatch_faceMatches :: Lens' PersonMatch (Maybe [FaceMatch]) Source #
Information about the faces in the input collection that match the face of a person in the video.
personMatch_person :: Lens' PersonMatch (Maybe PersonDetail) Source #
Information about the matched person.
personMatch_timestamp :: Lens' PersonMatch (Maybe Integer) Source #
The time, in milliseconds from the beginning of the video, that the person was matched in the video.
Point
The X and Y coordinates of a point on an image. The X and Y values returned are ratios of the overall image size. For example, if the input image is 700x200 and the operation returns X=0.5 and Y=0.25, then the point is at the (350,50) pixel coordinate on the image.
An array of Point
objects, Polygon
, is returned by DetectText and by
DetectCustomLabels. Polygon
represents a fine-grained polygon around a
detected item. For more information, see Geometry in the Amazon
Rekognition Developer Guide.
See: newPoint
smart constructor.
Instances
Eq Point Source # | |
Read Point Source # | |
Show Point Source # | |
Generic Point Source # | |
NFData Point Source # | |
Defined in Amazonka.Rekognition.Types.Point | |
Hashable Point Source # | |
Defined in Amazonka.Rekognition.Types.Point | |
FromJSON Point Source # | |
type Rep Point Source # | |
Defined in Amazonka.Rekognition.Types.Point type Rep Point = D1 ('MetaData "Point" "Amazonka.Rekognition.Types.Point" "libZSservicesZSamazonka-rekognitionZSamazonka-rekognition" 'False) (C1 ('MetaCons "Point'" 'PrefixI 'True) (S1 ('MetaSel ('Just "x") 'NoSourceUnpackedness 'NoSourceStrictness 'DecidedStrict) (Rec0 (Maybe Double)) :*: S1 ('MetaSel ('Just "y") 'NoSourceUnpackedness 'NoSourceStrictness 'DecidedStrict) (Rec0 (Maybe Double)))) |
Create a value of Point
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:x:Point'
, point_x
- The value of the X coordinate for a point on a Polygon
.
$sel:y:Point'
, point_y
- The value of the Y coordinate for a point on a Polygon
.
point_x :: Lens' Point (Maybe Double) Source #
The value of the X coordinate for a point on a Polygon
.
point_y :: Lens' Point (Maybe Double) Source #
The value of the Y coordinate for a point on a Polygon
.
Pose
Indicates the pose of the face as determined by its pitch, roll, and yaw.
See: newPose
smart constructor.
Instances
Eq Pose Source # | |
Read Pose Source # | |
Show Pose Source # | |
Generic Pose Source # | |
NFData Pose Source # | |
Defined in Amazonka.Rekognition.Types.Pose | |
Hashable Pose Source # | |
Defined in Amazonka.Rekognition.Types.Pose | |
FromJSON Pose Source # | |
type Rep Pose Source # | |
Defined in Amazonka.Rekognition.Types.Pose type Rep Pose = D1 ('MetaData "Pose" "Amazonka.Rekognition.Types.Pose" "libZSservicesZSamazonka-rekognitionZSamazonka-rekognition" 'False) (C1 ('MetaCons "Pose'" 'PrefixI 'True) (S1 ('MetaSel ('Just "yaw") 'NoSourceUnpackedness 'NoSourceStrictness 'DecidedStrict) (Rec0 (Maybe Double)) :*: (S1 ('MetaSel ('Just "roll") 'NoSourceUnpackedness 'NoSourceStrictness 'DecidedStrict) (Rec0 (Maybe Double)) :*: S1 ('MetaSel ('Just "pitch") 'NoSourceUnpackedness 'NoSourceStrictness 'DecidedStrict) (Rec0 (Maybe Double))))) |
Create a value of Pose
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:yaw:Pose'
, pose_yaw
- Value representing the face rotation on the yaw axis.
$sel:roll:Pose'
, pose_roll
- Value representing the face rotation on the roll axis.
$sel:pitch:Pose'
, pose_pitch
- Value representing the face rotation on the pitch axis.
pose_roll :: Lens' Pose (Maybe Double) Source #
Value representing the face rotation on the roll axis.
pose_pitch :: Lens' Pose (Maybe Double) Source #
Value representing the face rotation on the pitch axis.
ProjectDescription
data ProjectDescription Source #
A description of an Amazon Rekognition Custom Labels project.
See: newProjectDescription
smart constructor.
ProjectDescription' | |
|
Instances
newProjectDescription :: ProjectDescription Source #
Create a value of ProjectDescription
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:status:ProjectDescription'
, projectDescription_status
- The current status of the project.
$sel:creationTimestamp:ProjectDescription'
, projectDescription_creationTimestamp
- The Unix timestamp for the date and time that the project was created.
$sel:projectArn:ProjectDescription'
, projectDescription_projectArn
- The Amazon Resource Name (ARN) of the project.
projectDescription_status :: Lens' ProjectDescription (Maybe ProjectStatus) Source #
The current status of the project.
projectDescription_creationTimestamp :: Lens' ProjectDescription (Maybe UTCTime) Source #
The Unix timestamp for the date and time that the project was created.
projectDescription_projectArn :: Lens' ProjectDescription (Maybe Text) Source #
The Amazon Resource Name (ARN) of the project.
ProjectVersionDescription
data ProjectVersionDescription Source #
The description of a version of a model.
See: newProjectVersionDescription
smart constructor.
ProjectVersionDescription' | |
|
Instances
newProjectVersionDescription :: ProjectVersionDescription Source #
Create a value of ProjectVersionDescription
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:minInferenceUnits:ProjectVersionDescription'
, projectVersionDescription_minInferenceUnits
- The minimum number of inference units used by the model. For more
information, see StartProjectVersion.
$sel:status:ProjectVersionDescription'
, projectVersionDescription_status
- The current status of the model version.
$sel:evaluationResult:ProjectVersionDescription'
, projectVersionDescription_evaluationResult
- The training results. EvaluationResult
is only returned if training is
successful.
$sel:manifestSummary:ProjectVersionDescription'
, projectVersionDescription_manifestSummary
- The location of the summary manifest. The summary manifest provides
aggregate data validation results for the training and test datasets.
$sel:kmsKeyId:ProjectVersionDescription'
, projectVersionDescription_kmsKeyId
- The identifier for the AWS Key Management Service (AWS KMS) customer
master key that was used to encrypt the model during training.
$sel:testingDataResult:ProjectVersionDescription'
, projectVersionDescription_testingDataResult
- Contains information about the testing results.
$sel:statusMessage:ProjectVersionDescription'
, projectVersionDescription_statusMessage
- A descriptive message for an error or warning that occurred.
$sel:creationTimestamp:ProjectVersionDescription'
, projectVersionDescription_creationTimestamp
- The Unix datetime for the date and time that training started.
$sel:projectVersionArn:ProjectVersionDescription'
, projectVersionDescription_projectVersionArn
- The Amazon Resource Name (ARN) of the model version.
$sel:outputConfig:ProjectVersionDescription'
, projectVersionDescription_outputConfig
- The location where training results are saved.
$sel:billableTrainingTimeInSeconds:ProjectVersionDescription'
, projectVersionDescription_billableTrainingTimeInSeconds
- The duration, in seconds, that the model version has been billed for
training. This value is only returned if the model version has been
successfully trained.
$sel:trainingEndTimestamp:ProjectVersionDescription'
, projectVersionDescription_trainingEndTimestamp
- The Unix date and time that training of the model ended.
$sel:trainingDataResult:ProjectVersionDescription'
, projectVersionDescription_trainingDataResult
- Contains information about the training results.
projectVersionDescription_minInferenceUnits :: Lens' ProjectVersionDescription (Maybe Natural) Source #
The minimum number of inference units used by the model. For more information, see StartProjectVersion.
projectVersionDescription_status :: Lens' ProjectVersionDescription (Maybe ProjectVersionStatus) Source #
The current status of the model version.
projectVersionDescription_evaluationResult :: Lens' ProjectVersionDescription (Maybe EvaluationResult) Source #
The training results. EvaluationResult
is only returned if training is
successful.
projectVersionDescription_manifestSummary :: Lens' ProjectVersionDescription (Maybe GroundTruthManifest) Source #
The location of the summary manifest. The summary manifest provides aggregate data validation results for the training and test datasets.
projectVersionDescription_kmsKeyId :: Lens' ProjectVersionDescription (Maybe Text) Source #
The identifier for the AWS Key Management Service (AWS KMS) customer master key that was used to encrypt the model during training.
projectVersionDescription_testingDataResult :: Lens' ProjectVersionDescription (Maybe TestingDataResult) Source #
Contains information about the testing results.
projectVersionDescription_statusMessage :: Lens' ProjectVersionDescription (Maybe Text) Source #
A descriptive message for an error or warning that occurred.
projectVersionDescription_creationTimestamp :: Lens' ProjectVersionDescription (Maybe UTCTime) Source #
The Unix datetime for the date and time that training started.
projectVersionDescription_projectVersionArn :: Lens' ProjectVersionDescription (Maybe Text) Source #
The Amazon Resource Name (ARN) of the model version.
projectVersionDescription_outputConfig :: Lens' ProjectVersionDescription (Maybe OutputConfig) Source #
The location where training results are saved.
projectVersionDescription_billableTrainingTimeInSeconds :: Lens' ProjectVersionDescription (Maybe Natural) Source #
The duration, in seconds, that the model version has been billed for training. This value is only returned if the model version has been successfully trained.
projectVersionDescription_trainingEndTimestamp :: Lens' ProjectVersionDescription (Maybe UTCTime) Source #
The Unix date and time that training of the model ended.
projectVersionDescription_trainingDataResult :: Lens' ProjectVersionDescription (Maybe TrainingDataResult) Source #
Contains information about the training results.
ProtectiveEquipmentBodyPart
data ProtectiveEquipmentBodyPart Source #
Information about a body part detected by DetectProtectiveEquipment that
contains PPE. An array of ProtectiveEquipmentBodyPart
objects is
returned for each person detected by DetectProtectiveEquipment
.
See: newProtectiveEquipmentBodyPart
smart constructor.
ProtectiveEquipmentBodyPart' | |
|
Instances
newProtectiveEquipmentBodyPart :: ProtectiveEquipmentBodyPart Source #
Create a value of ProtectiveEquipmentBodyPart
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:equipmentDetections:ProtectiveEquipmentBodyPart'
, protectiveEquipmentBodyPart_equipmentDetections
- An array of Personal Protective Equipment items detected around a body
part.
$sel:confidence:ProtectiveEquipmentBodyPart'
, protectiveEquipmentBodyPart_confidence
- The confidence that Amazon Rekognition has in the detection accuracy of
the detected body part.
$sel:name:ProtectiveEquipmentBodyPart'
, protectiveEquipmentBodyPart_name
- The detected body part.
protectiveEquipmentBodyPart_equipmentDetections :: Lens' ProtectiveEquipmentBodyPart (Maybe [EquipmentDetection]) Source #
An array of Personal Protective Equipment items detected around a body part.
protectiveEquipmentBodyPart_confidence :: Lens' ProtectiveEquipmentBodyPart (Maybe Double) Source #
The confidence that Amazon Rekognition has in the detection accuracy of the detected body part.
protectiveEquipmentBodyPart_name :: Lens' ProtectiveEquipmentBodyPart (Maybe BodyPart) Source #
The detected body part.
ProtectiveEquipmentPerson
data ProtectiveEquipmentPerson Source #
A person detected by a call to DetectProtectiveEquipment. The API
returns all persons detected in the input image in an array of
ProtectiveEquipmentPerson
objects.
See: newProtectiveEquipmentPerson
smart constructor.
ProtectiveEquipmentPerson' | |
|
Instances
newProtectiveEquipmentPerson :: ProtectiveEquipmentPerson Source #
Create a value of ProtectiveEquipmentPerson
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:bodyParts:ProtectiveEquipmentPerson'
, protectiveEquipmentPerson_bodyParts
- An array of body parts detected on a person's body (including body
parts without PPE).
$sel:boundingBox:ProtectiveEquipmentPerson'
, protectiveEquipmentPerson_boundingBox
- A bounding box around the detected person.
$sel:confidence:ProtectiveEquipmentPerson'
, protectiveEquipmentPerson_confidence
- The confidence that Amazon Rekognition has that the bounding box
contains a person.
$sel:id:ProtectiveEquipmentPerson'
, protectiveEquipmentPerson_id
- The identifier for the detected person. The identifier is only unique
for a single call to DetectProtectiveEquipment
.
protectiveEquipmentPerson_bodyParts :: Lens' ProtectiveEquipmentPerson (Maybe [ProtectiveEquipmentBodyPart]) Source #
An array of body parts detected on a person's body (including body parts without PPE).
protectiveEquipmentPerson_boundingBox :: Lens' ProtectiveEquipmentPerson (Maybe BoundingBox) Source #
A bounding box around the detected person.
protectiveEquipmentPerson_confidence :: Lens' ProtectiveEquipmentPerson (Maybe Double) Source #
The confidence that Amazon Rekognition has that the bounding box contains a person.
protectiveEquipmentPerson_id :: Lens' ProtectiveEquipmentPerson (Maybe Natural) Source #
The identifier for the detected person. The identifier is only unique
for a single call to DetectProtectiveEquipment
.
ProtectiveEquipmentSummarizationAttributes
data ProtectiveEquipmentSummarizationAttributes Source #
Specifies summary attributes to return from a call to
DetectProtectiveEquipment. You can specify which types of PPE to
summarize. You can also specify a minimum confidence value for
detections. Summary information is returned in the Summary
(ProtectiveEquipmentSummary) field of the response from
DetectProtectiveEquipment
. The summary includes which persons in an
image were detected wearing the requested types of person protective
equipment (PPE), which persons were detected as not wearing PPE, and the
persons in which a determination could not be made. For more
information, see ProtectiveEquipmentSummary.
See: newProtectiveEquipmentSummarizationAttributes
smart constructor.
ProtectiveEquipmentSummarizationAttributes' | |
|
Instances
newProtectiveEquipmentSummarizationAttributes Source #
:: Double |
|
-> ProtectiveEquipmentSummarizationAttributes |
Create a value of ProtectiveEquipmentSummarizationAttributes
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:minConfidence:ProtectiveEquipmentSummarizationAttributes'
, protectiveEquipmentSummarizationAttributes_minConfidence
- The minimum confidence level for which you want summary information. The
confidence level applies to person detection, body part detection,
equipment detection, and body part coverage. Amazon Rekognition doesn't
return summary information with a confidence lower than this specified
value. There isn't a default value.
Specify a MinConfidence
value that is between 50-100% as
DetectProtectiveEquipment
returns predictions only where the detection
confidence is between 50% - 100%. If you specify a value that is less
than 50%, the results are the same as specifying a value of 50%.
$sel:requiredEquipmentTypes:ProtectiveEquipmentSummarizationAttributes'
, protectiveEquipmentSummarizationAttributes_requiredEquipmentTypes
- An array of personal protective equipment types for which you want
summary information. If a person is detected wearing a required
equipment type, the person's ID is added to the
PersonsWithRequiredEquipment
array field returned in
ProtectiveEquipmentSummary by DetectProtectiveEquipment
.
protectiveEquipmentSummarizationAttributes_minConfidence :: Lens' ProtectiveEquipmentSummarizationAttributes Double Source #
The minimum confidence level for which you want summary information. The confidence level applies to person detection, body part detection, equipment detection, and body part coverage. Amazon Rekognition doesn't return summary information with a confidence lower than this specified value. There isn't a default value.
Specify a MinConfidence
value that is between 50-100% as
DetectProtectiveEquipment
returns predictions only where the detection
confidence is between 50% - 100%. If you specify a value that is less
than 50%, the results are the same as specifying a value of 50%.
protectiveEquipmentSummarizationAttributes_requiredEquipmentTypes :: Lens' ProtectiveEquipmentSummarizationAttributes [ProtectiveEquipmentType] Source #
An array of personal protective equipment types for which you want
summary information. If a person is detected wearing a required
equipment type, the person's ID is added to the
PersonsWithRequiredEquipment
array field returned in
ProtectiveEquipmentSummary by DetectProtectiveEquipment
.
ProtectiveEquipmentSummary
data ProtectiveEquipmentSummary Source #
Summary information for required items of personal protective equipment
(PPE) detected on persons by a call to DetectProtectiveEquipment. You
specify the required type of PPE in the SummarizationAttributes
(ProtectiveEquipmentSummarizationAttributes) input parameter. The
summary includes which persons were detected wearing the required
personal protective equipment (PersonsWithRequiredEquipment
), which
persons were detected as not wearing the required PPE
(PersonsWithoutRequiredEquipment
), and the persons in which a
determination could not be made (PersonsIndeterminate
).
To get a total for each category, use the size of the field array. For
example, to find out how many people were detected as wearing the
specified PPE, use the size of the PersonsWithRequiredEquipment
array.
If you want to find out more about a person, such as the location
(BoundingBox) of the person on the image, use the person ID in each
array element. Each person ID matches the ID field of a
ProtectiveEquipmentPerson object returned in the Persons
array by
DetectProtectiveEquipment
.
See: newProtectiveEquipmentSummary
smart constructor.
ProtectiveEquipmentSummary' | |
|
Instances
newProtectiveEquipmentSummary :: ProtectiveEquipmentSummary Source #
Create a value of ProtectiveEquipmentSummary
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:personsWithRequiredEquipment:ProtectiveEquipmentSummary'
, protectiveEquipmentSummary_personsWithRequiredEquipment
- An array of IDs for persons who are wearing detected personal protective
equipment.
$sel:personsWithoutRequiredEquipment:ProtectiveEquipmentSummary'
, protectiveEquipmentSummary_personsWithoutRequiredEquipment
- An array of IDs for persons who are not wearing all of the types of PPE
specified in the RequiredEquipmentTypes
field of the detected personal
protective equipment.
$sel:personsIndeterminate:ProtectiveEquipmentSummary'
, protectiveEquipmentSummary_personsIndeterminate
- An array of IDs for persons where it was not possible to determine if
they are wearing personal protective equipment.
protectiveEquipmentSummary_personsWithRequiredEquipment :: Lens' ProtectiveEquipmentSummary (Maybe [Natural]) Source #
An array of IDs for persons who are wearing detected personal protective equipment.
protectiveEquipmentSummary_personsWithoutRequiredEquipment :: Lens' ProtectiveEquipmentSummary (Maybe [Natural]) Source #
An array of IDs for persons who are not wearing all of the types of PPE
specified in the RequiredEquipmentTypes
field of the detected personal
protective equipment.
protectiveEquipmentSummary_personsIndeterminate :: Lens' ProtectiveEquipmentSummary (Maybe [Natural]) Source #
An array of IDs for persons where it was not possible to determine if they are wearing personal protective equipment.
RegionOfInterest
data RegionOfInterest Source #
Specifies a location within the frame that Rekognition checks for text.
Uses a BoundingBox
object to set a region of the screen.
A word is included in the region if the word is more than half in that region. If there is more than one region, the word will be compared with all regions of the screen. Any word more than half in a region is kept in the results.
See: newRegionOfInterest
smart constructor.
RegionOfInterest' | |
|
Instances
newRegionOfInterest :: RegionOfInterest Source #
Create a value of RegionOfInterest
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:boundingBox:RegionOfInterest'
, regionOfInterest_boundingBox
- The box representing a region of interest on screen.
regionOfInterest_boundingBox :: Lens' RegionOfInterest (Maybe BoundingBox) Source #
The box representing a region of interest on screen.
S3Object
Provides the S3 bucket name and object name.
The region for the S3 bucket containing the S3 object must match the region you use for Amazon Rekognition operations.
For Amazon Rekognition to process an S3 object, the user must have permission to access the S3 object. For more information, see Resource-Based Policies in the Amazon Rekognition Developer Guide.
See: newS3Object
smart constructor.
Instances
Eq S3Object Source # | |
Read S3Object Source # | |
Show S3Object Source # | |
Generic S3Object Source # | |
NFData S3Object Source # | |
Defined in Amazonka.Rekognition.Types.S3Object | |
Hashable S3Object Source # | |
Defined in Amazonka.Rekognition.Types.S3Object | |
ToJSON S3Object Source # | |
Defined in Amazonka.Rekognition.Types.S3Object | |
FromJSON S3Object Source # | |
type Rep S3Object Source # | |
Defined in Amazonka.Rekognition.Types.S3Object type Rep S3Object = D1 ('MetaData "S3Object" "Amazonka.Rekognition.Types.S3Object" "libZSservicesZSamazonka-rekognitionZSamazonka-rekognition" 'False) (C1 ('MetaCons "S3Object'" 'PrefixI 'True) (S1 ('MetaSel ('Just "bucket") 'NoSourceUnpackedness 'NoSourceStrictness 'DecidedStrict) (Rec0 (Maybe Text)) :*: (S1 ('MetaSel ('Just "name") 'NoSourceUnpackedness 'NoSourceStrictness 'DecidedStrict) (Rec0 (Maybe Text)) :*: S1 ('MetaSel ('Just "version") 'NoSourceUnpackedness 'NoSourceStrictness 'DecidedStrict) (Rec0 (Maybe Text))))) |
newS3Object :: S3Object Source #
Create a value of S3Object
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:bucket:S3Object'
, s3Object_bucket
- Name of the S3 bucket.
$sel:name:S3Object'
, s3Object_name
- S3 object key name.
$sel:version:S3Object'
, s3Object_version
- If the bucket is versioning enabled, you can specify the object version.
s3Object_version :: Lens' S3Object (Maybe Text) Source #
If the bucket is versioning enabled, you can specify the object version.
SegmentDetection
data SegmentDetection Source #
A technical cue or shot detection segment detected in a video. An array
of SegmentDetection
objects containing all segments detected in a
stored video is returned by GetSegmentDetection.
See: newSegmentDetection
smart constructor.
SegmentDetection' | |
|
Instances
newSegmentDetection :: SegmentDetection Source #
Create a value of SegmentDetection
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:technicalCueSegment:SegmentDetection'
, segmentDetection_technicalCueSegment
- If the segment is a technical cue, contains information about the
technical cue.
$sel:endFrameNumber:SegmentDetection'
, segmentDetection_endFrameNumber
- The frame number at the end of a video segment, using a frame index that
starts with 0.
$sel:durationSMPTE:SegmentDetection'
, segmentDetection_durationSMPTE
- The duration of the timecode for the detected segment in SMPTE format.
$sel:endTimestampMillis:SegmentDetection'
, segmentDetection_endTimestampMillis
- The end time of the detected segment, in milliseconds, from the start of
the video. This value is rounded down.
$sel:startTimecodeSMPTE:SegmentDetection'
, segmentDetection_startTimecodeSMPTE
- The frame-accurate SMPTE timecode, from the start of a video, for the
start of a detected segment. StartTimecode
is in HH:MM:SS:fr format
(and ;fr for drop frame-rates).
$sel:endTimecodeSMPTE:SegmentDetection'
, segmentDetection_endTimecodeSMPTE
- The frame-accurate SMPTE timecode, from the start of a video, for the
end of a detected segment. EndTimecode
is in HH:MM:SS:fr format (and
;fr for drop frame-rates).
$sel:durationMillis:SegmentDetection'
, segmentDetection_durationMillis
- The duration of the detected segment in milliseconds.
$sel:durationFrames:SegmentDetection'
, segmentDetection_durationFrames
- The duration of a video segment, expressed in frames.
$sel:startTimestampMillis:SegmentDetection'
, segmentDetection_startTimestampMillis
- The start time of the detected segment in milliseconds from the start of
the video. This value is rounded down. For example, if the actual
timestamp is 100.6667 milliseconds, Amazon Rekognition Video returns a
value of 100 millis.
$sel:type':SegmentDetection'
, segmentDetection_type
- The type of the segment. Valid values are TECHNICAL_CUE
and SHOT
.
$sel:shotSegment:SegmentDetection'
, segmentDetection_shotSegment
- If the segment is a shot detection, contains information about the shot
detection.
$sel:startFrameNumber:SegmentDetection'
, segmentDetection_startFrameNumber
- The frame number of the start of a video segment, using a frame index
that starts with 0.
segmentDetection_technicalCueSegment :: Lens' SegmentDetection (Maybe TechnicalCueSegment) Source #
If the segment is a technical cue, contains information about the technical cue.
segmentDetection_endFrameNumber :: Lens' SegmentDetection (Maybe Natural) Source #
The frame number at the end of a video segment, using a frame index that starts with 0.
segmentDetection_durationSMPTE :: Lens' SegmentDetection (Maybe Text) Source #
The duration of the timecode for the detected segment in SMPTE format.
segmentDetection_endTimestampMillis :: Lens' SegmentDetection (Maybe Integer) Source #
The end time of the detected segment, in milliseconds, from the start of the video. This value is rounded down.
segmentDetection_startTimecodeSMPTE :: Lens' SegmentDetection (Maybe Text) Source #
The frame-accurate SMPTE timecode, from the start of a video, for the
start of a detected segment. StartTimecode
is in HH:MM:SS:fr format
(and ;fr for drop frame-rates).
segmentDetection_endTimecodeSMPTE :: Lens' SegmentDetection (Maybe Text) Source #
The frame-accurate SMPTE timecode, from the start of a video, for the
end of a detected segment. EndTimecode
is in HH:MM:SS:fr format (and
;fr for drop frame-rates).
segmentDetection_durationMillis :: Lens' SegmentDetection (Maybe Natural) Source #
The duration of the detected segment in milliseconds.
segmentDetection_durationFrames :: Lens' SegmentDetection (Maybe Natural) Source #
The duration of a video segment, expressed in frames.
segmentDetection_startTimestampMillis :: Lens' SegmentDetection (Maybe Integer) Source #
The start time of the detected segment in milliseconds from the start of the video. This value is rounded down. For example, if the actual timestamp is 100.6667 milliseconds, Amazon Rekognition Video returns a value of 100 millis.
segmentDetection_type :: Lens' SegmentDetection (Maybe SegmentType) Source #
The type of the segment. Valid values are TECHNICAL_CUE
and SHOT
.
segmentDetection_shotSegment :: Lens' SegmentDetection (Maybe ShotSegment) Source #
If the segment is a shot detection, contains information about the shot detection.
segmentDetection_startFrameNumber :: Lens' SegmentDetection (Maybe Natural) Source #
The frame number of the start of a video segment, using a frame index that starts with 0.
SegmentTypeInfo
data SegmentTypeInfo Source #
Information about the type of a segment requested in a call to
StartSegmentDetection. An array of SegmentTypeInfo
objects is returned
by the response from GetSegmentDetection.
See: newSegmentTypeInfo
smart constructor.
SegmentTypeInfo' | |
|
Instances
newSegmentTypeInfo :: SegmentTypeInfo Source #
Create a value of SegmentTypeInfo
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:modelVersion:SegmentTypeInfo'
, segmentTypeInfo_modelVersion
- The version of the model used to detect segments.
$sel:type':SegmentTypeInfo'
, segmentTypeInfo_type
- The type of a segment (technical cue or shot detection).
segmentTypeInfo_modelVersion :: Lens' SegmentTypeInfo (Maybe Text) Source #
The version of the model used to detect segments.
segmentTypeInfo_type :: Lens' SegmentTypeInfo (Maybe SegmentType) Source #
The type of a segment (technical cue or shot detection).
ShotSegment
data ShotSegment Source #
Information about a shot detection segment detected in a video. For more information, see SegmentDetection.
See: newShotSegment
smart constructor.
Instances
newShotSegment :: ShotSegment Source #
Create a value of ShotSegment
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:confidence:ShotSegment'
, shotSegment_confidence
- The confidence that Amazon Rekognition Video has in the accuracy of the
detected segment.
$sel:index:ShotSegment'
, shotSegment_index
- An identifier for a shot detection segment detected in a video.
shotSegment_confidence :: Lens' ShotSegment (Maybe Double) Source #
The confidence that Amazon Rekognition Video has in the accuracy of the detected segment.
shotSegment_index :: Lens' ShotSegment (Maybe Natural) Source #
An identifier for a shot detection segment detected in a video.
Smile
Indicates whether or not the face is smiling, and the confidence level in the determination.
See: newSmile
smart constructor.
Instances
Eq Smile Source # | |
Read Smile Source # | |
Show Smile Source # | |
Generic Smile Source # | |
NFData Smile Source # | |
Defined in Amazonka.Rekognition.Types.Smile | |
Hashable Smile Source # | |
Defined in Amazonka.Rekognition.Types.Smile | |
FromJSON Smile Source # | |
type Rep Smile Source # | |
Defined in Amazonka.Rekognition.Types.Smile type Rep Smile = D1 ('MetaData "Smile" "Amazonka.Rekognition.Types.Smile" "libZSservicesZSamazonka-rekognitionZSamazonka-rekognition" 'False) (C1 ('MetaCons "Smile'" 'PrefixI 'True) (S1 ('MetaSel ('Just "value") 'NoSourceUnpackedness 'NoSourceStrictness 'DecidedStrict) (Rec0 (Maybe Bool)) :*: S1 ('MetaSel ('Just "confidence") 'NoSourceUnpackedness 'NoSourceStrictness 'DecidedStrict) (Rec0 (Maybe Double)))) |
Create a value of Smile
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:value:Smile'
, smile_value
- Boolean value that indicates whether the face is smiling or not.
$sel:confidence:Smile'
, smile_confidence
- Level of confidence in the determination.
smile_value :: Lens' Smile (Maybe Bool) Source #
Boolean value that indicates whether the face is smiling or not.
StartSegmentDetectionFilters
data StartSegmentDetectionFilters Source #
Filters applied to the technical cue or shot detection segments. For more information, see StartSegmentDetection.
See: newStartSegmentDetectionFilters
smart constructor.
StartSegmentDetectionFilters' | |
|
Instances
newStartSegmentDetectionFilters :: StartSegmentDetectionFilters Source #
Create a value of StartSegmentDetectionFilters
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:technicalCueFilter:StartSegmentDetectionFilters'
, startSegmentDetectionFilters_technicalCueFilter
- Filters that are specific to technical cues.
$sel:shotFilter:StartSegmentDetectionFilters'
, startSegmentDetectionFilters_shotFilter
- Filters that are specific to shot detections.
startSegmentDetectionFilters_technicalCueFilter :: Lens' StartSegmentDetectionFilters (Maybe StartTechnicalCueDetectionFilter) Source #
Filters that are specific to technical cues.
startSegmentDetectionFilters_shotFilter :: Lens' StartSegmentDetectionFilters (Maybe StartShotDetectionFilter) Source #
Filters that are specific to shot detections.
StartShotDetectionFilter
data StartShotDetectionFilter Source #
Filters for the shot detection segments returned by
GetSegmentDetection
. For more information, see
StartSegmentDetectionFilters.
See: newStartShotDetectionFilter
smart constructor.
StartShotDetectionFilter' | |
|
Instances
newStartShotDetectionFilter :: StartShotDetectionFilter Source #
Create a value of StartShotDetectionFilter
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:minSegmentConfidence:StartShotDetectionFilter'
, startShotDetectionFilter_minSegmentConfidence
- Specifies the minimum confidence that Amazon Rekognition Video must have
in order to return a detected segment. Confidence represents how certain
Amazon Rekognition is that a segment is correctly identified. 0 is the
lowest confidence. 100 is the highest confidence. Amazon Rekognition
Video doesn't return any segments with a confidence level lower than
this specified value.
If you don't specify MinSegmentConfidence
, the GetSegmentDetection
returns segments with confidence values greater than or equal to 50
percent.
startShotDetectionFilter_minSegmentConfidence :: Lens' StartShotDetectionFilter (Maybe Double) Source #
Specifies the minimum confidence that Amazon Rekognition Video must have in order to return a detected segment. Confidence represents how certain Amazon Rekognition is that a segment is correctly identified. 0 is the lowest confidence. 100 is the highest confidence. Amazon Rekognition Video doesn't return any segments with a confidence level lower than this specified value.
If you don't specify MinSegmentConfidence
, the GetSegmentDetection
returns segments with confidence values greater than or equal to 50
percent.
StartTechnicalCueDetectionFilter
data StartTechnicalCueDetectionFilter Source #
Filters for the technical segments returned by GetSegmentDetection. For more information, see StartSegmentDetectionFilters.
See: newStartTechnicalCueDetectionFilter
smart constructor.
StartTechnicalCueDetectionFilter' | |
|
Instances
Eq StartTechnicalCueDetectionFilter Source # | |
Read StartTechnicalCueDetectionFilter Source # | |
Show StartTechnicalCueDetectionFilter Source # | |
Generic StartTechnicalCueDetectionFilter Source # | |
NFData StartTechnicalCueDetectionFilter Source # | |
Hashable StartTechnicalCueDetectionFilter Source # | |
ToJSON StartTechnicalCueDetectionFilter Source # | |
type Rep StartTechnicalCueDetectionFilter Source # | |
Defined in Amazonka.Rekognition.Types.StartTechnicalCueDetectionFilter type Rep StartTechnicalCueDetectionFilter = D1 ('MetaData "StartTechnicalCueDetectionFilter" "Amazonka.Rekognition.Types.StartTechnicalCueDetectionFilter" "libZSservicesZSamazonka-rekognitionZSamazonka-rekognition" 'False) (C1 ('MetaCons "StartTechnicalCueDetectionFilter'" 'PrefixI 'True) (S1 ('MetaSel ('Just "blackFrame") 'NoSourceUnpackedness 'NoSourceStrictness 'DecidedStrict) (Rec0 (Maybe BlackFrame)) :*: S1 ('MetaSel ('Just "minSegmentConfidence") 'NoSourceUnpackedness 'NoSourceStrictness 'DecidedStrict) (Rec0 (Maybe Double)))) |
newStartTechnicalCueDetectionFilter :: StartTechnicalCueDetectionFilter Source #
Create a value of StartTechnicalCueDetectionFilter
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:blackFrame:StartTechnicalCueDetectionFilter'
, startTechnicalCueDetectionFilter_blackFrame
- A filter that allows you to control the black frame detection by
specifying the black levels and pixel coverage of black pixels in a
frame. Videos can come from multiple sources, formats, and time periods,
with different standards and varying noise levels for black frames that
need to be accounted for.
$sel:minSegmentConfidence:StartTechnicalCueDetectionFilter'
, startTechnicalCueDetectionFilter_minSegmentConfidence
- Specifies the minimum confidence that Amazon Rekognition Video must have
in order to return a detected segment. Confidence represents how certain
Amazon Rekognition is that a segment is correctly identified. 0 is the
lowest confidence. 100 is the highest confidence. Amazon Rekognition
Video doesn't return any segments with a confidence level lower than
this specified value.
If you don't specify MinSegmentConfidence
, GetSegmentDetection
returns segments with confidence values greater than or equal to 50
percent.
startTechnicalCueDetectionFilter_blackFrame :: Lens' StartTechnicalCueDetectionFilter (Maybe BlackFrame) Source #
A filter that allows you to control the black frame detection by specifying the black levels and pixel coverage of black pixels in a frame. Videos can come from multiple sources, formats, and time periods, with different standards and varying noise levels for black frames that need to be accounted for.
startTechnicalCueDetectionFilter_minSegmentConfidence :: Lens' StartTechnicalCueDetectionFilter (Maybe Double) Source #
Specifies the minimum confidence that Amazon Rekognition Video must have in order to return a detected segment. Confidence represents how certain Amazon Rekognition is that a segment is correctly identified. 0 is the lowest confidence. 100 is the highest confidence. Amazon Rekognition Video doesn't return any segments with a confidence level lower than this specified value.
If you don't specify MinSegmentConfidence
, GetSegmentDetection
returns segments with confidence values greater than or equal to 50
percent.
StartTextDetectionFilters
data StartTextDetectionFilters Source #
Set of optional parameters that let you set the criteria text must meet
to be included in your response. WordFilter
looks at a word's height,
width and minimum confidence. RegionOfInterest
lets you set a specific
region of the screen to look for text in.
See: newStartTextDetectionFilters
smart constructor.
StartTextDetectionFilters' | |
|
Instances
newStartTextDetectionFilters :: StartTextDetectionFilters Source #
Create a value of StartTextDetectionFilters
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:regionsOfInterest:StartTextDetectionFilters'
, startTextDetectionFilters_regionsOfInterest
- Filter focusing on a certain area of the frame. Uses a BoundingBox
object to set the region of the screen.
$sel:wordFilter:StartTextDetectionFilters'
, startTextDetectionFilters_wordFilter
- Filters focusing on qualities of the text, such as confidence or size.
startTextDetectionFilters_regionsOfInterest :: Lens' StartTextDetectionFilters (Maybe [RegionOfInterest]) Source #
Filter focusing on a certain area of the frame. Uses a BoundingBox
object to set the region of the screen.
startTextDetectionFilters_wordFilter :: Lens' StartTextDetectionFilters (Maybe DetectionFilter) Source #
Filters focusing on qualities of the text, such as confidence or size.
StreamProcessor
data StreamProcessor Source #
An object that recognizes faces in a streaming video. An Amazon
Rekognition stream processor is created by a call to
CreateStreamProcessor. The request parameters for
CreateStreamProcessor
describe the Kinesis video stream source for the
streaming video, face recognition parameters, and where to stream the
analysis results.
See: newStreamProcessor
smart constructor.
Instances
newStreamProcessor :: StreamProcessor Source #
Create a value of StreamProcessor
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:status:StreamProcessor'
, streamProcessor_status
- Current status of the Amazon Rekognition stream processor.
$sel:name:StreamProcessor'
, streamProcessor_name
- Name of the Amazon Rekognition stream processor.
streamProcessor_status :: Lens' StreamProcessor (Maybe StreamProcessorStatus) Source #
Current status of the Amazon Rekognition stream processor.
streamProcessor_name :: Lens' StreamProcessor (Maybe Text) Source #
Name of the Amazon Rekognition stream processor.
StreamProcessorInput
data StreamProcessorInput Source #
Information about the source streaming video.
See: newStreamProcessorInput
smart constructor.
StreamProcessorInput' | |
|
Instances
newStreamProcessorInput :: StreamProcessorInput Source #
Create a value of StreamProcessorInput
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:kinesisVideoStream:StreamProcessorInput'
, streamProcessorInput_kinesisVideoStream
- The Kinesis video stream input stream for the source streaming video.
streamProcessorInput_kinesisVideoStream :: Lens' StreamProcessorInput (Maybe KinesisVideoStream) Source #
The Kinesis video stream input stream for the source streaming video.
StreamProcessorOutput
data StreamProcessorOutput Source #
Information about the Amazon Kinesis Data Streams stream to which an Amazon Rekognition Video stream processor streams the results of a video analysis. For more information, see CreateStreamProcessor in the Amazon Rekognition Developer Guide.
See: newStreamProcessorOutput
smart constructor.
StreamProcessorOutput' | |
|
Instances
newStreamProcessorOutput :: StreamProcessorOutput Source #
Create a value of StreamProcessorOutput
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:kinesisDataStream:StreamProcessorOutput'
, streamProcessorOutput_kinesisDataStream
- The Amazon Kinesis Data Streams stream to which the Amazon Rekognition
stream processor streams the analysis results.
streamProcessorOutput_kinesisDataStream :: Lens' StreamProcessorOutput (Maybe KinesisDataStream) Source #
The Amazon Kinesis Data Streams stream to which the Amazon Rekognition stream processor streams the analysis results.
StreamProcessorSettings
data StreamProcessorSettings Source #
Input parameters used to recognize faces in a streaming video analyzed by an Amazon Rekognition stream processor.
See: newStreamProcessorSettings
smart constructor.
StreamProcessorSettings' | |
|
Instances
newStreamProcessorSettings :: StreamProcessorSettings Source #
Create a value of StreamProcessorSettings
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:faceSearch:StreamProcessorSettings'
, streamProcessorSettings_faceSearch
- Face search settings to use on a streaming video.
streamProcessorSettings_faceSearch :: Lens' StreamProcessorSettings (Maybe FaceSearchSettings) Source #
Face search settings to use on a streaming video.
Summary
The S3 bucket that contains the training summary. The training summary includes aggregated evaluation metrics for the entire testing dataset and metrics for each individual label.
You get the training summary S3 bucket location by calling DescribeProjectVersions.
See: newSummary
smart constructor.
Instances
Eq Summary Source # | |
Read Summary Source # | |
Show Summary Source # | |
Generic Summary Source # | |
NFData Summary Source # | |
Defined in Amazonka.Rekognition.Types.Summary | |
Hashable Summary Source # | |
Defined in Amazonka.Rekognition.Types.Summary | |
FromJSON Summary Source # | |
type Rep Summary Source # | |
Defined in Amazonka.Rekognition.Types.Summary type Rep Summary = D1 ('MetaData "Summary" "Amazonka.Rekognition.Types.Summary" "libZSservicesZSamazonka-rekognitionZSamazonka-rekognition" 'False) (C1 ('MetaCons "Summary'" 'PrefixI 'True) (S1 ('MetaSel ('Just "s3Object") 'NoSourceUnpackedness 'NoSourceStrictness 'DecidedStrict) (Rec0 (Maybe S3Object)))) |
newSummary :: Summary Source #
Create a value of Summary
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:s3Object:Summary'
, summary_s3Object
- Undocumented member.
Sunglasses
data Sunglasses Source #
Indicates whether or not the face is wearing sunglasses, and the confidence level in the determination.
See: newSunglasses
smart constructor.
Instances
newSunglasses :: Sunglasses Source #
Create a value of Sunglasses
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:value:Sunglasses'
, sunglasses_value
- Boolean value that indicates whether the face is wearing sunglasses or
not.
$sel:confidence:Sunglasses'
, sunglasses_confidence
- Level of confidence in the determination.
sunglasses_value :: Lens' Sunglasses (Maybe Bool) Source #
Boolean value that indicates whether the face is wearing sunglasses or not.
sunglasses_confidence :: Lens' Sunglasses (Maybe Double) Source #
Level of confidence in the determination.
TechnicalCueSegment
data TechnicalCueSegment Source #
Information about a technical cue segment. For more information, see SegmentDetection.
See: newTechnicalCueSegment
smart constructor.
TechnicalCueSegment' | |
|
Instances
newTechnicalCueSegment :: TechnicalCueSegment Source #
Create a value of TechnicalCueSegment
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:confidence:TechnicalCueSegment'
, technicalCueSegment_confidence
- The confidence that Amazon Rekognition Video has in the accuracy of the
detected segment.
$sel:type':TechnicalCueSegment'
, technicalCueSegment_type
- The type of the technical cue.
technicalCueSegment_confidence :: Lens' TechnicalCueSegment (Maybe Double) Source #
The confidence that Amazon Rekognition Video has in the accuracy of the detected segment.
technicalCueSegment_type :: Lens' TechnicalCueSegment (Maybe TechnicalCueType) Source #
The type of the technical cue.
TestingData
data TestingData Source #
The dataset used for testing. Optionally, if AutoCreate
is set, Amazon
Rekognition Custom Labels creates a testing dataset using an 80/20
split of the training dataset.
See: newTestingData
smart constructor.
Instances
newTestingData :: TestingData Source #
Create a value of TestingData
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:assets:TestingData'
, testingData_assets
- The assets used for testing.
$sel:autoCreate:TestingData'
, testingData_autoCreate
- If specified, Amazon Rekognition Custom Labels creates a testing dataset
with an 80/20 split of the training dataset.
testingData_assets :: Lens' TestingData (Maybe [Asset]) Source #
The assets used for testing.
testingData_autoCreate :: Lens' TestingData (Maybe Bool) Source #
If specified, Amazon Rekognition Custom Labels creates a testing dataset with an 80/20 split of the training dataset.
TestingDataResult
data TestingDataResult Source #
Sagemaker Groundtruth format manifest files for the input, output and validation datasets that are used and created during testing.
See: newTestingDataResult
smart constructor.
TestingDataResult' | |
|
Instances
newTestingDataResult :: TestingDataResult Source #
Create a value of TestingDataResult
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:input:TestingDataResult'
, testingDataResult_input
- The testing dataset that was supplied for training.
$sel:output:TestingDataResult'
, testingDataResult_output
- The subset of the dataset that was actually tested. Some images (assets)
might not be tested due to file formatting and other issues.
$sel:validation:TestingDataResult'
, testingDataResult_validation
- The location of the data validation manifest. The data validation
manifest is created for the test dataset during model training.
testingDataResult_input :: Lens' TestingDataResult (Maybe TestingData) Source #
The testing dataset that was supplied for training.
testingDataResult_output :: Lens' TestingDataResult (Maybe TestingData) Source #
The subset of the dataset that was actually tested. Some images (assets) might not be tested due to file formatting and other issues.
testingDataResult_validation :: Lens' TestingDataResult (Maybe ValidationData) Source #
The location of the data validation manifest. The data validation manifest is created for the test dataset during model training.
TextDetection
data TextDetection Source #
Information about a word or line of text detected by DetectText.
The DetectedText
field contains the text that Amazon Rekognition
detected in the image.
Every word and line has an identifier (Id
). Each word belongs to a
line and has a parent identifier (ParentId
) that identifies the line
of text in which the word appears. The word Id
is also an index for
the word within a line of words.
For more information, see Detecting Text in the Amazon Rekognition Developer Guide.
See: newTextDetection
smart constructor.
TextDetection' | |
|
Instances
newTextDetection :: TextDetection Source #
Create a value of TextDetection
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:detectedText:TextDetection'
, textDetection_detectedText
- The word or line of text recognized by Amazon Rekognition.
$sel:confidence:TextDetection'
, textDetection_confidence
- The confidence that Amazon Rekognition has in the accuracy of the
detected text and the accuracy of the geometry points around the
detected text.
$sel:geometry:TextDetection'
, textDetection_geometry
- The location of the detected text on the image. Includes an axis aligned
coarse bounding box surrounding the text and a finer grain polygon for
more accurate spatial information.
$sel:id:TextDetection'
, textDetection_id
- The identifier for the detected text. The identifier is only unique for
a single call to DetectText
.
$sel:type':TextDetection'
, textDetection_type
- The type of text that was detected.
$sel:parentId:TextDetection'
, textDetection_parentId
- The Parent identifier for the detected text identified by the value of
ID
. If the type of detected text is LINE
, the value of ParentId
is
Null
.
textDetection_detectedText :: Lens' TextDetection (Maybe Text) Source #
The word or line of text recognized by Amazon Rekognition.
textDetection_confidence :: Lens' TextDetection (Maybe Double) Source #
The confidence that Amazon Rekognition has in the accuracy of the detected text and the accuracy of the geometry points around the detected text.
textDetection_geometry :: Lens' TextDetection (Maybe Geometry) Source #
The location of the detected text on the image. Includes an axis aligned coarse bounding box surrounding the text and a finer grain polygon for more accurate spatial information.
textDetection_id :: Lens' TextDetection (Maybe Natural) Source #
The identifier for the detected text. The identifier is only unique for
a single call to DetectText
.
textDetection_type :: Lens' TextDetection (Maybe TextTypes) Source #
The type of text that was detected.
textDetection_parentId :: Lens' TextDetection (Maybe Natural) Source #
The Parent identifier for the detected text identified by the value of
ID
. If the type of detected text is LINE
, the value of ParentId
is
Null
.
TextDetectionResult
data TextDetectionResult Source #
Information about text detected in a video. Includes the detected text, the time in milliseconds from the start of the video that the text was detected, and where it was detected on the screen.
See: newTextDetectionResult
smart constructor.
TextDetectionResult' | |
|
Instances
newTextDetectionResult :: TextDetectionResult Source #
Create a value of TextDetectionResult
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:textDetection:TextDetectionResult'
, textDetectionResult_textDetection
- Details about text detected in a video.
$sel:timestamp:TextDetectionResult'
, textDetectionResult_timestamp
- The time, in milliseconds from the start of the video, that the text was
detected.
textDetectionResult_textDetection :: Lens' TextDetectionResult (Maybe TextDetection) Source #
Details about text detected in a video.
textDetectionResult_timestamp :: Lens' TextDetectionResult (Maybe Integer) Source #
The time, in milliseconds from the start of the video, that the text was detected.
TrainingData
data TrainingData Source #
The dataset used for training.
See: newTrainingData
smart constructor.
Instances
newTrainingData :: TrainingData Source #
Create a value of TrainingData
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:assets:TrainingData'
, trainingData_assets
- A Sagemaker GroundTruth manifest file that contains the training images
(assets).
trainingData_assets :: Lens' TrainingData (Maybe [Asset]) Source #
A Sagemaker GroundTruth manifest file that contains the training images (assets).
TrainingDataResult
data TrainingDataResult Source #
Sagemaker Groundtruth format manifest files for the input, output and validation datasets that are used and created during training.
See: newTrainingDataResult
smart constructor.
TrainingDataResult' | |
|
Instances
newTrainingDataResult :: TrainingDataResult Source #
Create a value of TrainingDataResult
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:input:TrainingDataResult'
, trainingDataResult_input
- The training assets that you supplied for training.
$sel:output:TrainingDataResult'
, trainingDataResult_output
- The images (assets) that were actually trained by Amazon Rekognition
Custom Labels.
$sel:validation:TrainingDataResult'
, trainingDataResult_validation
- The location of the data validation manifest. The data validation
manifest is created for the training dataset during model training.
trainingDataResult_input :: Lens' TrainingDataResult (Maybe TrainingData) Source #
The training assets that you supplied for training.
trainingDataResult_output :: Lens' TrainingDataResult (Maybe TrainingData) Source #
The images (assets) that were actually trained by Amazon Rekognition Custom Labels.
trainingDataResult_validation :: Lens' TrainingDataResult (Maybe ValidationData) Source #
The location of the data validation manifest. The data validation manifest is created for the training dataset during model training.
UnindexedFace
data UnindexedFace Source #
A face that IndexFaces detected, but didn't index. Use the Reasons
response attribute to determine why a face wasn't indexed.
See: newUnindexedFace
smart constructor.
UnindexedFace' | |
|
Instances
newUnindexedFace :: UnindexedFace Source #
Create a value of UnindexedFace
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:reasons:UnindexedFace'
, unindexedFace_reasons
- An array of reasons that specify why a face wasn't indexed.
- EXTREME_POSE - The face is at a pose that can't be detected. For example, the head is turned too far away from the camera.
- EXCEEDS_MAX_FACES - The number of faces detected is already higher
than that specified by the
MaxFaces
input parameter forIndexFaces
. - LOW_BRIGHTNESS - The image is too dark.
- LOW_SHARPNESS - The image is too blurry.
- LOW_CONFIDENCE - The face was detected with a low confidence.
- SMALL_BOUNDING_BOX - The bounding box around the face is too small.
$sel:faceDetail:UnindexedFace'
, unindexedFace_faceDetail
- The structure that contains attributes of a face that
IndexFaces
detected, but didn't index.
unindexedFace_reasons :: Lens' UnindexedFace (Maybe [Reason]) Source #
An array of reasons that specify why a face wasn't indexed.
- EXTREME_POSE - The face is at a pose that can't be detected. For example, the head is turned too far away from the camera.
- EXCEEDS_MAX_FACES - The number of faces detected is already higher
than that specified by the
MaxFaces
input parameter forIndexFaces
. - LOW_BRIGHTNESS - The image is too dark.
- LOW_SHARPNESS - The image is too blurry.
- LOW_CONFIDENCE - The face was detected with a low confidence.
- SMALL_BOUNDING_BOX - The bounding box around the face is too small.
unindexedFace_faceDetail :: Lens' UnindexedFace (Maybe FaceDetail) Source #
The structure that contains attributes of a face that
IndexFaces
detected, but didn't index.
ValidationData
data ValidationData Source #
Contains the Amazon S3 bucket location of the validation data for a model training job.
The validation data includes error information for individual JSON lines in the dataset. For more information, see Debugging a Failed Model Training in the Amazon Rekognition Custom Labels Developer Guide.
You get the ValidationData
object for the training dataset
(TrainingDataResult) and the test dataset (TestingDataResult) by calling
DescribeProjectVersions.
The assets array contains a single Asset object. The GroundTruthManifest field of the Asset object contains the S3 bucket location of the validation data.
See: newValidationData
smart constructor.
Instances
newValidationData :: ValidationData Source #
Create a value of ValidationData
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:assets:ValidationData'
, validationData_assets
- The assets that comprise the validation data.
validationData_assets :: Lens' ValidationData (Maybe [Asset]) Source #
The assets that comprise the validation data.
Video
Video file stored in an Amazon S3 bucket. Amazon Rekognition video start
operations such as StartLabelDetection use Video
to specify a video
for analysis. The supported file formats are .mp4, .mov and .avi.
See: newVideo
smart constructor.
Instances
Eq Video Source # | |
Read Video Source # | |
Show Video Source # | |
Generic Video Source # | |
NFData Video Source # | |
Defined in Amazonka.Rekognition.Types.Video | |
Hashable Video Source # | |
Defined in Amazonka.Rekognition.Types.Video | |
ToJSON Video Source # | |
Defined in Amazonka.Rekognition.Types.Video | |
type Rep Video Source # | |
Defined in Amazonka.Rekognition.Types.Video |
Create a value of Video
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:s3Object:Video'
, video_s3Object
- The Amazon S3 bucket name and file name for the video.
video_s3Object :: Lens' Video (Maybe S3Object) Source #
The Amazon S3 bucket name and file name for the video.
VideoMetadata
data VideoMetadata Source #
Information about a video that Amazon Rekognition analyzed.
VideoMetadata
is returned in every page of paginated responses from an
Amazon Rekognition video operation.
See: newVideoMetadata
smart constructor.
VideoMetadata' | |
|
Instances
newVideoMetadata :: VideoMetadata Source #
Create a value of VideoMetadata
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:frameRate:VideoMetadata'
, videoMetadata_frameRate
- Number of frames per second in the video.
$sel:colorRange:VideoMetadata'
, videoMetadata_colorRange
- A description of the range of luminance values in a video, either
LIMITED (16 to 235) or FULL (0 to 255).
$sel:format:VideoMetadata'
, videoMetadata_format
- Format of the analyzed video. Possible values are MP4, MOV and AVI.
$sel:codec:VideoMetadata'
, videoMetadata_codec
- Type of compression used in the analyzed video.
$sel:frameHeight:VideoMetadata'
, videoMetadata_frameHeight
- Vertical pixel dimension of the video.
$sel:durationMillis:VideoMetadata'
, videoMetadata_durationMillis
- Length of the video in milliseconds.
$sel:frameWidth:VideoMetadata'
, videoMetadata_frameWidth
- Horizontal pixel dimension of the video.
videoMetadata_frameRate :: Lens' VideoMetadata (Maybe Double) Source #
Number of frames per second in the video.
videoMetadata_colorRange :: Lens' VideoMetadata (Maybe VideoColorRange) Source #
A description of the range of luminance values in a video, either LIMITED (16 to 235) or FULL (0 to 255).
videoMetadata_format :: Lens' VideoMetadata (Maybe Text) Source #
Format of the analyzed video. Possible values are MP4, MOV and AVI.
videoMetadata_codec :: Lens' VideoMetadata (Maybe Text) Source #
Type of compression used in the analyzed video.
videoMetadata_frameHeight :: Lens' VideoMetadata (Maybe Natural) Source #
Vertical pixel dimension of the video.
videoMetadata_durationMillis :: Lens' VideoMetadata (Maybe Natural) Source #
Length of the video in milliseconds.
videoMetadata_frameWidth :: Lens' VideoMetadata (Maybe Natural) Source #
Horizontal pixel dimension of the video.