| Copyright | (c) 2013-2021 Brendan Hay |
|---|---|
| License | Mozilla Public License, v. 2.0. |
| Maintainer | Brendan Hay <brendan.g.hay+amazonka@gmail.com> |
| Stability | auto-generated |
| Portability | non-portable (GHC extensions) |
| Safe Haskell | None |
- Service Configuration
- Errors
- BaseModelName
- CLMLanguageCode
- CallAnalyticsJobStatus
- LanguageCode
- MediaFormat
- MedicalContentIdentificationType
- ModelStatus
- OutputLocationType
- ParticipantRole
- RedactionOutput
- RedactionType
- SentimentValue
- Specialty
- SubtitleFormat
- TranscriptFilterType
- TranscriptionJobStatus
- Type
- VocabularyFilterMethod
- VocabularyState
- AbsoluteTimeRange
- CallAnalyticsJob
- CallAnalyticsJobSettings
- CallAnalyticsJobSummary
- CategoryProperties
- ChannelDefinition
- ContentRedaction
- InputDataConfig
- InterruptionFilter
- JobExecutionSettings
- LanguageModel
- Media
- MedicalTranscript
- MedicalTranscriptionJob
- MedicalTranscriptionJobSummary
- MedicalTranscriptionSetting
- ModelSettings
- NonTalkTimeFilter
- RelativeTimeRange
- Rule
- SentimentFilter
- Settings
- Subtitles
- SubtitlesOutput
- Tag
- Transcript
- TranscriptFilter
- TranscriptionJob
- TranscriptionJobSummary
- VocabularyFilterInfo
- VocabularyInfo
Synopsis
- defaultService :: Service
- _ConflictException :: AsError a => Getting (First ServiceError) a ServiceError
- _NotFoundException :: AsError a => Getting (First ServiceError) a ServiceError
- _InternalFailureException :: AsError a => Getting (First ServiceError) a ServiceError
- _BadRequestException :: AsError a => Getting (First ServiceError) a ServiceError
- _LimitExceededException :: AsError a => Getting (First ServiceError) a ServiceError
- newtype BaseModelName where
- BaseModelName' { }
- pattern BaseModelName_NarrowBand :: BaseModelName
- pattern BaseModelName_WideBand :: BaseModelName
- newtype CLMLanguageCode where
- CLMLanguageCode' { }
- pattern CLMLanguageCode_En_AU :: CLMLanguageCode
- pattern CLMLanguageCode_En_GB :: CLMLanguageCode
- pattern CLMLanguageCode_En_US :: CLMLanguageCode
- pattern CLMLanguageCode_Es_US :: CLMLanguageCode
- pattern CLMLanguageCode_Hi_IN :: CLMLanguageCode
- newtype CallAnalyticsJobStatus where
- newtype LanguageCode where
- LanguageCode' { }
- pattern LanguageCode_Af_ZA :: LanguageCode
- pattern LanguageCode_Ar_AE :: LanguageCode
- pattern LanguageCode_Ar_SA :: LanguageCode
- pattern LanguageCode_Cy_GB :: LanguageCode
- pattern LanguageCode_Da_DK :: LanguageCode
- pattern LanguageCode_De_CH :: LanguageCode
- pattern LanguageCode_De_DE :: LanguageCode
- pattern LanguageCode_En_AB :: LanguageCode
- pattern LanguageCode_En_AU :: LanguageCode
- pattern LanguageCode_En_GB :: LanguageCode
- pattern LanguageCode_En_IE :: LanguageCode
- pattern LanguageCode_En_IN :: LanguageCode
- pattern LanguageCode_En_NZ :: LanguageCode
- pattern LanguageCode_En_US :: LanguageCode
- pattern LanguageCode_En_WL :: LanguageCode
- pattern LanguageCode_En_ZA :: LanguageCode
- pattern LanguageCode_Es_ES :: LanguageCode
- pattern LanguageCode_Es_US :: LanguageCode
- pattern LanguageCode_Fa_IR :: LanguageCode
- pattern LanguageCode_Fr_CA :: LanguageCode
- pattern LanguageCode_Fr_FR :: LanguageCode
- pattern LanguageCode_Ga_IE :: LanguageCode
- pattern LanguageCode_Gd_GB :: LanguageCode
- pattern LanguageCode_He_IL :: LanguageCode
- pattern LanguageCode_Hi_IN :: LanguageCode
- pattern LanguageCode_Id_ID :: LanguageCode
- pattern LanguageCode_It_IT :: LanguageCode
- pattern LanguageCode_Ja_JP :: LanguageCode
- pattern LanguageCode_Ko_KR :: LanguageCode
- pattern LanguageCode_Ms_MY :: LanguageCode
- pattern LanguageCode_Nl_NL :: LanguageCode
- pattern LanguageCode_Pt_BR :: LanguageCode
- pattern LanguageCode_Pt_PT :: LanguageCode
- pattern LanguageCode_Ru_RU :: LanguageCode
- pattern LanguageCode_Ta_IN :: LanguageCode
- pattern LanguageCode_Te_IN :: LanguageCode
- pattern LanguageCode_Th_TH :: LanguageCode
- pattern LanguageCode_Tr_TR :: LanguageCode
- pattern LanguageCode_Zh_CN :: LanguageCode
- pattern LanguageCode_Zh_TW :: LanguageCode
- newtype MediaFormat where
- MediaFormat' { }
- pattern MediaFormat_Amr :: MediaFormat
- pattern MediaFormat_Flac :: MediaFormat
- pattern MediaFormat_Mp3 :: MediaFormat
- pattern MediaFormat_Mp4 :: MediaFormat
- pattern MediaFormat_Ogg :: MediaFormat
- pattern MediaFormat_Wav :: MediaFormat
- pattern MediaFormat_Webm :: MediaFormat
- newtype MedicalContentIdentificationType where
- newtype ModelStatus where
- ModelStatus' { }
- pattern ModelStatus_COMPLETED :: ModelStatus
- pattern ModelStatus_FAILED :: ModelStatus
- pattern ModelStatus_IN_PROGRESS :: ModelStatus
- newtype OutputLocationType where
- newtype ParticipantRole where
- ParticipantRole' { }
- pattern ParticipantRole_AGENT :: ParticipantRole
- pattern ParticipantRole_CUSTOMER :: ParticipantRole
- newtype RedactionOutput where
- RedactionOutput' { }
- pattern RedactionOutput_Redacted :: RedactionOutput
- pattern RedactionOutput_Redacted_and_unredacted :: RedactionOutput
- newtype RedactionType where
- RedactionType' { }
- pattern RedactionType_PII :: RedactionType
- newtype SentimentValue where
- SentimentValue' { }
- pattern SentimentValue_MIXED :: SentimentValue
- pattern SentimentValue_NEGATIVE :: SentimentValue
- pattern SentimentValue_NEUTRAL :: SentimentValue
- pattern SentimentValue_POSITIVE :: SentimentValue
- newtype Specialty where
- Specialty' { }
- pattern Specialty_PRIMARYCARE :: Specialty
- newtype SubtitleFormat where
- SubtitleFormat' { }
- pattern SubtitleFormat_Srt :: SubtitleFormat
- pattern SubtitleFormat_Vtt :: SubtitleFormat
- newtype TranscriptFilterType where
- newtype TranscriptionJobStatus where
- newtype Type where
- Type' { }
- pattern Type_CONVERSATION :: Type
- pattern Type_DICTATION :: Type
- newtype VocabularyFilterMethod where
- newtype VocabularyState where
- VocabularyState' { }
- pattern VocabularyState_FAILED :: VocabularyState
- pattern VocabularyState_PENDING :: VocabularyState
- pattern VocabularyState_READY :: VocabularyState
- data AbsoluteTimeRange = AbsoluteTimeRange' {}
- newAbsoluteTimeRange :: AbsoluteTimeRange
- absoluteTimeRange_first :: Lens' AbsoluteTimeRange (Maybe Natural)
- absoluteTimeRange_startTime :: Lens' AbsoluteTimeRange (Maybe Natural)
- absoluteTimeRange_last :: Lens' AbsoluteTimeRange (Maybe Natural)
- absoluteTimeRange_endTime :: Lens' AbsoluteTimeRange (Maybe Natural)
- data CallAnalyticsJob = CallAnalyticsJob' {
- creationTime :: Maybe POSIX
- failureReason :: Maybe Text
- callAnalyticsJobStatus :: Maybe CallAnalyticsJobStatus
- identifiedLanguageScore :: Maybe Double
- languageCode :: Maybe LanguageCode
- settings :: Maybe CallAnalyticsJobSettings
- startTime :: Maybe POSIX
- completionTime :: Maybe POSIX
- callAnalyticsJobName :: Maybe Text
- media :: Maybe Media
- mediaFormat :: Maybe MediaFormat
- channelDefinitions :: Maybe (NonEmpty ChannelDefinition)
- dataAccessRoleArn :: Maybe Text
- transcript :: Maybe Transcript
- mediaSampleRateHertz :: Maybe Natural
- newCallAnalyticsJob :: CallAnalyticsJob
- callAnalyticsJob_creationTime :: Lens' CallAnalyticsJob (Maybe UTCTime)
- callAnalyticsJob_failureReason :: Lens' CallAnalyticsJob (Maybe Text)
- callAnalyticsJob_callAnalyticsJobStatus :: Lens' CallAnalyticsJob (Maybe CallAnalyticsJobStatus)
- callAnalyticsJob_identifiedLanguageScore :: Lens' CallAnalyticsJob (Maybe Double)
- callAnalyticsJob_languageCode :: Lens' CallAnalyticsJob (Maybe LanguageCode)
- callAnalyticsJob_settings :: Lens' CallAnalyticsJob (Maybe CallAnalyticsJobSettings)
- callAnalyticsJob_startTime :: Lens' CallAnalyticsJob (Maybe UTCTime)
- callAnalyticsJob_completionTime :: Lens' CallAnalyticsJob (Maybe UTCTime)
- callAnalyticsJob_callAnalyticsJobName :: Lens' CallAnalyticsJob (Maybe Text)
- callAnalyticsJob_media :: Lens' CallAnalyticsJob (Maybe Media)
- callAnalyticsJob_mediaFormat :: Lens' CallAnalyticsJob (Maybe MediaFormat)
- callAnalyticsJob_channelDefinitions :: Lens' CallAnalyticsJob (Maybe (NonEmpty ChannelDefinition))
- callAnalyticsJob_dataAccessRoleArn :: Lens' CallAnalyticsJob (Maybe Text)
- callAnalyticsJob_transcript :: Lens' CallAnalyticsJob (Maybe Transcript)
- callAnalyticsJob_mediaSampleRateHertz :: Lens' CallAnalyticsJob (Maybe Natural)
- data CallAnalyticsJobSettings = CallAnalyticsJobSettings' {}
- newCallAnalyticsJobSettings :: CallAnalyticsJobSettings
- callAnalyticsJobSettings_contentRedaction :: Lens' CallAnalyticsJobSettings (Maybe ContentRedaction)
- callAnalyticsJobSettings_languageOptions :: Lens' CallAnalyticsJobSettings (Maybe (NonEmpty LanguageCode))
- callAnalyticsJobSettings_vocabularyName :: Lens' CallAnalyticsJobSettings (Maybe Text)
- callAnalyticsJobSettings_languageModelName :: Lens' CallAnalyticsJobSettings (Maybe Text)
- callAnalyticsJobSettings_vocabularyFilterName :: Lens' CallAnalyticsJobSettings (Maybe Text)
- callAnalyticsJobSettings_vocabularyFilterMethod :: Lens' CallAnalyticsJobSettings (Maybe VocabularyFilterMethod)
- data CallAnalyticsJobSummary = CallAnalyticsJobSummary' {}
- newCallAnalyticsJobSummary :: CallAnalyticsJobSummary
- callAnalyticsJobSummary_creationTime :: Lens' CallAnalyticsJobSummary (Maybe UTCTime)
- callAnalyticsJobSummary_failureReason :: Lens' CallAnalyticsJobSummary (Maybe Text)
- callAnalyticsJobSummary_callAnalyticsJobStatus :: Lens' CallAnalyticsJobSummary (Maybe CallAnalyticsJobStatus)
- callAnalyticsJobSummary_languageCode :: Lens' CallAnalyticsJobSummary (Maybe LanguageCode)
- callAnalyticsJobSummary_startTime :: Lens' CallAnalyticsJobSummary (Maybe UTCTime)
- callAnalyticsJobSummary_completionTime :: Lens' CallAnalyticsJobSummary (Maybe UTCTime)
- callAnalyticsJobSummary_callAnalyticsJobName :: Lens' CallAnalyticsJobSummary (Maybe Text)
- data CategoryProperties = CategoryProperties' {
- rules :: Maybe (NonEmpty Rule)
- categoryName :: Maybe Text
- lastUpdateTime :: Maybe POSIX
- createTime :: Maybe POSIX
- newCategoryProperties :: CategoryProperties
- categoryProperties_rules :: Lens' CategoryProperties (Maybe (NonEmpty Rule))
- categoryProperties_categoryName :: Lens' CategoryProperties (Maybe Text)
- categoryProperties_lastUpdateTime :: Lens' CategoryProperties (Maybe UTCTime)
- categoryProperties_createTime :: Lens' CategoryProperties (Maybe UTCTime)
- data ChannelDefinition = ChannelDefinition' {}
- newChannelDefinition :: ChannelDefinition
- channelDefinition_participantRole :: Lens' ChannelDefinition (Maybe ParticipantRole)
- channelDefinition_channelId :: Lens' ChannelDefinition (Maybe Natural)
- data ContentRedaction = ContentRedaction' {}
- newContentRedaction :: RedactionType -> RedactionOutput -> ContentRedaction
- contentRedaction_redactionType :: Lens' ContentRedaction RedactionType
- contentRedaction_redactionOutput :: Lens' ContentRedaction RedactionOutput
- data InputDataConfig = InputDataConfig' {}
- newInputDataConfig :: Text -> Text -> InputDataConfig
- inputDataConfig_tuningDataS3Uri :: Lens' InputDataConfig (Maybe Text)
- inputDataConfig_s3Uri :: Lens' InputDataConfig Text
- inputDataConfig_dataAccessRoleArn :: Lens' InputDataConfig Text
- data InterruptionFilter = InterruptionFilter' {}
- newInterruptionFilter :: InterruptionFilter
- interruptionFilter_participantRole :: Lens' InterruptionFilter (Maybe ParticipantRole)
- interruptionFilter_relativeTimeRange :: Lens' InterruptionFilter (Maybe RelativeTimeRange)
- interruptionFilter_negate :: Lens' InterruptionFilter (Maybe Bool)
- interruptionFilter_threshold :: Lens' InterruptionFilter (Maybe Natural)
- interruptionFilter_absoluteTimeRange :: Lens' InterruptionFilter (Maybe AbsoluteTimeRange)
- data JobExecutionSettings = JobExecutionSettings' {}
- newJobExecutionSettings :: JobExecutionSettings
- jobExecutionSettings_dataAccessRoleArn :: Lens' JobExecutionSettings (Maybe Text)
- jobExecutionSettings_allowDeferredExecution :: Lens' JobExecutionSettings (Maybe Bool)
- data LanguageModel = LanguageModel' {}
- newLanguageModel :: LanguageModel
- languageModel_failureReason :: Lens' LanguageModel (Maybe Text)
- languageModel_languageCode :: Lens' LanguageModel (Maybe CLMLanguageCode)
- languageModel_modelName :: Lens' LanguageModel (Maybe Text)
- languageModel_lastModifiedTime :: Lens' LanguageModel (Maybe UTCTime)
- languageModel_upgradeAvailability :: Lens' LanguageModel (Maybe Bool)
- languageModel_inputDataConfig :: Lens' LanguageModel (Maybe InputDataConfig)
- languageModel_baseModelName :: Lens' LanguageModel (Maybe BaseModelName)
- languageModel_modelStatus :: Lens' LanguageModel (Maybe ModelStatus)
- languageModel_createTime :: Lens' LanguageModel (Maybe UTCTime)
- data Media = Media' {}
- newMedia :: Media
- media_mediaFileUri :: Lens' Media (Maybe Text)
- media_redactedMediaFileUri :: Lens' Media (Maybe Text)
- data MedicalTranscript = MedicalTranscript' {}
- newMedicalTranscript :: MedicalTranscript
- medicalTranscript_transcriptFileUri :: Lens' MedicalTranscript (Maybe Text)
- data MedicalTranscriptionJob = MedicalTranscriptionJob' {
- creationTime :: Maybe POSIX
- specialty :: Maybe Specialty
- failureReason :: Maybe Text
- languageCode :: Maybe LanguageCode
- settings :: Maybe MedicalTranscriptionSetting
- startTime :: Maybe POSIX
- completionTime :: Maybe POSIX
- media :: Maybe Media
- mediaFormat :: Maybe MediaFormat
- medicalTranscriptionJobName :: Maybe Text
- transcriptionJobStatus :: Maybe TranscriptionJobStatus
- type' :: Maybe Type
- contentIdentificationType :: Maybe MedicalContentIdentificationType
- transcript :: Maybe MedicalTranscript
- tags :: Maybe (NonEmpty Tag)
- mediaSampleRateHertz :: Maybe Natural
- newMedicalTranscriptionJob :: MedicalTranscriptionJob
- medicalTranscriptionJob_creationTime :: Lens' MedicalTranscriptionJob (Maybe UTCTime)
- medicalTranscriptionJob_specialty :: Lens' MedicalTranscriptionJob (Maybe Specialty)
- medicalTranscriptionJob_failureReason :: Lens' MedicalTranscriptionJob (Maybe Text)
- medicalTranscriptionJob_languageCode :: Lens' MedicalTranscriptionJob (Maybe LanguageCode)
- medicalTranscriptionJob_settings :: Lens' MedicalTranscriptionJob (Maybe MedicalTranscriptionSetting)
- medicalTranscriptionJob_startTime :: Lens' MedicalTranscriptionJob (Maybe UTCTime)
- medicalTranscriptionJob_completionTime :: Lens' MedicalTranscriptionJob (Maybe UTCTime)
- medicalTranscriptionJob_media :: Lens' MedicalTranscriptionJob (Maybe Media)
- medicalTranscriptionJob_mediaFormat :: Lens' MedicalTranscriptionJob (Maybe MediaFormat)
- medicalTranscriptionJob_medicalTranscriptionJobName :: Lens' MedicalTranscriptionJob (Maybe Text)
- medicalTranscriptionJob_transcriptionJobStatus :: Lens' MedicalTranscriptionJob (Maybe TranscriptionJobStatus)
- medicalTranscriptionJob_type :: Lens' MedicalTranscriptionJob (Maybe Type)
- medicalTranscriptionJob_contentIdentificationType :: Lens' MedicalTranscriptionJob (Maybe MedicalContentIdentificationType)
- medicalTranscriptionJob_transcript :: Lens' MedicalTranscriptionJob (Maybe MedicalTranscript)
- medicalTranscriptionJob_tags :: Lens' MedicalTranscriptionJob (Maybe (NonEmpty Tag))
- medicalTranscriptionJob_mediaSampleRateHertz :: Lens' MedicalTranscriptionJob (Maybe Natural)
- data MedicalTranscriptionJobSummary = MedicalTranscriptionJobSummary' {
- creationTime :: Maybe POSIX
- specialty :: Maybe Specialty
- failureReason :: Maybe Text
- languageCode :: Maybe LanguageCode
- outputLocationType :: Maybe OutputLocationType
- startTime :: Maybe POSIX
- completionTime :: Maybe POSIX
- medicalTranscriptionJobName :: Maybe Text
- transcriptionJobStatus :: Maybe TranscriptionJobStatus
- type' :: Maybe Type
- contentIdentificationType :: Maybe MedicalContentIdentificationType
- newMedicalTranscriptionJobSummary :: MedicalTranscriptionJobSummary
- medicalTranscriptionJobSummary_creationTime :: Lens' MedicalTranscriptionJobSummary (Maybe UTCTime)
- medicalTranscriptionJobSummary_specialty :: Lens' MedicalTranscriptionJobSummary (Maybe Specialty)
- medicalTranscriptionJobSummary_failureReason :: Lens' MedicalTranscriptionJobSummary (Maybe Text)
- medicalTranscriptionJobSummary_languageCode :: Lens' MedicalTranscriptionJobSummary (Maybe LanguageCode)
- medicalTranscriptionJobSummary_outputLocationType :: Lens' MedicalTranscriptionJobSummary (Maybe OutputLocationType)
- medicalTranscriptionJobSummary_startTime :: Lens' MedicalTranscriptionJobSummary (Maybe UTCTime)
- medicalTranscriptionJobSummary_completionTime :: Lens' MedicalTranscriptionJobSummary (Maybe UTCTime)
- medicalTranscriptionJobSummary_medicalTranscriptionJobName :: Lens' MedicalTranscriptionJobSummary (Maybe Text)
- medicalTranscriptionJobSummary_transcriptionJobStatus :: Lens' MedicalTranscriptionJobSummary (Maybe TranscriptionJobStatus)
- medicalTranscriptionJobSummary_type :: Lens' MedicalTranscriptionJobSummary (Maybe Type)
- medicalTranscriptionJobSummary_contentIdentificationType :: Lens' MedicalTranscriptionJobSummary (Maybe MedicalContentIdentificationType)
- data MedicalTranscriptionSetting = MedicalTranscriptionSetting' {}
- newMedicalTranscriptionSetting :: MedicalTranscriptionSetting
- medicalTranscriptionSetting_vocabularyName :: Lens' MedicalTranscriptionSetting (Maybe Text)
- medicalTranscriptionSetting_maxAlternatives :: Lens' MedicalTranscriptionSetting (Maybe Natural)
- medicalTranscriptionSetting_channelIdentification :: Lens' MedicalTranscriptionSetting (Maybe Bool)
- medicalTranscriptionSetting_showAlternatives :: Lens' MedicalTranscriptionSetting (Maybe Bool)
- medicalTranscriptionSetting_maxSpeakerLabels :: Lens' MedicalTranscriptionSetting (Maybe Natural)
- medicalTranscriptionSetting_showSpeakerLabels :: Lens' MedicalTranscriptionSetting (Maybe Bool)
- data ModelSettings = ModelSettings' {}
- newModelSettings :: ModelSettings
- modelSettings_languageModelName :: Lens' ModelSettings (Maybe Text)
- data NonTalkTimeFilter = NonTalkTimeFilter' {}
- newNonTalkTimeFilter :: NonTalkTimeFilter
- nonTalkTimeFilter_relativeTimeRange :: Lens' NonTalkTimeFilter (Maybe RelativeTimeRange)
- nonTalkTimeFilter_negate :: Lens' NonTalkTimeFilter (Maybe Bool)
- nonTalkTimeFilter_threshold :: Lens' NonTalkTimeFilter (Maybe Natural)
- nonTalkTimeFilter_absoluteTimeRange :: Lens' NonTalkTimeFilter (Maybe AbsoluteTimeRange)
- data RelativeTimeRange = RelativeTimeRange' {}
- newRelativeTimeRange :: RelativeTimeRange
- relativeTimeRange_endPercentage :: Lens' RelativeTimeRange (Maybe Natural)
- relativeTimeRange_first :: Lens' RelativeTimeRange (Maybe Natural)
- relativeTimeRange_last :: Lens' RelativeTimeRange (Maybe Natural)
- relativeTimeRange_startPercentage :: Lens' RelativeTimeRange (Maybe Natural)
- data Rule = Rule' {}
- newRule :: Rule
- rule_nonTalkTimeFilter :: Lens' Rule (Maybe NonTalkTimeFilter)
- rule_transcriptFilter :: Lens' Rule (Maybe TranscriptFilter)
- rule_sentimentFilter :: Lens' Rule (Maybe SentimentFilter)
- rule_interruptionFilter :: Lens' Rule (Maybe InterruptionFilter)
- data SentimentFilter = SentimentFilter' {}
- newSentimentFilter :: NonEmpty SentimentValue -> SentimentFilter
- sentimentFilter_participantRole :: Lens' SentimentFilter (Maybe ParticipantRole)
- sentimentFilter_relativeTimeRange :: Lens' SentimentFilter (Maybe RelativeTimeRange)
- sentimentFilter_negate :: Lens' SentimentFilter (Maybe Bool)
- sentimentFilter_absoluteTimeRange :: Lens' SentimentFilter (Maybe AbsoluteTimeRange)
- sentimentFilter_sentiments :: Lens' SentimentFilter (NonEmpty SentimentValue)
- data Settings = Settings' {}
- newSettings :: Settings
- settings_vocabularyName :: Lens' Settings (Maybe Text)
- settings_maxAlternatives :: Lens' Settings (Maybe Natural)
- settings_channelIdentification :: Lens' Settings (Maybe Bool)
- settings_showAlternatives :: Lens' Settings (Maybe Bool)
- settings_maxSpeakerLabels :: Lens' Settings (Maybe Natural)
- settings_vocabularyFilterName :: Lens' Settings (Maybe Text)
- settings_showSpeakerLabels :: Lens' Settings (Maybe Bool)
- settings_vocabularyFilterMethod :: Lens' Settings (Maybe VocabularyFilterMethod)
- data Subtitles = Subtitles' {
- formats :: Maybe [SubtitleFormat]
- newSubtitles :: Subtitles
- subtitles_formats :: Lens' Subtitles (Maybe [SubtitleFormat])
- data SubtitlesOutput = SubtitlesOutput' {
- formats :: Maybe [SubtitleFormat]
- subtitleFileUris :: Maybe [Text]
- newSubtitlesOutput :: SubtitlesOutput
- subtitlesOutput_formats :: Lens' SubtitlesOutput (Maybe [SubtitleFormat])
- subtitlesOutput_subtitleFileUris :: Lens' SubtitlesOutput (Maybe [Text])
- data Tag = Tag' {}
- newTag :: Text -> Text -> Tag
- tag_key :: Lens' Tag Text
- tag_value :: Lens' Tag Text
- data Transcript = Transcript' {}
- newTranscript :: Transcript
- transcript_redactedTranscriptFileUri :: Lens' Transcript (Maybe Text)
- transcript_transcriptFileUri :: Lens' Transcript (Maybe Text)
- data TranscriptFilter = TranscriptFilter' {}
- newTranscriptFilter :: TranscriptFilterType -> NonEmpty Text -> TranscriptFilter
- transcriptFilter_participantRole :: Lens' TranscriptFilter (Maybe ParticipantRole)
- transcriptFilter_relativeTimeRange :: Lens' TranscriptFilter (Maybe RelativeTimeRange)
- transcriptFilter_negate :: Lens' TranscriptFilter (Maybe Bool)
- transcriptFilter_absoluteTimeRange :: Lens' TranscriptFilter (Maybe AbsoluteTimeRange)
- transcriptFilter_transcriptFilterType :: Lens' TranscriptFilter TranscriptFilterType
- transcriptFilter_targets :: Lens' TranscriptFilter (NonEmpty Text)
- data TranscriptionJob = TranscriptionJob' {
- creationTime :: Maybe POSIX
- failureReason :: Maybe Text
- contentRedaction :: Maybe ContentRedaction
- identifiedLanguageScore :: Maybe Double
- subtitles :: Maybe SubtitlesOutput
- languageCode :: Maybe LanguageCode
- languageOptions :: Maybe (NonEmpty LanguageCode)
- settings :: Maybe Settings
- startTime :: Maybe POSIX
- completionTime :: Maybe POSIX
- media :: Maybe Media
- mediaFormat :: Maybe MediaFormat
- modelSettings :: Maybe ModelSettings
- transcriptionJobStatus :: Maybe TranscriptionJobStatus
- jobExecutionSettings :: Maybe JobExecutionSettings
- transcriptionJobName :: Maybe Text
- identifyLanguage :: Maybe Bool
- transcript :: Maybe Transcript
- tags :: Maybe (NonEmpty Tag)
- mediaSampleRateHertz :: Maybe Natural
- newTranscriptionJob :: TranscriptionJob
- transcriptionJob_creationTime :: Lens' TranscriptionJob (Maybe UTCTime)
- transcriptionJob_failureReason :: Lens' TranscriptionJob (Maybe Text)
- transcriptionJob_contentRedaction :: Lens' TranscriptionJob (Maybe ContentRedaction)
- transcriptionJob_identifiedLanguageScore :: Lens' TranscriptionJob (Maybe Double)
- transcriptionJob_subtitles :: Lens' TranscriptionJob (Maybe SubtitlesOutput)
- transcriptionJob_languageCode :: Lens' TranscriptionJob (Maybe LanguageCode)
- transcriptionJob_languageOptions :: Lens' TranscriptionJob (Maybe (NonEmpty LanguageCode))
- transcriptionJob_settings :: Lens' TranscriptionJob (Maybe Settings)
- transcriptionJob_startTime :: Lens' TranscriptionJob (Maybe UTCTime)
- transcriptionJob_completionTime :: Lens' TranscriptionJob (Maybe UTCTime)
- transcriptionJob_media :: Lens' TranscriptionJob (Maybe Media)
- transcriptionJob_mediaFormat :: Lens' TranscriptionJob (Maybe MediaFormat)
- transcriptionJob_modelSettings :: Lens' TranscriptionJob (Maybe ModelSettings)
- transcriptionJob_transcriptionJobStatus :: Lens' TranscriptionJob (Maybe TranscriptionJobStatus)
- transcriptionJob_jobExecutionSettings :: Lens' TranscriptionJob (Maybe JobExecutionSettings)
- transcriptionJob_transcriptionJobName :: Lens' TranscriptionJob (Maybe Text)
- transcriptionJob_identifyLanguage :: Lens' TranscriptionJob (Maybe Bool)
- transcriptionJob_transcript :: Lens' TranscriptionJob (Maybe Transcript)
- transcriptionJob_tags :: Lens' TranscriptionJob (Maybe (NonEmpty Tag))
- transcriptionJob_mediaSampleRateHertz :: Lens' TranscriptionJob (Maybe Natural)
- data TranscriptionJobSummary = TranscriptionJobSummary' {
- creationTime :: Maybe POSIX
- failureReason :: Maybe Text
- contentRedaction :: Maybe ContentRedaction
- identifiedLanguageScore :: Maybe Double
- languageCode :: Maybe LanguageCode
- outputLocationType :: Maybe OutputLocationType
- startTime :: Maybe POSIX
- completionTime :: Maybe POSIX
- modelSettings :: Maybe ModelSettings
- transcriptionJobStatus :: Maybe TranscriptionJobStatus
- transcriptionJobName :: Maybe Text
- identifyLanguage :: Maybe Bool
- newTranscriptionJobSummary :: TranscriptionJobSummary
- transcriptionJobSummary_creationTime :: Lens' TranscriptionJobSummary (Maybe UTCTime)
- transcriptionJobSummary_failureReason :: Lens' TranscriptionJobSummary (Maybe Text)
- transcriptionJobSummary_contentRedaction :: Lens' TranscriptionJobSummary (Maybe ContentRedaction)
- transcriptionJobSummary_identifiedLanguageScore :: Lens' TranscriptionJobSummary (Maybe Double)
- transcriptionJobSummary_languageCode :: Lens' TranscriptionJobSummary (Maybe LanguageCode)
- transcriptionJobSummary_outputLocationType :: Lens' TranscriptionJobSummary (Maybe OutputLocationType)
- transcriptionJobSummary_startTime :: Lens' TranscriptionJobSummary (Maybe UTCTime)
- transcriptionJobSummary_completionTime :: Lens' TranscriptionJobSummary (Maybe UTCTime)
- transcriptionJobSummary_modelSettings :: Lens' TranscriptionJobSummary (Maybe ModelSettings)
- transcriptionJobSummary_transcriptionJobStatus :: Lens' TranscriptionJobSummary (Maybe TranscriptionJobStatus)
- transcriptionJobSummary_transcriptionJobName :: Lens' TranscriptionJobSummary (Maybe Text)
- transcriptionJobSummary_identifyLanguage :: Lens' TranscriptionJobSummary (Maybe Bool)
- data VocabularyFilterInfo = VocabularyFilterInfo' {}
- newVocabularyFilterInfo :: VocabularyFilterInfo
- vocabularyFilterInfo_languageCode :: Lens' VocabularyFilterInfo (Maybe LanguageCode)
- vocabularyFilterInfo_lastModifiedTime :: Lens' VocabularyFilterInfo (Maybe UTCTime)
- vocabularyFilterInfo_vocabularyFilterName :: Lens' VocabularyFilterInfo (Maybe Text)
- data VocabularyInfo = VocabularyInfo' {}
- newVocabularyInfo :: VocabularyInfo
- vocabularyInfo_languageCode :: Lens' VocabularyInfo (Maybe LanguageCode)
- vocabularyInfo_vocabularyName :: Lens' VocabularyInfo (Maybe Text)
- vocabularyInfo_lastModifiedTime :: Lens' VocabularyInfo (Maybe UTCTime)
- vocabularyInfo_vocabularyState :: Lens' VocabularyInfo (Maybe VocabularyState)
Service Configuration
defaultService :: Service Source #
API version 2017-10-26
of the Amazon Transcribe Service SDK configuration.
Errors
_ConflictException :: AsError a => Getting (First ServiceError) a ServiceError Source #
There is already a resource with that name.
_NotFoundException :: AsError a => Getting (First ServiceError) a ServiceError Source #
We can't find the requested resource. Check the name and try your request again.
_InternalFailureException :: AsError a => Getting (First ServiceError) a ServiceError Source #
There was an internal error. Check the error message and try your request again.
_BadRequestException :: AsError a => Getting (First ServiceError) a ServiceError Source #
Your request didn't pass one or more validation tests. This can happen, for example, when the entity that you're trying to delete doesn't exist or is in a non-terminal state (for example, "in progress"). See the exception Message field for more information.
_LimitExceededException :: AsError a => Getting (First ServiceError) a ServiceError Source #
Either you have sent too many requests or your input file is too long. Wait before you resend your request, or use a smaller file and resend the request.
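A minimal usage sketch (not part of the generated reference): these values are lens folds, so a caught amazonka Error can be matched against them with preview from the lens package. The helper name and the qualified Amazonka import are illustrative assumptions.

```haskell
import           Control.Lens (preview)
import qualified Amazonka as AWS  -- assumed top-level amazonka module exporting Error

-- True when a caught AWS error is Transcribe's NotFoundException.
isNotFound :: AWS.Error -> Bool
isNotFound err =
  case preview _NotFoundException err of
    Just _serviceError -> True
    Nothing            -> False
```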
BaseModelName
newtype BaseModelName Source #
pattern BaseModelName_NarrowBand :: BaseModelName
pattern BaseModelName_WideBand :: BaseModelName
Instances
CLMLanguageCode
newtype CLMLanguageCode Source #
pattern CLMLanguageCode_En_AU :: CLMLanguageCode
pattern CLMLanguageCode_En_GB :: CLMLanguageCode
pattern CLMLanguageCode_En_US :: CLMLanguageCode
pattern CLMLanguageCode_Es_US :: CLMLanguageCode
pattern CLMLanguageCode_Hi_IN :: CLMLanguageCode
Instances
CallAnalyticsJobStatus
newtype CallAnalyticsJobStatus Source #
Instances
LanguageCode
newtype LanguageCode Source #
Instances
MediaFormat
newtype MediaFormat Source #
pattern MediaFormat_Amr :: MediaFormat
pattern MediaFormat_Flac :: MediaFormat
pattern MediaFormat_Mp3 :: MediaFormat
pattern MediaFormat_Mp4 :: MediaFormat
pattern MediaFormat_Ogg :: MediaFormat
pattern MediaFormat_Wav :: MediaFormat
pattern MediaFormat_Webm :: MediaFormat
Instances
MedicalContentIdentificationType
newtype MedicalContentIdentificationType Source #
Instances
ModelStatus
newtype ModelStatus Source #
pattern ModelStatus_COMPLETED :: ModelStatus
pattern ModelStatus_FAILED :: ModelStatus
pattern ModelStatus_IN_PROGRESS :: ModelStatus
Instances
OutputLocationType
newtype OutputLocationType Source #
pattern OutputLocationType_CUSTOMER_BUCKET :: OutputLocationType
pattern OutputLocationType_SERVICE_BUCKET :: OutputLocationType
Instances
ParticipantRole
newtype ParticipantRole Source #
pattern ParticipantRole_AGENT :: ParticipantRole
pattern ParticipantRole_CUSTOMER :: ParticipantRole
Instances
RedactionOutput
newtype RedactionOutput Source #
pattern RedactionOutput_Redacted :: RedactionOutput
pattern RedactionOutput_Redacted_and_unredacted :: RedactionOutput
Instances
RedactionType
newtype RedactionType Source #
pattern RedactionType_PII :: RedactionType
Instances
SentimentValue
newtype SentimentValue Source #
pattern SentimentValue_MIXED :: SentimentValue
pattern SentimentValue_NEGATIVE :: SentimentValue
pattern SentimentValue_NEUTRAL :: SentimentValue
pattern SentimentValue_POSITIVE :: SentimentValue
Instances
Specialty
newtype Specialty Source #
pattern Specialty_PRIMARYCARE :: Specialty
Instances
SubtitleFormat
newtype SubtitleFormat Source #
pattern SubtitleFormat_Srt :: SubtitleFormat
pattern SubtitleFormat_Vtt :: SubtitleFormat
Instances
TranscriptFilterType
newtype TranscriptFilterType Source #
pattern TranscriptFilterType_EXACT :: TranscriptFilterType
Instances
TranscriptionJobStatus
newtype TranscriptionJobStatus Source #
Instances
Type
newtype Type Source #
pattern Type_CONVERSATION :: Type
pattern Type_DICTATION :: Type
Instances
VocabularyFilterMethod
newtype VocabularyFilterMethod Source #
pattern VocabularyFilterMethod_Mask :: VocabularyFilterMethod
pattern VocabularyFilterMethod_Remove :: VocabularyFilterMethod
pattern VocabularyFilterMethod_Tag :: VocabularyFilterMethod
Instances
VocabularyState
newtype VocabularyState Source #
pattern VocabularyState_FAILED :: VocabularyState
pattern VocabularyState_PENDING :: VocabularyState
pattern VocabularyState_READY :: VocabularyState
Instances
AbsoluteTimeRange
data AbsoluteTimeRange Source #
A time range, set in milliseconds, between two points in the call.
See: newAbsoluteTimeRange
smart constructor.
AbsoluteTimeRange'
Instances
newAbsoluteTimeRange :: AbsoluteTimeRange Source #
Create a value of AbsoluteTimeRange
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:first:AbsoluteTimeRange'
, absoluteTimeRange_first
- A time range from the beginning of the call to the value that you've
specified. For example, if you specify 100000, the time range is set to
the first 100,000 milliseconds of the call.
$sel:startTime:AbsoluteTimeRange'
, absoluteTimeRange_startTime
- A value that indicates the beginning of the time range in milliseconds. To
set an absolute time range, you must specify a start time and an end time.
For example, if you specify the following values:
- StartTime - 10000
- Endtime - 50000
The time range is set between 10,000 milliseconds and 50,000 milliseconds into the call.
$sel:last:AbsoluteTimeRange'
, absoluteTimeRange_last
- A time range from the value that you've specified to the end of the
call. For example, if you specify 100000, the time range is set to the
last 100,000 milliseconds of the call.
$sel:endTime:AbsoluteTimeRange'
, absoluteTimeRange_endTime
- A value that indicates the end of the time range in milliseconds. To set
an absolute time range, you must specify a start time and an end time. For
example, if you specify the following values:
- StartTime - 10000
- Endtime - 50000
The time range is set between 10,000 milliseconds and 50,000 milliseconds into the call.
absoluteTimeRange_first :: Lens' AbsoluteTimeRange (Maybe Natural) Source #
A time range from the beginning of the call to the value that you've specified. For example, if you specify 100000, the time range is set to the first 100,000 milliseconds of the call.
absoluteTimeRange_startTime :: Lens' AbsoluteTimeRange (Maybe Natural) Source #
A value that indicates the beginning of the time range in milliseconds. To set an absolute time range, you must specify a start time and an end time. For example, if you specify the following values:
- StartTime - 10000
- Endtime - 50000
The time range is set between 10,000 milliseconds and 50,000 milliseconds into the call.
absoluteTimeRange_last :: Lens' AbsoluteTimeRange (Maybe Natural) Source #
A time range from the value that you've specified to the end of the call. For example, if you specify 100000, the time range is set to the last 100,000 milliseconds of the call.
absoluteTimeRange_endTime :: Lens' AbsoluteTimeRange (Maybe Natural) Source #
A value that indicates the end of the time range in milliseconds. To set an absolute time range, you must specify a start time and an end time. For example, if you specify the following values:
- StartTime - 10000
- Endtime - 50000
The time range is set between 10,000 milliseconds and 50,000 milliseconds into the call.
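As an illustration only (assuming the (&) and (?~) operators from the lens package; the binding name is hypothetical), the 10,000-50,000 millisecond range from the example above could be built like this:

```haskell
import Control.Lens ((&), (?~))

-- Covers the span from 10,000 ms to 50,000 ms into the call.
exampleRange :: AbsoluteTimeRange
exampleRange =
  newAbsoluteTimeRange
    & absoluteTimeRange_startTime ?~ 10000
    & absoluteTimeRange_endTime   ?~ 50000
```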
CallAnalyticsJob
data CallAnalyticsJob Source #
Describes an asynchronous analytics job that was created with the StartAnalyticsJob operation.
See: newCallAnalyticsJob
smart constructor.
CallAnalyticsJob'
Instances
newCallAnalyticsJob :: CallAnalyticsJob Source #
Create a value of CallAnalyticsJob
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:creationTime:CallAnalyticsJob'
, callAnalyticsJob_creationTime
- A timestamp that shows when the analytics job was created.
$sel:failureReason:CallAnalyticsJob'
, callAnalyticsJob_failureReason
- If the AnalyticsJobStatus is FAILED, this field contains information about why the job failed.
The FailureReason field can contain one of the following values:
- Unsupported media format: The media format specified in the MediaFormat field of the request isn't valid. See the description of the MediaFormat field for a list of valid values.
- The media format provided does not match the detected media format: The media format of the audio file doesn't match the format specified in the MediaFormat field in the request. Check the media format of your media file and make sure the two values match.
- Invalid sample rate for audio file: The sample rate specified in the MediaSampleRateHertz of the request isn't valid. The sample rate must be between 8,000 and 48,000 Hertz.
- The sample rate provided does not match the detected sample rate: The sample rate in the audio file doesn't match the sample rate specified in the MediaSampleRateHertz field in the request. Check the sample rate of your media file and make sure that the two values match.
- Invalid file size: file size too large: The size of your audio file is larger than what Amazon Transcribe Medical can process. For more information, see Guidelines and Quotas in the Amazon Transcribe Medical Guide.
- Invalid number of channels: number of channels too large: Your audio contains more channels than Amazon Transcribe Medical is configured to process. To request additional channels, see Amazon Transcribe Medical Endpoints and Quotas in the Amazon Web Services General Reference.
$sel:callAnalyticsJobStatus:CallAnalyticsJob'
, callAnalyticsJob_callAnalyticsJobStatus
- The status of the analytics job.
$sel:identifiedLanguageScore:CallAnalyticsJob'
, callAnalyticsJob_identifiedLanguageScore
- A value between zero and one that Amazon Transcribe assigned to the
language that it identified in the source audio. This value appears only
when you don't provide a single language code. Larger values indicate
that Amazon Transcribe has higher confidence in the language that it
identified.
$sel:languageCode:CallAnalyticsJob'
, callAnalyticsJob_languageCode
- If you know the language spoken between the customer and the agent,
specify a language code for this field.
If you don't know the language, you can leave this field blank, and Amazon Transcribe will use machine learning to automatically identify the language. To improve the accuracy of language identification, you can provide an array containing the possible language codes for the language spoken in your audio. Refer to Supported languages and language-specific features for additional information.
$sel:settings:CallAnalyticsJob'
, callAnalyticsJob_settings
- Provides information about the settings used to run a transcription job.
$sel:startTime:CallAnalyticsJob'
, callAnalyticsJob_startTime
- A timestamp that shows when the analytics job started processing.
$sel:completionTime:CallAnalyticsJob'
, callAnalyticsJob_completionTime
- A timestamp that shows when the analytics job was completed.
$sel:callAnalyticsJobName:CallAnalyticsJob'
, callAnalyticsJob_callAnalyticsJobName
- The name of the call analytics job.
$sel:media:CallAnalyticsJob'
, callAnalyticsJob_media
- Undocumented member.
$sel:mediaFormat:CallAnalyticsJob'
, callAnalyticsJob_mediaFormat
- The format of the input audio file. Note: for call analytics jobs, only
the following media formats are supported: MP3, MP4, WAV, FLAC, OGG, and
WebM.
$sel:channelDefinitions:CallAnalyticsJob'
, callAnalyticsJob_channelDefinitions
- Shows numeric values to indicate the channel assigned to the agent's
audio and the channel assigned to the customer's audio.
$sel:dataAccessRoleArn:CallAnalyticsJob'
, callAnalyticsJob_dataAccessRoleArn
- The Amazon Resource Name (ARN) that you use to get access to the
analytics job.
$sel:transcript:CallAnalyticsJob'
, callAnalyticsJob_transcript
- Undocumented member.
$sel:mediaSampleRateHertz:CallAnalyticsJob'
, callAnalyticsJob_mediaSampleRateHertz
- The sample rate, in Hertz, of the audio.
callAnalyticsJob_creationTime :: Lens' CallAnalyticsJob (Maybe UTCTime) Source #
A timestamp that shows when the analytics job was created.
callAnalyticsJob_failureReason :: Lens' CallAnalyticsJob (Maybe Text) Source #
If the AnalyticsJobStatus is FAILED, this field contains information about why the job failed.
The FailureReason field can contain one of the following values:
- Unsupported media format: The media format specified in the MediaFormat field of the request isn't valid. See the description of the MediaFormat field for a list of valid values.
- The media format provided does not match the detected media format: The media format of the audio file doesn't match the format specified in the MediaFormat field in the request. Check the media format of your media file and make sure the two values match.
- Invalid sample rate for audio file: The sample rate specified in the MediaSampleRateHertz of the request isn't valid. The sample rate must be between 8,000 and 48,000 Hertz.
- The sample rate provided does not match the detected sample rate: The sample rate in the audio file doesn't match the sample rate specified in the MediaSampleRateHertz field in the request. Check the sample rate of your media file and make sure that the two values match.
- Invalid file size: file size too large: The size of your audio file is larger than what Amazon Transcribe Medical can process. For more information, see Guidelines and Quotas in the Amazon Transcribe Medical Guide.
- Invalid number of channels: number of channels too large: Your audio contains more channels than Amazon Transcribe Medical is configured to process. To request additional channels, see Amazon Transcribe Medical Endpoints and Quotas in the Amazon Web Services General Reference.
callAnalyticsJob_callAnalyticsJobStatus :: Lens' CallAnalyticsJob (Maybe CallAnalyticsJobStatus) Source #
The status of the analytics job.
callAnalyticsJob_identifiedLanguageScore :: Lens' CallAnalyticsJob (Maybe Double) Source #
A value between zero and one that Amazon Transcribe assigned to the language that it identified in the source audio. This value appears only when you don't provide a single language code. Larger values indicate that Amazon Transcribe has higher confidence in the language that it identified.
callAnalyticsJob_languageCode :: Lens' CallAnalyticsJob (Maybe LanguageCode) Source #
If you know the language spoken between the customer and the agent, specify a language code for this field.
If you don't know the language, you can leave this field blank, and Amazon Transcribe will use machine learning to automatically identify the language. To improve the accuracy of language identification, you can provide an array containing the possible language codes for the language spoken in your audio. Refer to Supported languages and language-specific features for additional information.
callAnalyticsJob_settings :: Lens' CallAnalyticsJob (Maybe CallAnalyticsJobSettings) Source #
Provides information about the settings used to run a transcription job.
callAnalyticsJob_startTime :: Lens' CallAnalyticsJob (Maybe UTCTime) Source #
A timestamp that shows when the analytics job started processing.
callAnalyticsJob_completionTime :: Lens' CallAnalyticsJob (Maybe UTCTime) Source #
A timestamp that shows when the analytics job was completed.
callAnalyticsJob_callAnalyticsJobName :: Lens' CallAnalyticsJob (Maybe Text) Source #
The name of the call analytics job.
callAnalyticsJob_media :: Lens' CallAnalyticsJob (Maybe Media) Source #
Undocumented member.
callAnalyticsJob_mediaFormat :: Lens' CallAnalyticsJob (Maybe MediaFormat) Source #
The format of the input audio file. Note: for call analytics jobs, only the following media formats are supported: MP3, MP4, WAV, FLAC, OGG, and WebM.
callAnalyticsJob_channelDefinitions :: Lens' CallAnalyticsJob (Maybe (NonEmpty ChannelDefinition)) Source #
Shows numeric values to indicate the channel assigned to the agent's audio and the channel assigned to the customer's audio.
callAnalyticsJob_dataAccessRoleArn :: Lens' CallAnalyticsJob (Maybe Text) Source #
The Amazon Resource Name (ARN) that you use to get access to the analytics job.
callAnalyticsJob_transcript :: Lens' CallAnalyticsJob (Maybe Transcript) Source #
Undocumented member.
callAnalyticsJob_mediaSampleRateHertz :: Lens' CallAnalyticsJob (Maybe Natural) Source #
The sample rate, in Hertz, of the audio.
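CallAnalyticsJob values are returned by the service rather than built by callers, so a typical pattern is reading fields from a response with the (^.) view operator. A small sketch (the helper name, fallback strings, and the Show-based rendering of the status are illustrative assumptions):

```haskell
{-# LANGUAGE OverloadedStrings #-}
import           Control.Lens ((^.))
import           Data.Maybe   (fromMaybe)
import qualified Data.Text    as Text

-- Render a returned job as "name: status" for logging.
describeJob :: CallAnalyticsJob -> Text.Text
describeJob job =
  Text.concat
    [ fromMaybe "<unnamed>" (job ^. callAnalyticsJob_callAnalyticsJobName)
    , ": "
    , maybe "<no status>" (Text.pack . show) (job ^. callAnalyticsJob_callAnalyticsJobStatus)
    ]
```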
CallAnalyticsJobSettings
data CallAnalyticsJobSettings Source #
Provides optional settings for the CallAnalyticsJob
operation.
See: newCallAnalyticsJobSettings
smart constructor.
CallAnalyticsJobSettings'
Instances
newCallAnalyticsJobSettings :: CallAnalyticsJobSettings Source #
Create a value of CallAnalyticsJobSettings
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:contentRedaction:CallAnalyticsJobSettings'
, callAnalyticsJobSettings_contentRedaction
- Undocumented member.
$sel:languageOptions:CallAnalyticsJobSettings'
, callAnalyticsJobSettings_languageOptions
- When you run a call analytics job, you can specify the language spoken
in the audio, or you can have Amazon Transcribe identify the language
for you.
To specify a language, specify an array with one language code. If you don't know the language, you can leave this field blank and Amazon Transcribe will use machine learning to identify the language for you. To improve the ability of Amazon Transcribe to correctly identify the language, you can provide an array of the languages that can be present in the audio. Refer to Supported languages and language-specific features for additional information.
$sel:vocabularyName:CallAnalyticsJobSettings'
, callAnalyticsJobSettings_vocabularyName
- The name of a vocabulary to use when processing the call analytics job.
$sel:languageModelName:CallAnalyticsJobSettings'
, callAnalyticsJobSettings_languageModelName
- The structure used to describe a custom language model.
$sel:vocabularyFilterName:CallAnalyticsJobSettings'
, callAnalyticsJobSettings_vocabularyFilterName
- The name of the vocabulary filter to use when running a call analytics
job. The filter that you specify must have the same language code as the
analytics job.
$sel:vocabularyFilterMethod:CallAnalyticsJobSettings'
, callAnalyticsJobSettings_vocabularyFilterMethod
- Set to mask to remove filtered text from the transcript and replace it with three asterisks ("***") as placeholder text. Set to remove to remove filtered text from the transcript without using placeholder text. Set to tag to mark the word in the transcription output that matches the vocabulary filter. When you set the filter method to tag, the words matching your vocabulary filter are not masked or removed.
callAnalyticsJobSettings_contentRedaction :: Lens' CallAnalyticsJobSettings (Maybe ContentRedaction) Source #
Undocumented member.
callAnalyticsJobSettings_languageOptions :: Lens' CallAnalyticsJobSettings (Maybe (NonEmpty LanguageCode)) Source #
When you run a call analytics job, you can specify the language spoken in the audio, or you can have Amazon Transcribe identify the language for you.
To specify a language, specify an array with one language code. If you don't know the language, you can leave this field blank and Amazon Transcribe will use machine learning to identify the language for you. To improve the ability of Amazon Transcribe to correctly identify the language, you can provide an array of the languages that can be present in the audio. Refer to Supported languages and language-specific features for additional information.
callAnalyticsJobSettings_vocabularyName :: Lens' CallAnalyticsJobSettings (Maybe Text) Source #
The name of a vocabulary to use when processing the call analytics job.
callAnalyticsJobSettings_languageModelName :: Lens' CallAnalyticsJobSettings (Maybe Text) Source #
The structure used to describe a custom language model.
callAnalyticsJobSettings_vocabularyFilterName :: Lens' CallAnalyticsJobSettings (Maybe Text) Source #
The name of the vocabulary filter to use when running a call analytics job. The filter that you specify must have the same language code as the analytics job.
callAnalyticsJobSettings_vocabularyFilterMethod :: Lens' CallAnalyticsJobSettings (Maybe VocabularyFilterMethod) Source #
Set to mask to remove filtered text from the transcript and replace it with three asterisks ("***") as placeholder text. Set to remove to remove filtered text from the transcript without using placeholder text. Set to tag to mark the word in the transcription output that matches the vocabulary filter. When you set the filter method to tag, the words matching your vocabulary filter are not masked or removed.
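A small construction sketch (assuming OverloadedStrings and the lens operators; the filter name "profanity" is a placeholder for a vocabulary filter you have already created): mask any matches from that filter while leaving the other settings at their defaults.

```haskell
{-# LANGUAGE OverloadedStrings #-}
import Control.Lens ((&), (?~))

-- Mask words matched by a pre-existing vocabulary filter named "profanity".
exampleSettings :: CallAnalyticsJobSettings
exampleSettings =
  newCallAnalyticsJobSettings
    & callAnalyticsJobSettings_vocabularyFilterName   ?~ "profanity"
    & callAnalyticsJobSettings_vocabularyFilterMethod ?~ VocabularyFilterMethod_Mask
```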
CallAnalyticsJobSummary
data CallAnalyticsJobSummary Source #
Provides summary information about a call analytics job.
See: newCallAnalyticsJobSummary
smart constructor.
CallAnalyticsJobSummary'
Instances
newCallAnalyticsJobSummary :: CallAnalyticsJobSummary Source #
Create a value of CallAnalyticsJobSummary
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:creationTime:CallAnalyticsJobSummary'
, callAnalyticsJobSummary_creationTime
- A timestamp that shows when the call analytics job was created.
$sel:failureReason:CallAnalyticsJobSummary'
, callAnalyticsJobSummary_failureReason
- If the CallAnalyticsJobStatus is FAILED, a description of the error.
$sel:callAnalyticsJobStatus:CallAnalyticsJobSummary'
, callAnalyticsJobSummary_callAnalyticsJobStatus
- The status of the call analytics job.
$sel:languageCode:CallAnalyticsJobSummary'
, callAnalyticsJobSummary_languageCode
- The language of the transcript in the source audio file.
$sel:startTime:CallAnalyticsJobSummary'
, callAnalyticsJobSummary_startTime
- A timestamp that shows when the job began processing.
$sel:completionTime:CallAnalyticsJobSummary'
, callAnalyticsJobSummary_completionTime
- A timestamp that shows when the job was completed.
$sel:callAnalyticsJobName:CallAnalyticsJobSummary'
, callAnalyticsJobSummary_callAnalyticsJobName
- The name of the call analytics job.
callAnalyticsJobSummary_creationTime :: Lens' CallAnalyticsJobSummary (Maybe UTCTime) Source #
A timestamp that shows when the call analytics job was created.
callAnalyticsJobSummary_failureReason :: Lens' CallAnalyticsJobSummary (Maybe Text) Source #
If the CallAnalyticsJobStatus is FAILED, a description of the error.
callAnalyticsJobSummary_callAnalyticsJobStatus :: Lens' CallAnalyticsJobSummary (Maybe CallAnalyticsJobStatus) Source #
The status of the call analytics job.
callAnalyticsJobSummary_languageCode :: Lens' CallAnalyticsJobSummary (Maybe LanguageCode) Source #
The language of the transcript in the source audio file.
callAnalyticsJobSummary_startTime :: Lens' CallAnalyticsJobSummary (Maybe UTCTime) Source #
A timestamp that shows when the job began processing.
callAnalyticsJobSummary_completionTime :: Lens' CallAnalyticsJobSummary (Maybe UTCTime) Source #
A timestamp that shows when the job was completed.
callAnalyticsJobSummary_callAnalyticsJobName :: Lens' CallAnalyticsJobSummary (Maybe Text) Source #
The name of the call analytics job.
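For example (an illustrative helper, not part of the API), summaries returned by a list call can be narrowed to finished jobs by checking for a completion timestamp:

```haskell
import Control.Lens ((^.))
import Data.Maybe   (isJust)

-- Keep only summaries whose job has finished (i.e. has a completion time).
completedOnly :: [CallAnalyticsJobSummary] -> [CallAnalyticsJobSummary]
completedOnly = filter (isJust . (^. callAnalyticsJobSummary_completionTime))
```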
CategoryProperties
data CategoryProperties Source #
An object that contains the rules and additional information about a call analytics category.
See: newCategoryProperties
smart constructor.
CategoryProperties'
Instances
newCategoryProperties :: CategoryProperties Source #
Create a value of CategoryProperties
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:rules:CategoryProperties'
, categoryProperties_rules
- The rules used to create a call analytics category.
$sel:categoryName:CategoryProperties'
, categoryProperties_categoryName
- The name of the call analytics category.
$sel:lastUpdateTime:CategoryProperties'
, categoryProperties_lastUpdateTime
- A timestamp that shows when the call analytics category was most
recently updated.
$sel:createTime:CategoryProperties'
, categoryProperties_createTime
- A timestamp that shows when the call analytics category was created.
categoryProperties_rules :: Lens' CategoryProperties (Maybe (NonEmpty Rule)) Source #
The rules used to create a call analytics category.
categoryProperties_categoryName :: Lens' CategoryProperties (Maybe Text) Source #
The name of the call analytics category.
categoryProperties_lastUpdateTime :: Lens' CategoryProperties (Maybe UTCTime) Source #
A timestamp that shows when the call analytics category was most recently updated.
categoryProperties_createTime :: Lens' CategoryProperties (Maybe UTCTime) Source #
A timestamp that shows when the call analytics category was created.
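An illustrative read-only helper (the function name is an assumption): count the rules attached to a category, treating an absent list as empty.

```haskell
import           Control.Lens ((^.))
import qualified Data.List.NonEmpty as NE

-- Number of rules attached to a call analytics category.
ruleCount :: CategoryProperties -> Int
ruleCount props = maybe 0 NE.length (props ^. categoryProperties_rules)
```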
ChannelDefinition
data ChannelDefinition Source #
For a call analytics job, an object that indicates the audio channel that belongs to the agent and the audio channel that belongs to the customer.
See: newChannelDefinition
smart constructor.
ChannelDefinition'
Instances
newChannelDefinition :: ChannelDefinition Source #
Create a value of ChannelDefinition
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:participantRole:ChannelDefinition'
, channelDefinition_participantRole
- Indicates whether the person speaking on the audio channel is the agent
or customer.
$sel:channelId:ChannelDefinition'
, channelDefinition_channelId
- A value that indicates the audio channel.
channelDefinition_participantRole :: Lens' ChannelDefinition (Maybe ParticipantRole) Source #
Indicates whether the person speaking on the audio channel is the agent or customer.
channelDefinition_channelId :: Lens' ChannelDefinition (Maybe Natural) Source #
A value that indicates the audio channel.
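A construction sketch for a typical two-channel call (the channel numbering is an assumption; adjust it to match your audio):

```haskell
import Control.Lens ((&), (?~))

-- Channel 0 carries the agent, channel 1 the customer.
agentChannel, customerChannel :: ChannelDefinition
agentChannel =
  newChannelDefinition
    & channelDefinition_channelId       ?~ 0
    & channelDefinition_participantRole ?~ ParticipantRole_AGENT

customerChannel =
  newChannelDefinition
    & channelDefinition_channelId       ?~ 1
    & channelDefinition_participantRole ?~ ParticipantRole_CUSTOMER
```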
ContentRedaction
data ContentRedaction Source #
Settings for content redaction within a transcription job.
See: newContentRedaction
smart constructor.
ContentRedaction'
Instances
newContentRedaction :: RedactionType -> RedactionOutput -> ContentRedaction Source #
Create a value of ContentRedaction
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:redactionType:ContentRedaction'
, contentRedaction_redactionType
- Request parameter that defines the entities to be redacted. The only
accepted value is PII.
$sel:redactionOutput:ContentRedaction'
, contentRedaction_redactionOutput
- The output transcript file stored in either the default S3 bucket or in
a bucket you specify.
When you choose redacted, Amazon Transcribe outputs only the redacted transcript.
When you choose redacted_and_unredacted, Amazon Transcribe outputs both the redacted and unredacted transcripts.
contentRedaction_redactionType :: Lens' ContentRedaction RedactionType Source #
Request parameter that defines the entities to be redacted. The only
accepted value is PII.
contentRedaction_redactionOutput :: Lens' ContentRedaction RedactionOutput Source #
The output transcript file stored in either the default S3 bucket or in a bucket you specify.
When you choose redacted, Amazon Transcribe outputs only the redacted transcript.
When you choose redacted_and_unredacted, Amazon Transcribe outputs both the redacted and unredacted transcripts.
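Because both fields are required, ContentRedaction is built directly with the smart constructor. A minimal sketch (the binding name is hypothetical):

```haskell
-- Redact PII and keep both the redacted and unredacted transcripts.
exampleRedaction :: ContentRedaction
exampleRedaction =
  newContentRedaction RedactionType_PII RedactionOutput_Redacted_and_unredacted
```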
InputDataConfig
data InputDataConfig Source #
The object that contains the Amazon S3 object location and access role required to train and tune your custom language model.
See: newInputDataConfig
smart constructor.
InputDataConfig'
Instances
newInputDataConfig :: Text -> Text -> InputDataConfig Source #
Create a value of InputDataConfig
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:tuningDataS3Uri:InputDataConfig'
, inputDataConfig_tuningDataS3Uri
- The Amazon S3 prefix you specify to access the plain text files that you
use to tune your custom language model.
$sel:s3Uri:InputDataConfig'
, inputDataConfig_s3Uri
- The Amazon S3 prefix you specify to access the plain text files that you
use to train your custom language model.
$sel:dataAccessRoleArn:InputDataConfig'
, inputDataConfig_dataAccessRoleArn
- The Amazon Resource Name (ARN) that uniquely identifies the permissions
you've given Amazon Transcribe to access your Amazon S3 buckets
containing your media files or text data.
inputDataConfig_tuningDataS3Uri :: Lens' InputDataConfig (Maybe Text) Source #
The Amazon S3 prefix you specify to access the plain text files that you use to tune your custom language model.
inputDataConfig_s3Uri :: Lens' InputDataConfig Text Source #
The Amazon S3 prefix you specify to access the plain text files that you use to train your custom language model.
inputDataConfig_dataAccessRoleArn :: Lens' InputDataConfig Text Source #
The Amazon Resource Name (ARN) that uniquely identifies the permissions you've given Amazon Transcribe to access your Amazon S3 buckets containing your media files or text data.
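A construction sketch (assuming OverloadedStrings; the bucket prefixes and role ARN below are placeholders): the required S3 prefix and role go through the smart constructor, and the tuning-data prefix is set optionally with a lens.

```haskell
{-# LANGUAGE OverloadedStrings #-}
import Control.Lens ((&), (?~))

-- Training data plus an optional tuning-data prefix.
exampleInputDataConfig :: InputDataConfig
exampleInputDataConfig =
  newInputDataConfig
    "s3://example-bucket/clm/training/"                       -- s3Uri (training data)
    "arn:aws:iam::123456789012:role/example-transcribe-role"  -- dataAccessRoleArn
    & inputDataConfig_tuningDataS3Uri ?~ "s3://example-bucket/clm/tuning/"
```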
InterruptionFilter
data InterruptionFilter Source #
An object that enables you to configure your category to be applied to call analytics jobs where either the customer or agent was interrupted.
See: newInterruptionFilter
smart constructor.
InterruptionFilter'
Instances
newInterruptionFilter :: InterruptionFilter Source #
Create a value of InterruptionFilter
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:participantRole:InterruptionFilter'
, interruptionFilter_participantRole
- Indicates whether the caller or customer was interrupting.
$sel:relativeTimeRange:InterruptionFilter'
, interruptionFilter_relativeTimeRange
- An object that allows percentages to specify the proportion of the call
where there was an interruption. For example, you can specify the first
half of the call. You can also specify the period of time between
halfway through to three-quarters of the way through the call. Because
the length of conversation can vary between calls, you can apply
relative time ranges across all calls.
$sel:negate:InterruptionFilter'
, interruptionFilter_negate
- Set to TRUE
to look for a time period where there was no interruption.
$sel:threshold:InterruptionFilter'
, interruptionFilter_threshold
- The duration of the interruption.
$sel:absoluteTimeRange:InterruptionFilter'
, interruptionFilter_absoluteTimeRange
- An object you can use to specify a time range (in milliseconds) for when
you'd want to find the interruption. For example, you could search for
an interruption between the 30,000 millisecond mark and the 45,000
millisecond mark. You could also specify the time period as the first
15,000 milliseconds or the last 15,000 milliseconds.
interruptionFilter_participantRole :: Lens' InterruptionFilter (Maybe ParticipantRole) Source #
Indicates whether the customer or the agent was interrupting.
interruptionFilter_relativeTimeRange :: Lens' InterruptionFilter (Maybe RelativeTimeRange) Source #
An object that allows percentages to specify the proportion of the call where there was an interruption. For example, you can specify the first half of the call. You can also specify the period of time between halfway through to three-quarters of the way through the call. Because the length of conversation can vary between calls, you can apply relative time ranges across all calls.
interruptionFilter_negate :: Lens' InterruptionFilter (Maybe Bool) Source #
Set to TRUE
to look for a time period where there was no interruption.
interruptionFilter_threshold :: Lens' InterruptionFilter (Maybe Natural) Source #
The duration of the interruption.
interruptionFilter_absoluteTimeRange :: Lens' InterruptionFilter (Maybe AbsoluteTimeRange) Source #
An object you can use to specify a time range (in milliseconds) for when you'd want to find the interruption. For example, you could search for an interruption between the 30,000 millisecond mark and the 45,000 millisecond mark. You could also specify the time period as the first 15,000 milliseconds or the last 15,000 milliseconds.
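As an illustrative sketch (not part of the generated documentation), the lenses above compose with the (&) and (?~) operators from the lens package, which is assumed here along with the Amazonka.Transcribe.Types import; the threshold and percentages are arbitrary example values:

    import Amazonka.Transcribe.Types
    import Control.Lens ((&), (?~))

    -- Flag interruptions of at least 10 seconds (10,000 ms) that occur in the
    -- first half of the call; every other field keeps its default of Nothing.
    longInterruption :: InterruptionFilter
    longInterruption =
      newInterruptionFilter
        & interruptionFilter_threshold ?~ 10000
        & interruptionFilter_relativeTimeRange
            ?~ ( newRelativeTimeRange
                   & relativeTimeRange_startPercentage ?~ 0
                   & relativeTimeRange_endPercentage ?~ 50
               )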
JobExecutionSettings
data JobExecutionSettings Source #
Provides information about when a transcription job should be executed.
See: newJobExecutionSettings
smart constructor.
JobExecutionSettings'
Instances
newJobExecutionSettings :: JobExecutionSettings Source #
Create a value of JobExecutionSettings
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:dataAccessRoleArn:JobExecutionSettings'
, jobExecutionSettings_dataAccessRoleArn
- The Amazon Resource Name (ARN) of a role that has access to the S3
bucket that contains the input files. Amazon Transcribe assumes this
role to read queued media files. If you have specified an output S3
bucket for the transcription results, this role should have access to
the output bucket as well.
If you specify the AllowDeferredExecution
field, you must specify the
DataAccessRoleArn
field.
$sel:allowDeferredExecution:JobExecutionSettings'
, jobExecutionSettings_allowDeferredExecution
- Indicates whether a job should be queued by Amazon Transcribe when the
concurrent execution limit is exceeded. When the
AllowDeferredExecution
field is true, jobs are queued and executed
when the number of executing jobs falls below the concurrent execution
limit. If the field is false, Amazon Transcribe returns a
LimitExceededException
exception.
Note that job queuing is enabled by default for call analytics jobs.
If you specify the AllowDeferredExecution
field, you must specify the
DataAccessRoleArn
field.
jobExecutionSettings_dataAccessRoleArn :: Lens' JobExecutionSettings (Maybe Text) Source #
The Amazon Resource Name (ARN) of a role that has access to the S3 bucket that contains the input files. Amazon Transcribe assumes this role to read queued media files. If you have specified an output S3 bucket for the transcription results, this role should have access to the output bucket as well.
If you specify the AllowDeferredExecution
field, you must specify the
DataAccessRoleArn
field.
jobExecutionSettings_allowDeferredExecution :: Lens' JobExecutionSettings (Maybe Bool) Source #
Indicates whether a job should be queued by Amazon Transcribe when the
concurrent execution limit is exceeded. When the
AllowDeferredExecution
field is true, jobs are queued and executed
when the number of executing jobs falls below the concurrent execution
limit. If the field is false, Amazon Transcribe returns a
LimitExceededException
exception.
Note that job queuing is enabled by default for call analytics jobs.
If you specify the AllowDeferredExecution
field, you must specify the
DataAccessRoleArn
field.
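A minimal sketch of setting these two fields together, assuming the lens package's operators and the Amazonka.Transcribe.Types import; the role ARN is a made-up placeholder:

    {-# LANGUAGE OverloadedStrings #-}

    import Amazonka.Transcribe.Types
    import Control.Lens ((&), (?~))

    -- Queue jobs instead of failing when the concurrent execution limit is
    -- exceeded.  AllowDeferredExecution requires DataAccessRoleArn, so the two
    -- fields are set together; the ARN below is a placeholder, not a real role.
    deferredExecution :: JobExecutionSettings
    deferredExecution =
      newJobExecutionSettings
        & jobExecutionSettings_allowDeferredExecution ?~ True
        & jobExecutionSettings_dataAccessRoleArn
            ?~ "arn:aws:iam::123456789012:role/transcribe-s3-access"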
LanguageModel
data LanguageModel Source #
The structure used to describe a custom language model.
See: newLanguageModel
smart constructor.
LanguageModel'
Instances
newLanguageModel :: LanguageModel Source #
Create a value of LanguageModel
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:failureReason:LanguageModel'
, languageModel_failureReason
- The reason why the custom language model couldn't be created.
$sel:languageCode:LanguageModel'
, languageModel_languageCode
- The language code you used to create your custom language model.
$sel:modelName:LanguageModel'
, languageModel_modelName
- The name of the custom language model.
$sel:lastModifiedTime:LanguageModel'
, languageModel_lastModifiedTime
- The most recent time the custom language model was modified.
$sel:upgradeAvailability:LanguageModel'
, languageModel_upgradeAvailability
- Whether the base model used for the custom language model is up to date.
If this field is true
then you are running the most up-to-date version
of the base model in your custom language model.
$sel:inputDataConfig:LanguageModel'
, languageModel_inputDataConfig
- The data access role and Amazon S3 prefixes for the input files used to
train the custom language model.
$sel:baseModelName:LanguageModel'
, languageModel_baseModelName
- The Amazon Transcribe standard language model, or base model used to
create the custom language model.
$sel:modelStatus:LanguageModel'
, languageModel_modelStatus
- The creation status of a custom language model. When the status is
COMPLETED
the model is ready for use.
$sel:createTime:LanguageModel'
, languageModel_createTime
- The time the custom language model was created.
languageModel_failureReason :: Lens' LanguageModel (Maybe Text) Source #
The reason why the custom language model couldn't be created.
languageModel_languageCode :: Lens' LanguageModel (Maybe CLMLanguageCode) Source #
The language code you used to create your custom language model.
languageModel_modelName :: Lens' LanguageModel (Maybe Text) Source #
The name of the custom language model.
languageModel_lastModifiedTime :: Lens' LanguageModel (Maybe UTCTime) Source #
The most recent time the custom language model was modified.
languageModel_upgradeAvailability :: Lens' LanguageModel (Maybe Bool) Source #
Whether the base model used for the custom language model is up to date.
If this field is true
then you are running the most up-to-date version
of the base model in your custom language model.
languageModel_inputDataConfig :: Lens' LanguageModel (Maybe InputDataConfig) Source #
The data access role and Amazon S3 prefixes for the input files used to train the custom language model.
languageModel_baseModelName :: Lens' LanguageModel (Maybe BaseModelName) Source #
The Amazon Transcribe standard language model, or base model used to create the custom language model.
languageModel_modelStatus :: Lens' LanguageModel (Maybe ModelStatus) Source #
The creation status of a custom language model. When the status is
COMPLETED
the model is ready for use.
languageModel_createTime :: Lens' LanguageModel (Maybe UTCTime) Source #
The time the custom language model was created.
Media
Describes the input media file in a transcription request.
See: newMedia
smart constructor.
Media'
Instances
Eq Media Source # | |
Read Media Source # | |
Show Media Source # | |
Generic Media Source # | |
NFData Media Source # | |
Defined in Amazonka.Transcribe.Types.Media | |
Hashable Media Source # | |
Defined in Amazonka.Transcribe.Types.Media | |
ToJSON Media Source # | |
Defined in Amazonka.Transcribe.Types.Media | |
FromJSON Media Source # | |
type Rep Media Source # | |
Defined in Amazonka.Transcribe.Types.Media type Rep Media = D1 ('MetaData "Media" "Amazonka.Transcribe.Types.Media" "libZSservicesZSamazonka-transcribeZSamazonka-transcribe" 'False) (C1 ('MetaCons "Media'" 'PrefixI 'True) (S1 ('MetaSel ('Just "mediaFileUri") 'NoSourceUnpackedness 'NoSourceStrictness 'DecidedStrict) (Rec0 (Maybe Text)) :*: S1 ('MetaSel ('Just "redactedMediaFileUri") 'NoSourceUnpackedness 'NoSourceStrictness 'DecidedStrict) (Rec0 (Maybe Text)))) |
Create a value of Media
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:mediaFileUri:Media'
, media_mediaFileUri
- The S3 object location of the input media file. The URI must be in the
same region as the API endpoint that you are calling. The general form
is:
For example:
For more information about S3 object names, see Object Keys in the Amazon S3 Developer Guide.
$sel:redactedMediaFileUri:Media'
, media_redactedMediaFileUri
- The S3 object location for your redacted output media file. This is only
supported for call analytics jobs.
media_mediaFileUri :: Lens' Media (Maybe Text) Source #
The S3 object location of the input media file. The URI must be in the same region as the API endpoint that you are calling.
For more information about S3 object names, see Object Keys in the Amazon S3 Developer Guide.
media_redactedMediaFileUri :: Lens' Media (Maybe Text) Source #
The S3 object location for your redacted output media file. This is only supported for call analytics jobs.
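A hedged sketch of pointing a request at an input file, assuming newMedia takes no arguments (both fields shown above are optional) and that the lens package's operators are in scope; the bucket and key are placeholders:

    {-# LANGUAGE OverloadedStrings #-}

    import Amazonka.Transcribe.Types
    import Control.Lens ((&), (?~))

    -- Point a transcription request at an input recording in S3.  newMedia is
    -- assumed to take no arguments because both fields of Media are optional.
    inputMedia :: Media
    inputMedia =
      newMedia
        & media_mediaFileUri ?~ "s3://example-bucket/recordings/call-0001.wav"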
MedicalTranscript
data MedicalTranscript Source #
Identifies the location of a medical transcript.
See: newMedicalTranscript
smart constructor.
MedicalTranscript'
Instances
newMedicalTranscript :: MedicalTranscript Source #
Create a value of MedicalTranscript
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:transcriptFileUri:MedicalTranscript'
, medicalTranscript_transcriptFileUri
- The S3 object location of the medical transcript.
Use this URI to access the medical transcript. This URI points to the S3 bucket you created to store the medical transcript.
medicalTranscript_transcriptFileUri :: Lens' MedicalTranscript (Maybe Text) Source #
The S3 object location of the medical transcript.
Use this URI to access the medical transcript. This URI points to the S3 bucket you created to store the medical transcript.
MedicalTranscriptionJob
data MedicalTranscriptionJob Source #
The data structure that contains the information for a medical transcription job.
See: newMedicalTranscriptionJob
smart constructor.
MedicalTranscriptionJob'
Instances
newMedicalTranscriptionJob :: MedicalTranscriptionJob Source #
Create a value of MedicalTranscriptionJob
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:creationTime:MedicalTranscriptionJob'
, medicalTranscriptionJob_creationTime
- A timestamp that shows when the job was created.
$sel:specialty:MedicalTranscriptionJob'
, medicalTranscriptionJob_specialty
- The medical specialty of any clinicians providing a dictation or having
a conversation. Refer to
Transcribing a medical conversation for
a list of supported specialties.
$sel:failureReason:MedicalTranscriptionJob'
, medicalTranscriptionJob_failureReason
- If the TranscriptionJobStatus
field is FAILED
, this field contains
information about why the job failed.
The FailureReason
field contains one of the following values:
- Unsupported media format - The media format specified in the MediaFormat field of the request isn't valid. See the description of the MediaFormat field for a list of valid values.
- The media format provided does not match the detected media format - The media format of the audio file doesn't match the format specified in the MediaFormat field in the request. Check the media format of your media file and make sure the two values match.
- Invalid sample rate for audio file - The sample rate specified in the MediaSampleRateHertz of the request isn't valid. The sample rate must be between 8,000 and 48,000 Hertz.
- The sample rate provided does not match the detected sample rate - The sample rate in the audio file doesn't match the sample rate specified in the MediaSampleRateHertz field in the request. Check the sample rate of your media file and make sure that the two values match.
- Invalid file size: file size too large - The size of your audio file is larger than what Amazon Transcribe Medical can process. For more information, see Guidelines and Quotas in the Amazon Transcribe Medical Guide.
- Invalid number of channels: number of channels too large - Your audio contains more channels than Amazon Transcribe Medical is configured to process. To request additional channels, see Amazon Transcribe Medical Endpoints and Quotas in the Amazon Web Services General Reference.
$sel:languageCode:MedicalTranscriptionJob'
, medicalTranscriptionJob_languageCode
- The language code for the language spoken in the source audio file. US
English (en-US) is the only supported language for medical
transcriptions. Any other value you enter for language code results in a
BadRequestException
error.
$sel:settings:MedicalTranscriptionJob'
, medicalTranscriptionJob_settings
- Object that contains the medical transcription job settings.
$sel:startTime:MedicalTranscriptionJob'
, medicalTranscriptionJob_startTime
- A timestamp that shows when the job started processing.
$sel:completionTime:MedicalTranscriptionJob'
, medicalTranscriptionJob_completionTime
- A timestamp that shows when the job was completed.
$sel:media:MedicalTranscriptionJob'
, medicalTranscriptionJob_media
- Undocumented member.
$sel:mediaFormat:MedicalTranscriptionJob'
, medicalTranscriptionJob_mediaFormat
- The format of the input media file.
$sel:medicalTranscriptionJobName:MedicalTranscriptionJob'
, medicalTranscriptionJob_medicalTranscriptionJobName
- The name for a given medical transcription job.
$sel:transcriptionJobStatus:MedicalTranscriptionJob'
, medicalTranscriptionJob_transcriptionJobStatus
- The completion status of a medical transcription job.
$sel:type':MedicalTranscriptionJob'
, medicalTranscriptionJob_type
- The type of speech in the transcription job. CONVERSATION
is generally
used for patient-physician dialogues. DICTATION
is the setting for
physicians speaking their notes after seeing a patient. For more
information, see
What is Amazon Transcribe Medical?.
$sel:contentIdentificationType:MedicalTranscriptionJob'
, medicalTranscriptionJob_contentIdentificationType
- Shows the type of content that you've configured Amazon Transcribe
Medical to identify in a transcription job. If the value is PHI,
you've configured the job to identify personal health information (PHI)
in the transcription output.
$sel:transcript:MedicalTranscriptionJob'
, medicalTranscriptionJob_transcript
- An object that contains the MedicalTranscript. The MedicalTranscript contains the TranscriptFileUri.
$sel:tags:MedicalTranscriptionJob'
, medicalTranscriptionJob_tags
- A key:value pair assigned to a given medical transcription job.
$sel:mediaSampleRateHertz:MedicalTranscriptionJob'
, medicalTranscriptionJob_mediaSampleRateHertz
- The sample rate, in Hertz, of the source audio containing medical
information.
If you don't specify the sample rate, Amazon Transcribe Medical
determines it for you. If you choose to specify the sample rate, it must
match the rate detected by Amazon Transcribe Medical. In most cases, you
should leave the MedicalMediaSampleHertz
blank and let Amazon
Transcribe Medical determine the sample rate.
medicalTranscriptionJob_creationTime :: Lens' MedicalTranscriptionJob (Maybe UTCTime) Source #
A timestamp that shows when the job was created.
medicalTranscriptionJob_specialty :: Lens' MedicalTranscriptionJob (Maybe Specialty) Source #
The medical specialty of any clinicians providing a dictation or having a conversation. Refer to Transcribing a medical conversation for a list of supported specialties.
medicalTranscriptionJob_failureReason :: Lens' MedicalTranscriptionJob (Maybe Text) Source #
If the TranscriptionJobStatus
field is FAILED
, this field contains
information about why the job failed.
The FailureReason
field contains one of the following values:
- Unsupported media format - The media format specified in the MediaFormat field of the request isn't valid. See the description of the MediaFormat field for a list of valid values.
- The media format provided does not match the detected media format - The media format of the audio file doesn't match the format specified in the MediaFormat field in the request. Check the media format of your media file and make sure the two values match.
- Invalid sample rate for audio file - The sample rate specified in the MediaSampleRateHertz of the request isn't valid. The sample rate must be between 8,000 and 48,000 Hertz.
- The sample rate provided does not match the detected sample rate - The sample rate in the audio file doesn't match the sample rate specified in the MediaSampleRateHertz field in the request. Check the sample rate of your media file and make sure that the two values match.
- Invalid file size: file size too large - The size of your audio file is larger than what Amazon Transcribe Medical can process. For more information, see Guidelines and Quotas in the Amazon Transcribe Medical Guide.
- Invalid number of channels: number of channels too large - Your audio contains more channels than Amazon Transcribe Medical is configured to process. To request additional channels, see Amazon Transcribe Medical Endpoints and Quotas in the Amazon Web Services General Reference.
medicalTranscriptionJob_languageCode :: Lens' MedicalTranscriptionJob (Maybe LanguageCode) Source #
The language code for the language spoken in the source audio file. US
English (en-US) is the only supported language for medical
transcriptions. Any other value you enter for language code results in a
BadRequestException
error.
medicalTranscriptionJob_settings :: Lens' MedicalTranscriptionJob (Maybe MedicalTranscriptionSetting) Source #
Object that contains the medical transcription job settings.
medicalTranscriptionJob_startTime :: Lens' MedicalTranscriptionJob (Maybe UTCTime) Source #
A timestamp that shows when the job started processing.
medicalTranscriptionJob_completionTime :: Lens' MedicalTranscriptionJob (Maybe UTCTime) Source #
A timestamp that shows when the job was completed.
medicalTranscriptionJob_media :: Lens' MedicalTranscriptionJob (Maybe Media) Source #
Undocumented member.
medicalTranscriptionJob_mediaFormat :: Lens' MedicalTranscriptionJob (Maybe MediaFormat) Source #
The format of the input media file.
medicalTranscriptionJob_medicalTranscriptionJobName :: Lens' MedicalTranscriptionJob (Maybe Text) Source #
The name for a given medical transcription job.
medicalTranscriptionJob_transcriptionJobStatus :: Lens' MedicalTranscriptionJob (Maybe TranscriptionJobStatus) Source #
The completion status of a medical transcription job.
medicalTranscriptionJob_type :: Lens' MedicalTranscriptionJob (Maybe Type) Source #
The type of speech in the transcription job. CONVERSATION
is generally
used for patient-physician dialogues. DICTATION
is the setting for
physicians speaking their notes after seeing a patient. For more
information, see
What is Amazon Transcribe Medical?.
medicalTranscriptionJob_contentIdentificationType :: Lens' MedicalTranscriptionJob (Maybe MedicalContentIdentificationType) Source #
Shows the type of content that you've configured Amazon Transcribe
Medical to identify in a transcription job. If the value is PHI,
you've configured the job to identify personal health information (PHI)
in the transcription output.
medicalTranscriptionJob_transcript :: Lens' MedicalTranscriptionJob (Maybe MedicalTranscript) Source #
An object that contains the MedicalTranscript. The MedicalTranscript contains the TranscriptFileUri.
medicalTranscriptionJob_tags :: Lens' MedicalTranscriptionJob (Maybe (NonEmpty Tag)) Source #
A key:value pair assigned to a given medical transcription job.
medicalTranscriptionJob_mediaSampleRateHertz :: Lens' MedicalTranscriptionJob (Maybe Natural) Source #
The sample rate, in Hertz, of the source audio containing medical information.
If you don't specify the sample rate, Amazon Transcribe Medical
determines it for you. If you choose to specify the sample rate, it must
match the rate detected by Amazon Transcribe Medical. In most cases, you
should leave the MedicalMediaSampleHertz
blank and let Amazon
Transcribe Medical determine the sample rate.
MedicalTranscriptionJobSummary
data MedicalTranscriptionJobSummary Source #
Provides summary information about a medical transcription job.
See: newMedicalTranscriptionJobSummary
smart constructor.
MedicalTranscriptionJobSummary'
Instances
newMedicalTranscriptionJobSummary :: MedicalTranscriptionJobSummary Source #
Create a value of MedicalTranscriptionJobSummary
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:creationTime:MedicalTranscriptionJobSummary'
, medicalTranscriptionJobSummary_creationTime
- A timestamp that shows when the medical transcription job was created.
$sel:specialty:MedicalTranscriptionJobSummary'
, medicalTranscriptionJobSummary_specialty
- The medical specialty of the transcription job. Refer to
Transcribing a medical conversation for
a list of supported specialties.
$sel:failureReason:MedicalTranscriptionJobSummary'
, medicalTranscriptionJobSummary_failureReason
- If the TranscriptionJobStatus
field is FAILED
, a description of the
error.
$sel:languageCode:MedicalTranscriptionJobSummary'
, medicalTranscriptionJobSummary_languageCode
- The language of the transcript in the source audio file.
$sel:outputLocationType:MedicalTranscriptionJobSummary'
, medicalTranscriptionJobSummary_outputLocationType
- Indicates the location of the transcription job's output. This field
must be the path of an S3 bucket; if you don't already have an S3
bucket, one is created based on the path you add.
$sel:startTime:MedicalTranscriptionJobSummary'
, medicalTranscriptionJobSummary_startTime
- A timestamp that shows when the job began processing.
$sel:completionTime:MedicalTranscriptionJobSummary'
, medicalTranscriptionJobSummary_completionTime
- A timestamp that shows when the job was completed.
$sel:medicalTranscriptionJobName:MedicalTranscriptionJobSummary'
, medicalTranscriptionJobSummary_medicalTranscriptionJobName
- The name of a medical transcription job.
$sel:transcriptionJobStatus:MedicalTranscriptionJobSummary'
, medicalTranscriptionJobSummary_transcriptionJobStatus
- The status of the medical transcription job.
$sel:type':MedicalTranscriptionJobSummary'
, medicalTranscriptionJobSummary_type
- The type of speech in the clinician's input audio: a dictation or a conversation.
$sel:contentIdentificationType:MedicalTranscriptionJobSummary'
, medicalTranscriptionJobSummary_contentIdentificationType
- Shows the type of information you've configured Amazon Transcribe
Medical to identify in a transcription job. If the value is PHI,
you've configured the transcription job to identify personal health
information (PHI).
medicalTranscriptionJobSummary_creationTime :: Lens' MedicalTranscriptionJobSummary (Maybe UTCTime) Source #
A timestamp that shows when the medical transcription job was created.
medicalTranscriptionJobSummary_specialty :: Lens' MedicalTranscriptionJobSummary (Maybe Specialty) Source #
The medical specialty of the transcription job. Refer to Transcribing a medical conversation for a list of supported specialties.
medicalTranscriptionJobSummary_failureReason :: Lens' MedicalTranscriptionJobSummary (Maybe Text) Source #
If the TranscriptionJobStatus
field is FAILED
, a description of the
error.
medicalTranscriptionJobSummary_languageCode :: Lens' MedicalTranscriptionJobSummary (Maybe LanguageCode) Source #
The language of the transcript in the source audio file.
medicalTranscriptionJobSummary_outputLocationType :: Lens' MedicalTranscriptionJobSummary (Maybe OutputLocationType) Source #
Indicates the location of the transcription job's output. This field must be the path of an S3 bucket; if you don't already have an S3 bucket, one is created based on the path you add.
medicalTranscriptionJobSummary_startTime :: Lens' MedicalTranscriptionJobSummary (Maybe UTCTime) Source #
A timestamp that shows when the job began processing.
medicalTranscriptionJobSummary_completionTime :: Lens' MedicalTranscriptionJobSummary (Maybe UTCTime) Source #
A timestamp that shows when the job was completed.
medicalTranscriptionJobSummary_medicalTranscriptionJobName :: Lens' MedicalTranscriptionJobSummary (Maybe Text) Source #
The name of a medical transcription job.
medicalTranscriptionJobSummary_transcriptionJobStatus :: Lens' MedicalTranscriptionJobSummary (Maybe TranscriptionJobStatus) Source #
The status of the medical transcription job.
medicalTranscriptionJobSummary_type :: Lens' MedicalTranscriptionJobSummary (Maybe Type) Source #
The type of speech in the clinician's input audio: a dictation or a conversation.
medicalTranscriptionJobSummary_contentIdentificationType :: Lens' MedicalTranscriptionJobSummary (Maybe MedicalContentIdentificationType) Source #
Shows the type of information you've configured Amazon Transcribe
Medical to identify in a transcription job. If the value is PHI,
you've configured the transcription job to identify personal health
information (PHI).
MedicalTranscriptionSetting
data MedicalTranscriptionSetting Source #
Optional settings for the StartMedicalTranscriptionJob operation.
See: newMedicalTranscriptionSetting
smart constructor.
MedicalTranscriptionSetting'
Instances
newMedicalTranscriptionSetting :: MedicalTranscriptionSetting Source #
Create a value of MedicalTranscriptionSetting
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:vocabularyName:MedicalTranscriptionSetting'
, medicalTranscriptionSetting_vocabularyName
- The name of the vocabulary to use when processing a medical
transcription job.
$sel:maxAlternatives:MedicalTranscriptionSetting'
, medicalTranscriptionSetting_maxAlternatives
- The maximum number of alternatives that you tell the service to return.
If you specify the MaxAlternatives
field, you must set the
ShowAlternatives
field to true.
$sel:channelIdentification:MedicalTranscriptionSetting'
, medicalTranscriptionSetting_channelIdentification
- Instructs Amazon Transcribe Medical to process each audio channel
separately and then merge the transcription output of each channel into
a single transcription.
Amazon Transcribe Medical also produces a transcription of each item detected on an audio channel, including the start time and end time of the item and alternative transcriptions of the item. The alternative transcriptions also come with confidence scores provided by Amazon Transcribe Medical.
You can't set both ShowSpeakerLabels and ChannelIdentification in the same request. If you set both, your request returns a BadRequestException.
$sel:showAlternatives:MedicalTranscriptionSetting'
, medicalTranscriptionSetting_showAlternatives
- Determines whether alternative transcripts are generated along with the
transcript that has the highest confidence. If you set the
ShowAlternatives
field to true, you must also set the maximum number
of alternatives to return in the MaxAlternatives
field.
$sel:maxSpeakerLabels:MedicalTranscriptionSetting'
, medicalTranscriptionSetting_maxSpeakerLabels
- The maximum number of speakers to identify in the input audio. If there
are more speakers in the audio than this number, multiple speakers are
identified as a single speaker. If you specify the MaxSpeakerLabels
field, you must set the ShowSpeakerLabels
field to true.
$sel:showSpeakerLabels:MedicalTranscriptionSetting'
, medicalTranscriptionSetting_showSpeakerLabels
- Determines whether the transcription job uses speaker recognition to
identify different speakers in the input audio. Speaker recognition
labels individual speakers in the audio file. If you set the
ShowSpeakerLabels
field to true, you must also set the maximum number
of speaker labels in the MaxSpeakerLabels
field.
You can't set both ShowSpeakerLabels
and ChannelIdentification
in
the same request. If you set both, your request returns a
BadRequestException
.
medicalTranscriptionSetting_vocabularyName :: Lens' MedicalTranscriptionSetting (Maybe Text) Source #
The name of the vocabulary to use when processing a medical transcription job.
medicalTranscriptionSetting_maxAlternatives :: Lens' MedicalTranscriptionSetting (Maybe Natural) Source #
The maximum number of alternatives that you tell the service to return.
If you specify the MaxAlternatives
field, you must set the
ShowAlternatives
field to true.
medicalTranscriptionSetting_channelIdentification :: Lens' MedicalTranscriptionSetting (Maybe Bool) Source #
Instructs Amazon Transcribe Medical to process each audio channel separately and then merge the transcription output of each channel into a single transcription.
Amazon Transcribe Medical also produces a transcription of each item detected on an audio channel, including the start time and end time of the item and alternative transcriptions of the item. The alternative transcriptions also come with confidence scores provided by Amazon Transcribe Medical.
You can't set both ShowSpeakerLabels and ChannelIdentification in the same request. If you set both, your request returns a BadRequestException.
medicalTranscriptionSetting_showAlternatives :: Lens' MedicalTranscriptionSetting (Maybe Bool) Source #
Determines whether alternative transcripts are generated along with the
transcript that has the highest confidence. If you set the
ShowAlternatives
field to true, you must also set the maximum number
of alternatives to return in the MaxAlternatives
field.
medicalTranscriptionSetting_maxSpeakerLabels :: Lens' MedicalTranscriptionSetting (Maybe Natural) Source #
The maximum number of speakers to identify in the input audio. If there
are more speakers in the audio than this number, multiple speakers are
identified as a single speaker. If you specify the MaxSpeakerLabels
field, you must set the ShowSpeakerLabels
field to true.
medicalTranscriptionSetting_showSpeakerLabels :: Lens' MedicalTranscriptionSetting (Maybe Bool) Source #
Determines whether the transcription job uses speaker recognition to
identify different speakers in the input audio. Speaker recognition
labels individual speakers in the audio file. If you set the
ShowSpeakerLabels
field to true, you must also set the maximum number
of speaker labels in the MaxSpeakerLabels
field.
You can't set both ShowSpeakerLabels
and ChannelIdentification
in
the same request. If you set both, your request returns a
BadRequestException
.
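A short sketch of enabling speaker partitioning with the lenses above, assuming the lens package's (&) and (?~) operators and the Amazonka.Transcribe.Types import; the speaker count is an example value:

    import Amazonka.Transcribe.Types
    import Control.Lens ((&), (?~))

    -- Enable speaker partitioning for up to two speakers.  ShowSpeakerLabels
    -- and MaxSpeakerLabels must be set together, and ChannelIdentification is
    -- left unset because the two options are mutually exclusive.
    dictationSetting :: MedicalTranscriptionSetting
    dictationSetting =
      newMedicalTranscriptionSetting
        & medicalTranscriptionSetting_showSpeakerLabels ?~ True
        & medicalTranscriptionSetting_maxSpeakerLabels ?~ 2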
ModelSettings
data ModelSettings Source #
The object used to apply your custom language model to your transcription job.
See: newModelSettings
smart constructor.
ModelSettings'
Instances
newModelSettings :: ModelSettings Source #
Create a value of ModelSettings
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:languageModelName:ModelSettings'
, modelSettings_languageModelName
- The name of your custom language model.
modelSettings_languageModelName :: Lens' ModelSettings (Maybe Text) Source #
The name of your custom language model.
NonTalkTimeFilter
data NonTalkTimeFilter Source #
An object that enables you to configure your category to be applied to call analytics jobs where there was a period of silence, that is, where neither the customer nor the agent was talking.
See: newNonTalkTimeFilter
smart constructor.
NonTalkTimeFilter'
Instances
newNonTalkTimeFilter :: NonTalkTimeFilter Source #
Create a value of NonTalkTimeFilter
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:relativeTimeRange:NonTalkTimeFilter'
, nonTalkTimeFilter_relativeTimeRange
- An object that allows percentages to specify the proportion of the call
where there was silence. For example, you can specify the first half of
the call. You can also specify the period of time between halfway
through to three-quarters of the way through the call. Because the
length of conversation can vary between calls, you can apply relative
time ranges across all calls.
$sel:negate:NonTalkTimeFilter'
, nonTalkTimeFilter_negate
- Set to TRUE
to look for a time period when people were talking.
$sel:threshold:NonTalkTimeFilter'
, nonTalkTimeFilter_threshold
- The duration of the period when neither the customer nor agent was
talking.
$sel:absoluteTimeRange:NonTalkTimeFilter'
, nonTalkTimeFilter_absoluteTimeRange
- An object you can use to specify a time range (in milliseconds) for when
no one is talking. For example, you could specify a time period between
the 30,000 millisecond mark and the 45,000 millisecond mark. You could
also specify the time period as the first 15,000 milliseconds or the
last 15,000 milliseconds.
nonTalkTimeFilter_relativeTimeRange :: Lens' NonTalkTimeFilter (Maybe RelativeTimeRange) Source #
An object that allows percentages to specify the proportion of the call where there was silence. For example, you can specify the first half of the call. You can also specify the period of time between halfway through to three-quarters of the way through the call. Because the length of conversation can vary between calls, you can apply relative time ranges across all calls.
nonTalkTimeFilter_negate :: Lens' NonTalkTimeFilter (Maybe Bool) Source #
Set to TRUE
to look for a time period when people were talking.
nonTalkTimeFilter_threshold :: Lens' NonTalkTimeFilter (Maybe Natural) Source #
The duration of the period when neither the customer nor agent was talking.
nonTalkTimeFilter_absoluteTimeRange :: Lens' NonTalkTimeFilter (Maybe AbsoluteTimeRange) Source #
An object you can use to specify a time range (in milliseconds) for when no one is talking. For example, you could specify a time period between the 30,000 millisecond mark and the 45,000 millisecond mark. You could also specify the time period as the first 15,000 milliseconds or the last 15,000 milliseconds.
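A sketch, under the same lens-operator assumptions as the examples above, of a silence filter that combines the threshold with the relative-time lenses documented below:

    import Amazonka.Transcribe.Types
    import Control.Lens ((&), (?~))

    -- Look for at least 30 seconds of silence (30,000 ms) within the first
    -- 120,000 ms of the call.
    longSilence :: NonTalkTimeFilter
    longSilence =
      newNonTalkTimeFilter
        & nonTalkTimeFilter_threshold ?~ 30000
        & nonTalkTimeFilter_relativeTimeRange
            ?~ (newRelativeTimeRange & relativeTimeRange_first ?~ 120000)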
RelativeTimeRange
data RelativeTimeRange Source #
An object that allows percentages to specify the proportion of the call where you would like to apply a filter. For example, you can specify the first half of the call. You can also specify the period of time between halfway through to three-quarters of the way through the call. Because the length of conversation can vary between calls, you can apply relative time ranges across all calls.
See: newRelativeTimeRange
smart constructor.
RelativeTimeRange'
Instances
newRelativeTimeRange :: RelativeTimeRange Source #
Create a value of RelativeTimeRange
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:endPercentage:RelativeTimeRange'
, relativeTimeRange_endPercentage
- A value that indicates the percentage of the end of the time range. To
set a relative time range, you must specify a start percentage and an
end percentage. For example, if you specify the following values:
- StartPercentage - 10
- EndPercentage - 50
This looks at the time range starting from 10% of the way into the call to 50% of the way through the call. For a call that lasts 100,000 milliseconds, this example range would apply from the 10,000 millisecond mark to the 50,000 millisecond mark.
$sel:first:RelativeTimeRange'
, relativeTimeRange_first
- A range that takes the portion of the call up to the time in
milliseconds set by the value that you've specified. For example, if
you specify 120000
, the time range is set for the first 120,000
milliseconds of the call.
$sel:last:RelativeTimeRange'
, relativeTimeRange_last
- A range that takes the portion of the call from the time in milliseconds
set by the value that you've specified to the end of the call. For
example, if you specify 120000
, the time range is set for the last
120,000 milliseconds of the call.
$sel:startPercentage:RelativeTimeRange'
, relativeTimeRange_startPercentage
- A value that indicates the percentage of the beginning of the time
range. To set a relative time range, you must specify a start percentage
and an end percentage. For example, if you specify the following values:
- StartPercentage - 10
- EndPercentage - 50
This looks at the time range starting from 10% of the way into the call to 50% of the way through the call. For a call that lasts 100,000 milliseconds, this example range would apply from the 10,000 millisecond mark to the 50,000 millisecond mark.
relativeTimeRange_endPercentage :: Lens' RelativeTimeRange (Maybe Natural) Source #
A value that indicates the percentage of the end of the time range. To set a relative time range, you must specify a start percentage and an end percentage. For example, if you specify the following values:
- StartPercentage - 10
- EndPercentage - 50
This looks at the time range starting from 10% of the way into the call to 50% of the way through the call. For a call that lasts 100,000 milliseconds, this example range would apply from the 10,000 millisecond mark to the 50,000 millisecond mark.
relativeTimeRange_first :: Lens' RelativeTimeRange (Maybe Natural) Source #
A range that takes the portion of the call up to the time in
milliseconds set by the value that you've specified. For example, if
you specify 120000
, the time range is set for the first 120,000
milliseconds of the call.
relativeTimeRange_last :: Lens' RelativeTimeRange (Maybe Natural) Source #
A range that takes the portion of the call from the time in milliseconds
set by the value that you've specified to the end of the call. For
example, if you specify 120000
, the time range is set for the last
120,000 milliseconds of the call.
relativeTimeRange_startPercentage :: Lens' RelativeTimeRange (Maybe Natural) Source #
A value that indicates the percentage of the beginning of the time range. To set a relative time range, you must specify a start percentage and an end percentage. For example, if you specify the following values:
- StartPercentage - 10
- EndPercentage - 50
This looks at the time range starting from 10% of the way into the call to 50% of the way through the call. For a call that lasts 100,000 milliseconds, this example range would apply from the 10,000 millisecond mark to the 50,000 millisecond mark.
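As a sketch of the StartPercentage 10 / EndPercentage 50 example above (again assuming the lens package's operators and the Amazonka.Transcribe.Types import):

    import Amazonka.Transcribe.Types
    import Control.Lens ((&), (?~))

    -- StartPercentage 10 / EndPercentage 50: for a call lasting 100,000 ms
    -- this covers the 10,000 ms to 50,000 ms span.
    middleOfCall :: RelativeTimeRange
    middleOfCall =
      newRelativeTimeRange
        & relativeTimeRange_startPercentage ?~ 10
        & relativeTimeRange_endPercentage ?~ 50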
Rule
A condition in the call between the customer and the agent that you want to filter for.
See: newRule
smart constructor.
Rule'
Instances
Create a value of Rule
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:nonTalkTimeFilter:Rule'
, rule_nonTalkTimeFilter
- A condition for a time period when neither the customer nor the agent
was talking.
$sel:transcriptFilter:Rule'
, rule_transcriptFilter
- A condition that catches particular words or phrases based on an exact
match. For example, if you set the phrase "I want to speak to the
manager", only that exact phrase will be returned.
$sel:sentimentFilter:Rule'
, rule_sentimentFilter
- A condition that is applied to a particular customer sentiment.
$sel:interruptionFilter:Rule'
, rule_interruptionFilter
- A condition for a time period when either the customer or agent was
interrupting the other person.
rule_nonTalkTimeFilter :: Lens' Rule (Maybe NonTalkTimeFilter) Source #
A condition for a time period when neither the customer nor the agent was talking.
rule_transcriptFilter :: Lens' Rule (Maybe TranscriptFilter) Source #
A condition that catches particular words or phrases based on an exact match. For example, if you set the phrase "I want to speak to the manager", only that exact phrase will be returned.
rule_sentimentFilter :: Lens' Rule (Maybe SentimentFilter) Source #
A condition that is applied to a particular customer sentiment.
rule_interruptionFilter :: Lens' Rule (Maybe InterruptionFilter) Source #
A condition for a time period when either the customer or agent was interrupting the other person.
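A hedged sketch of a single-condition rule; newRule is assumed to take no arguments, since every field of Rule shown above is optional, and the lens operators are assumed as in the earlier examples:

    import Amazonka.Transcribe.Types
    import Control.Lens ((&), (?~))

    -- A rule that matches calls containing at least 20 seconds of silence.
    -- newRule is assumed to take no arguments (all Rule fields are optional).
    longSilenceRule :: Rule
    longSilenceRule =
      newRule
        & rule_nonTalkTimeFilter
            ?~ (newNonTalkTimeFilter & nonTalkTimeFilter_threshold ?~ 20000)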
SentimentFilter
data SentimentFilter Source #
An object that enables you to specify a particular customer or agent sentiment. If at least 50 percent of the conversation turns (the back-and-forth between two speakers) in a specified time period match the specified sentiment, Amazon Transcribe will consider the sentiment a match.
See: newSentimentFilter
smart constructor.
SentimentFilter'
Instances
Create a value of SentimentFilter
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:participantRole:SentimentFilter'
, sentimentFilter_participantRole
- A value that determines whether the sentiment belongs to the customer or
the agent.
$sel:relativeTimeRange:SentimentFilter'
, sentimentFilter_relativeTimeRange
- The time range, set in percentages, that corresponds to the proportion of the call.
$sel:negate:SentimentFilter'
, sentimentFilter_negate
- Set to TRUE
to look for sentiments that weren't specified in the
request.
$sel:absoluteTimeRange:SentimentFilter'
, sentimentFilter_absoluteTimeRange
- The time range, measured in seconds, of the sentiment.
$sel:sentiments:SentimentFilter'
, sentimentFilter_sentiments
- An array that enables you to specify sentiments for the customer or
agent. You can specify one or more values.
sentimentFilter_participantRole :: Lens' SentimentFilter (Maybe ParticipantRole) Source #
A value that determines whether the sentiment belongs to the customer or the agent.
sentimentFilter_relativeTimeRange :: Lens' SentimentFilter (Maybe RelativeTimeRange) Source #
The time range, set in percentages, that corresponds to the proportion of the call.
sentimentFilter_negate :: Lens' SentimentFilter (Maybe Bool) Source #
Set to TRUE
to look for sentiments that weren't specified in the
request.
sentimentFilter_absoluteTimeRange :: Lens' SentimentFilter (Maybe AbsoluteTimeRange) Source #
The time range, measured in seconds, of the sentiment.
sentimentFilter_sentiments :: Lens' SentimentFilter (NonEmpty SentimentValue) Source #
An array that enables you to specify sentiments for the customer or agent. You can specify one or more values.
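A hedged sketch of a sentiment condition. The newSentimentFilter constructor is assumed to take the required non-empty list of sentiments, and SentimentValue_NEGATIVE and ParticipantRole_CUSTOMER are assumed pattern-synonym names following the conventions used elsewhere in this module:

    import Amazonka.Transcribe.Types
    import Control.Lens ((&), (?~))
    import Data.List.NonEmpty (NonEmpty (..))

    -- Match turns where the customer sounds negative.  The constructor
    -- signature and pattern-synonym names here are assumptions, not taken
    -- from the documentation above.
    negativeCustomer :: SentimentFilter
    negativeCustomer =
      newSentimentFilter (SentimentValue_NEGATIVE :| [])
        & sentimentFilter_participantRole ?~ ParticipantRole_CUSTOMER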
Settings
Provides optional settings for the StartTranscriptionJob
operation.
See: newSettings
smart constructor.
Settings'
Instances
newSettings :: Settings Source #
Create a value of Settings
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:vocabularyName:Settings'
, settings_vocabularyName
- The name of a vocabulary to use when processing the transcription job.
$sel:maxAlternatives:Settings'
, settings_maxAlternatives
- The number of alternative transcriptions that the service should return.
If you specify the MaxAlternatives
field, you must set the
ShowAlternatives
field to true.
$sel:channelIdentification:Settings'
, settings_channelIdentification
- Instructs Amazon Transcribe to process each audio channel separately and
then merge the transcription output of each channel into a single
transcription.
Amazon Transcribe also produces a transcription of each item detected on an audio channel, including the start time and end time of the item and alternative transcriptions of the item including the confidence that Amazon Transcribe has in the transcription.
You can't set both ShowSpeakerLabels
and ChannelIdentification
in
the same request. If you set both, your request returns a
BadRequestException
.
$sel:showAlternatives:Settings'
, settings_showAlternatives
- Determines whether the transcription contains alternative
transcriptions. If you set the ShowAlternatives
field to true, you
must also set the maximum number of alternatives to return in the
MaxAlternatives
field.
$sel:maxSpeakerLabels:Settings'
, settings_maxSpeakerLabels
- The maximum number of speakers to identify in the input audio. If there
are more speakers in the audio than this number, multiple speakers are
identified as a single speaker. If you specify the MaxSpeakerLabels
field, you must set the ShowSpeakerLabels
field to true.
$sel:vocabularyFilterName:Settings'
, settings_vocabularyFilterName
- The name of the vocabulary filter to use when transcribing the audio.
The filter that you specify must have the same language code as the
transcription job.
$sel:showSpeakerLabels:Settings'
, settings_showSpeakerLabels
- Determines whether the transcription job uses speaker recognition to
identify different speakers in the input audio. Speaker recognition
labels individual speakers in the audio file. If you set the
ShowSpeakerLabels
field to true, you must also set the maximum number
of speaker labels in the MaxSpeakerLabels
field.
You can't set both ShowSpeakerLabels
and ChannelIdentification
in
the same request. If you set both, your request returns a
BadRequestException
.
$sel:vocabularyFilterMethod:Settings'
, settings_vocabularyFilterMethod
- Set to mask
to remove filtered text from the transcript and replace it
with three asterisks ("***") as placeholder text. Set to remove
to
remove filtered text from the transcript without using placeholder text.
Set to tag
to mark the word in the transcription output that matches
the vocabulary filter. When you set the filter method to tag
, the
words matching your vocabulary filter are not masked or removed.
settings_vocabularyName :: Lens' Settings (Maybe Text) Source #
The name of a vocabulary to use when processing the transcription job.
settings_maxAlternatives :: Lens' Settings (Maybe Natural) Source #
The number of alternative transcriptions that the service should return.
If you specify the MaxAlternatives
field, you must set the
ShowAlternatives
field to true.
settings_channelIdentification :: Lens' Settings (Maybe Bool) Source #
Instructs Amazon Transcribe to process each audio channel separately and then merge the transcription output of each channel into a single transcription.
Amazon Transcribe also produces a transcription of each item detected on an audio channel, including the start time and end time of the item and alternative transcriptions of the item including the confidence that Amazon Transcribe has in the transcription.
You can't set both ShowSpeakerLabels
and ChannelIdentification
in
the same request. If you set both, your request returns a
BadRequestException
.
settings_showAlternatives :: Lens' Settings (Maybe Bool) Source #
Determines whether the transcription contains alternative
transcriptions. If you set the ShowAlternatives
field to true, you
must also set the maximum number of alternatives to return in the
MaxAlternatives
field.
settings_maxSpeakerLabels :: Lens' Settings (Maybe Natural) Source #
The maximum number of speakers to identify in the input audio. If there
are more speakers in the audio than this number, multiple speakers are
identified as a single speaker. If you specify the MaxSpeakerLabels
field, you must set the ShowSpeakerLabels
field to true.
settings_vocabularyFilterName :: Lens' Settings (Maybe Text) Source #
The name of the vocabulary filter to use when transcribing the audio. The filter that you specify must have the same language code as the transcription job.
settings_showSpeakerLabels :: Lens' Settings (Maybe Bool) Source #
Determines whether the transcription job uses speaker recognition to
identify different speakers in the input audio. Speaker recognition
labels individual speakers in the audio file. If you set the
ShowSpeakerLabels
field to true, you must also set the maximum number
of speaker labels in the MaxSpeakerLabels
field.
You can't set both ShowSpeakerLabels
and ChannelIdentification
in
the same request. If you set both, your request returns a
BadRequestException
.
settings_vocabularyFilterMethod :: Lens' Settings (Maybe VocabularyFilterMethod) Source #
Set to mask
to remove filtered text from the transcript and replace it
with three asterisks ("***") as placeholder text. Set to remove
to
remove filtered text from the transcript without using placeholder text.
Set to tag
to mark the word in the transcription output that matches
the vocabulary filter. When you set the filter method to tag
, the
words matching your vocabulary filter are not masked or removed.
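A sketch of a Settings value combining speaker partitioning with a masking vocabulary filter, under the same lens-operator assumptions as the earlier examples; the filter name is a placeholder for one you have already created, and VocabularyFilterMethod_Mask is an assumed pattern-synonym name for the mask value:

    {-# LANGUAGE OverloadedStrings #-}

    import Amazonka.Transcribe.Types
    import Control.Lens ((&), (?~))

    -- Speaker partitioning for two speakers plus a vocabulary filter that
    -- masks matches with "***".  The filter name is a placeholder, and the
    -- VocabularyFilterMethod_Mask pattern name is an assumption.
    jobSettings :: Settings
    jobSettings =
      newSettings
        & settings_showSpeakerLabels ?~ True
        & settings_maxSpeakerLabels ?~ 2
        & settings_vocabularyFilterName ?~ "my-vocabulary-filter"
        & settings_vocabularyFilterMethod ?~ VocabularyFilterMethod_Mask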
Subtitles
Generate subtitles for your batch transcription job.
See: newSubtitles
smart constructor.
Subtitles'
Instances
Eq Subtitles Source # | |
Read Subtitles Source # | |
Show Subtitles Source # | |
Generic Subtitles Source # | |
NFData Subtitles Source # | |
Defined in Amazonka.Transcribe.Types.Subtitles | |
Hashable Subtitles Source # | |
Defined in Amazonka.Transcribe.Types.Subtitles | |
ToJSON Subtitles Source # | |
Defined in Amazonka.Transcribe.Types.Subtitles | |
type Rep Subtitles Source # | |
Defined in Amazonka.Transcribe.Types.Subtitles type Rep Subtitles = D1 ('MetaData "Subtitles" "Amazonka.Transcribe.Types.Subtitles" "libZSservicesZSamazonka-transcribeZSamazonka-transcribe" 'False) (C1 ('MetaCons "Subtitles'" 'PrefixI 'True) (S1 ('MetaSel ('Just "formats") 'NoSourceUnpackedness 'NoSourceStrictness 'DecidedStrict) (Rec0 (Maybe [SubtitleFormat])))) |
newSubtitles :: Subtitles Source #
Create a value of Subtitles
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:formats:Subtitles'
, subtitles_formats
- Specify the output format for your subtitle file.
subtitles_formats :: Lens' Subtitles (Maybe [SubtitleFormat]) Source #
Specify the output format for your subtitle file.
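A sketch of requesting both subtitle formats; SubtitleFormat_Srt and SubtitleFormat_Vtt are assumed pattern-synonym names for the two supported formats, and the lens operators are assumed as before:

    import Amazonka.Transcribe.Types
    import Control.Lens ((&), (?~))

    -- Request both SRT and VTT subtitle files for a batch transcription job.
    -- The two pattern-synonym names are assumptions, not taken from the
    -- documentation above.
    bothFormats :: Subtitles
    bothFormats =
      newSubtitles
        & subtitles_formats ?~ [SubtitleFormat_Srt, SubtitleFormat_Vtt]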
SubtitlesOutput
data SubtitlesOutput Source #
Describes the subtitle files produced for a transcription job, including their formats and S3 locations.
See: newSubtitlesOutput
smart constructor.
SubtitlesOutput'
Instances
newSubtitlesOutput :: SubtitlesOutput Source #
Create a value of SubtitlesOutput
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:formats:SubtitlesOutput'
, subtitlesOutput_formats
- Specify the output format for your subtitle file; if you select both SRT
and VTT formats, two output files are generated.
$sel:subtitleFileUris:SubtitlesOutput'
, subtitlesOutput_subtitleFileUris
- Choose the output location for your subtitle file. This location must be
an S3 bucket.
subtitlesOutput_formats :: Lens' SubtitlesOutput (Maybe [SubtitleFormat]) Source #
Specify the output format for your subtitle file; if you select both SRT and VTT formats, two output files are generated.
subtitlesOutput_subtitleFileUris :: Lens' SubtitlesOutput (Maybe [Text]) Source #
Choose the output location for your subtitle file. This location must be an S3 bucket.
Tag
A key:value pair that adds metadata to a resource used by Amazon Transcribe. For example, a tag with the key:value pair ‘Department’:’Sales’ might be added to a resource to indicate its use by your organization's sales department.
See: newTag
smart constructor.
Tag'
Instances
Eq Tag Source # | |
Read Tag Source # | |
Show Tag Source # | |
Generic Tag Source # | |
NFData Tag Source # | |
Defined in Amazonka.Transcribe.Types.Tag | |
Hashable Tag Source # | |
Defined in Amazonka.Transcribe.Types.Tag | |
ToJSON Tag Source # | |
Defined in Amazonka.Transcribe.Types.Tag | |
FromJSON Tag Source # | |
type Rep Tag Source # | |
Defined in Amazonka.Transcribe.Types.Tag type Rep Tag = D1 ('MetaData "Tag" "Amazonka.Transcribe.Types.Tag" "libZSservicesZSamazonka-transcribeZSamazonka-transcribe" 'False) (C1 ('MetaCons "Tag'" 'PrefixI 'True) (S1 ('MetaSel ('Just "key") 'NoSourceUnpackedness 'NoSourceStrictness 'DecidedStrict) (Rec0 Text) :*: S1 ('MetaSel ('Just "value") 'NoSourceUnpackedness 'NoSourceStrictness 'DecidedStrict) (Rec0 Text))) |
Create a value of Tag
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:key:Tag'
, tag_key
- The first part of a key:value pair that forms a tag associated with a
given resource. For example, in the tag ‘Department’:’Sales’, the key is
'Department'.
$sel:value:Tag'
, tag_value
- The second part of a key:value pair that forms a tag associated with a
given resource. For example, in the tag ‘Department’:’Sales’, the value
is 'Sales'.
tag_key :: Lens' Tag Text Source #
The first part of a key:value pair that forms a tag associated with a given resource. For example, in the tag ‘Department’:’Sales’, the key is 'Department'.
tag_value :: Lens' Tag Text Source #
The second part of a key:value pair that forms a tag associated with a given resource. For example, in the tag ‘Department’:’Sales’, the value is 'Sales'.
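A sketch of the Department:Sales example above; newTag is assumed to take the key followed by the value, since both fields are required:

    {-# LANGUAGE OverloadedStrings #-}

    import Amazonka.Transcribe.Types

    -- The Department/Sales tag from the example text.  The argument order of
    -- newTag (key, then value) is an assumption.
    departmentTag :: Tag
    departmentTag = newTag "Department" "Sales"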
Transcript
data Transcript Source #
Identifies the location of a transcription.
See: newTranscript
smart constructor.
Transcript'
Instances
newTranscript :: Transcript Source #
Create a value of Transcript
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:redactedTranscriptFileUri:Transcript'
, transcript_redactedTranscriptFileUri
- The S3 object location of the redacted transcript.
Use this URI to access the redacted transcript. If you specified an S3
bucket in the OutputBucketName
field when you created the job, this is
the URI of that bucket. If you chose to store the transcript in Amazon
Transcribe, this is a shareable URL that provides secure access to that
location.
$sel:transcriptFileUri:Transcript'
, transcript_transcriptFileUri
- The S3 object location of the transcript.
Use this URI to access the transcript. If you specified an S3 bucket in
the OutputBucketName
field when you created the job, this is the URI
of that bucket. If you chose to store the transcript in Amazon
Transcribe, this is a shareable URL that provides secure access to that
location.
transcript_redactedTranscriptFileUri :: Lens' Transcript (Maybe Text) Source #
The S3 object location of the redacted transcript.
Use this URI to access the redacted transcript. If you specified an S3
bucket in the OutputBucketName
field when you created the job, this is
the URI of that bucket. If you chose to store the transcript in Amazon
Transcribe, this is a shareable URL that provides secure access to that
location.
transcript_transcriptFileUri :: Lens' Transcript (Maybe Text) Source #
The S3 object location of the transcript.
Use this URI to access the transcript. If you specified an S3 bucket in
the OutputBucketName
field when you created the job, this is the URI
of that bucket. If you chose to store the transcript in Amazon
Transcribe, this is a shareable URL that provides secure access to that
location.
TranscriptFilter
data TranscriptFilter Source #
Matches the output of the transcription to either the specific phrases that you specify, or the intent of the phrases that you specify.
See: newTranscriptFilter
smart constructor.
TranscriptFilter'
Instances
Create a value of TranscriptFilter
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:participantRole:TranscriptFilter'
, transcriptFilter_participantRole
- Determines whether the customer or the agent is speaking the phrases
that you've specified.
$sel:relativeTimeRange:TranscriptFilter'
, transcriptFilter_relativeTimeRange
- An object that allows percentages to specify the proportion of the call to which you would like to apply a filter. For example, you can specify the first half of the call, or the period from halfway through to three-quarters of the way through the call. Because the length of conversation can vary between calls, you can apply relative time ranges across all calls.
$sel:negate:TranscriptFilter'
, transcriptFilter_negate
- If TRUE, the rule that you specify is applied to everything except for the phrases that you specify.
$sel:absoluteTimeRange:TranscriptFilter'
, transcriptFilter_absoluteTimeRange
- A time range, set in seconds, between two points in the call.
$sel:transcriptFilterType:TranscriptFilter'
, transcriptFilter_transcriptFilterType
- Matches the phrase to the transcription output word for word. For example, if you specify the phrase "I want to speak to the manager", Amazon Transcribe attempts to match that specific phrase to the transcription.
$sel:targets:TranscriptFilter'
, transcriptFilter_targets
- The phrases that you're specifying for the transcript filter to match.
transcriptFilter_participantRole :: Lens' TranscriptFilter (Maybe ParticipantRole) Source #
Determines whether the customer or the agent is speaking the phrases that you've specified.
transcriptFilter_relativeTimeRange :: Lens' TranscriptFilter (Maybe RelativeTimeRange) Source #
An object that allows percentages to specify the proportion of the call to which you would like to apply a filter. For example, you can specify the first half of the call, or the period from halfway through to three-quarters of the way through the call. Because the length of conversation can vary between calls, you can apply relative time ranges across all calls.
transcriptFilter_negate :: Lens' TranscriptFilter (Maybe Bool) Source #
If TRUE, the rule that you specify is applied to everything except for the phrases that you specify.
transcriptFilter_absoluteTimeRange :: Lens' TranscriptFilter (Maybe AbsoluteTimeRange) Source #
A time range, set in seconds, between two points in the call.
transcriptFilter_transcriptFilterType :: Lens' TranscriptFilter TranscriptFilterType Source #
Matches the phrase to the transcription output word for word. For example, if you specify the phrase "I want to speak to the manager", Amazon Transcribe attempts to match that specific phrase to the transcription.
transcriptFilter_targets :: Lens' TranscriptFilter (NonEmpty Text) Source #
The phrases that you're specifying for the transcript filter to match.
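A short sketch of adjusting an existing TranscriptFilter through these lenses (using the lens package; the filter value itself is assumed to come from elsewhere, since its required transcriptFilterType and targets fields are supplied when it is created):

import Amazonka.Transcribe.Types
  (TranscriptFilter, transcriptFilter_negate, transcriptFilter_targets)
import Control.Lens ((%~), (&), (?~), (^.))
import Data.List.NonEmpty ((<|))
import qualified Data.List.NonEmpty as NE
import Data.Text (Text)

-- Flip the filter so it matches everything except its target phrases.
invertFilter :: TranscriptFilter -> TranscriptFilter
invertFilter f = f & transcriptFilter_negate ?~ True

-- Add another phrase to the non-empty target list.
addTarget :: Text -> TranscriptFilter -> TranscriptFilter
addTarget phrase f = f & transcriptFilter_targets %~ (phrase <|)

-- Read the phrases back as a plain list.
targetPhrases :: TranscriptFilter -> [Text]
targetPhrases f = NE.toList (f ^. transcriptFilter_targets)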
TranscriptionJob
data TranscriptionJob Source #
Describes an asynchronous transcription job that was created with the
StartTranscriptionJob
operation.
See: newTranscriptionJob smart constructor.
Constructors: TranscriptionJob'
Instances
newTranscriptionJob :: TranscriptionJob Source #
Create a value of TranscriptionJob
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:creationTime:TranscriptionJob'
, transcriptionJob_creationTime
- A timestamp that shows when the job was created.
$sel:failureReason:TranscriptionJob'
, transcriptionJob_failureReason
- If the TranscriptionJobStatus field is FAILED, this field contains information about why the job failed.
The FailureReason field can contain one of the following values:
  - Unsupported media format - The media format specified in the MediaFormat field of the request isn't valid. See the description of the MediaFormat field for a list of valid values.
  - The media format provided does not match the detected media format - The media format of the audio file doesn't match the format specified in the MediaFormat field in the request. Check the media format of your media file and make sure that the two values match.
  - Invalid sample rate for audio file - The sample rate specified in the MediaSampleRateHertz of the request isn't valid. The sample rate must be between 8,000 and 48,000 Hertz.
  - The sample rate provided does not match the detected sample rate - The sample rate in the audio file doesn't match the sample rate specified in the MediaSampleRateHertz field in the request. Check the sample rate of your media file and make sure that the two values match.
  - Invalid file size: file size too large - The size of your audio file is larger than Amazon Transcribe can process. For more information, see Limits in the Amazon Transcribe Developer Guide.
  - Invalid number of channels: number of channels too large - Your audio contains more channels than Amazon Transcribe is configured to process. To request additional channels, see Amazon Transcribe Limits in the Amazon Web Services General Reference.
$sel:contentRedaction:TranscriptionJob'
, transcriptionJob_contentRedaction
- An object that describes content redaction settings for the
transcription job.
$sel:identifiedLanguageScore:TranscriptionJob'
, transcriptionJob_identifiedLanguageScore
- A value between zero and one that Amazon Transcribe assigned to the
language that it identified in the source audio. Larger values indicate
that Amazon Transcribe has higher confidence in the language it
identified.
$sel:subtitles:TranscriptionJob'
, transcriptionJob_subtitles
- Generate subtitles for your batch transcription job.
$sel:languageCode:TranscriptionJob'
, transcriptionJob_languageCode
- The language code for the input speech.
$sel:languageOptions:TranscriptionJob'
, transcriptionJob_languageOptions
- An object that shows the optional array of languages provided as input for transcription jobs with automatic language identification enabled.
$sel:settings:TranscriptionJob'
, transcriptionJob_settings
- Optional settings for the transcription job. Use these settings to turn
on speaker recognition, to set the maximum number of speakers that
should be identified and to specify a custom vocabulary to use when
processing the transcription job.
$sel:startTime:TranscriptionJob'
, transcriptionJob_startTime
- A timestamp that shows when the job started processing.
$sel:completionTime:TranscriptionJob'
, transcriptionJob_completionTime
- A timestamp that shows when the job completed.
$sel:media:TranscriptionJob'
, transcriptionJob_media
- An object that describes the input media for the transcription job.
$sel:mediaFormat:TranscriptionJob'
, transcriptionJob_mediaFormat
- The format of the input media file.
$sel:modelSettings:TranscriptionJob'
, transcriptionJob_modelSettings
- An object containing the details of your custom language model.
$sel:transcriptionJobStatus:TranscriptionJob'
, transcriptionJob_transcriptionJobStatus
- The status of the transcription job.
$sel:jobExecutionSettings:TranscriptionJob'
, transcriptionJob_jobExecutionSettings
- Provides information about how a transcription job is executed.
$sel:transcriptionJobName:TranscriptionJob'
, transcriptionJob_transcriptionJobName
- The name of the transcription job.
$sel:identifyLanguage:TranscriptionJob'
, transcriptionJob_identifyLanguage
- A value that shows if automatic language identification was enabled for
a transcription job.
$sel:transcript:TranscriptionJob'
, transcriptionJob_transcript
- An object that describes the output of the transcription job.
$sel:tags:TranscriptionJob'
, transcriptionJob_tags
- A key:value pair assigned to a given transcription job.
$sel:mediaSampleRateHertz:TranscriptionJob'
, transcriptionJob_mediaSampleRateHertz
- The sample rate, in Hertz, of the audio track in the input media file.
transcriptionJob_creationTime :: Lens' TranscriptionJob (Maybe UTCTime) Source #
A timestamp that shows when the job was created.
transcriptionJob_failureReason :: Lens' TranscriptionJob (Maybe Text) Source #
If the TranscriptionJobStatus field is FAILED, this field contains information about why the job failed.
The FailureReason field can contain one of the following values:
- Unsupported media format - The media format specified in the MediaFormat field of the request isn't valid. See the description of the MediaFormat field for a list of valid values.
- The media format provided does not match the detected media format - The media format of the audio file doesn't match the format specified in the MediaFormat field in the request. Check the media format of your media file and make sure that the two values match.
- Invalid sample rate for audio file - The sample rate specified in the MediaSampleRateHertz of the request isn't valid. The sample rate must be between 8,000 and 48,000 Hertz.
- The sample rate provided does not match the detected sample rate - The sample rate in the audio file doesn't match the sample rate specified in the MediaSampleRateHertz field in the request. Check the sample rate of your media file and make sure that the two values match.
- Invalid file size: file size too large - The size of your audio file is larger than Amazon Transcribe can process. For more information, see Limits in the Amazon Transcribe Developer Guide.
- Invalid number of channels: number of channels too large - Your audio contains more channels than Amazon Transcribe is configured to process. To request additional channels, see Amazon Transcribe Limits in the Amazon Web Services General Reference.
transcriptionJob_contentRedaction :: Lens' TranscriptionJob (Maybe ContentRedaction) Source #
An object that describes content redaction settings for the transcription job.
transcriptionJob_identifiedLanguageScore :: Lens' TranscriptionJob (Maybe Double) Source #
A value between zero and one that Amazon Transcribe assigned to the language that it identified in the source audio. Larger values indicate that Amazon Transcribe has higher confidence in the language it identified.
transcriptionJob_subtitles :: Lens' TranscriptionJob (Maybe SubtitlesOutput) Source #
Generate subtitles for your batch transcription job.
transcriptionJob_languageCode :: Lens' TranscriptionJob (Maybe LanguageCode) Source #
The language code for the input speech.
transcriptionJob_languageOptions :: Lens' TranscriptionJob (Maybe (NonEmpty LanguageCode)) Source #
An object that shows the optional array of languages provided as input for transcription jobs with automatic language identification enabled.
transcriptionJob_settings :: Lens' TranscriptionJob (Maybe Settings) Source #
Optional settings for the transcription job. Use these settings to turn on speaker recognition, to set the maximum number of speakers that should be identified and to specify a custom vocabulary to use when processing the transcription job.
transcriptionJob_startTime :: Lens' TranscriptionJob (Maybe UTCTime) Source #
A timestamp that shows when the job started processing.
transcriptionJob_completionTime :: Lens' TranscriptionJob (Maybe UTCTime) Source #
A timestamp that shows when the job completed.
transcriptionJob_media :: Lens' TranscriptionJob (Maybe Media) Source #
An object that describes the input media for the transcription job.
transcriptionJob_mediaFormat :: Lens' TranscriptionJob (Maybe MediaFormat) Source #
The format of the input media file.
transcriptionJob_modelSettings :: Lens' TranscriptionJob (Maybe ModelSettings) Source #
An object containing the details of your custom language model.
transcriptionJob_transcriptionJobStatus :: Lens' TranscriptionJob (Maybe TranscriptionJobStatus) Source #
The status of the transcription job.
transcriptionJob_jobExecutionSettings :: Lens' TranscriptionJob (Maybe JobExecutionSettings) Source #
Provides information about how a transcription job is executed.
transcriptionJob_transcriptionJobName :: Lens' TranscriptionJob (Maybe Text) Source #
The name of the transcription job.
transcriptionJob_identifyLanguage :: Lens' TranscriptionJob (Maybe Bool) Source #
A value that shows if automatic language identification was enabled for a transcription job.
transcriptionJob_transcript :: Lens' TranscriptionJob (Maybe Transcript) Source #
An object that describes the output of the transcription job.
transcriptionJob_tags :: Lens' TranscriptionJob (Maybe (NonEmpty Tag)) Source #
A key:value pair assigned to a given transcription job.
transcriptionJob_mediaSampleRateHertz :: Lens' TranscriptionJob (Maybe Natural) Source #
The sample rate, in Hertz, of the audio track in the input media file.
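Pulling several of these lenses together, here is a hedged sketch of inspecting a TranscriptionJob returned by the service: report the transcript URI once the job has completed, or the failure reason otherwise. The TranscriptionJobStatus_COMPLETED and TranscriptionJobStatus_FAILED pattern names follow the usual amazonka naming convention for the COMPLETED and FAILED values and are assumed here.

{-# LANGUAGE OverloadedStrings #-}
{-# LANGUAGE PatternSynonyms #-}

import Amazonka.Transcribe.Types
  ( TranscriptionJob
  , pattern TranscriptionJobStatus_COMPLETED
  , pattern TranscriptionJobStatus_FAILED
  , transcriptionJob_failureReason
  , transcriptionJob_transcript
  , transcriptionJob_transcriptionJobStatus
  , transcript_transcriptFileUri
  )
import Control.Lens (_Just, (^.), (^?))
import Data.Maybe (fromMaybe)
import Data.Text (Text)

-- Summarise where a job stands, assuming the pattern synonyms above.
describeJob :: TranscriptionJob -> Text
describeJob job =
  case job ^. transcriptionJob_transcriptionJobStatus of
    Just TranscriptionJobStatus_COMPLETED ->
      case job ^? transcriptionJob_transcript . _Just . transcript_transcriptFileUri . _Just of
        Just uri -> "Transcript available at: " <> uri
        Nothing  -> "Completed, but no transcript URI was returned."
    Just TranscriptionJobStatus_FAILED ->
      "Failed: " <> fromMaybe "no failure reason given" (job ^. transcriptionJob_failureReason)
    _ -> "Still queued or in progress."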
TranscriptionJobSummary
data TranscriptionJobSummary Source #
Provides a summary of information about a transcription job.
See: newTranscriptionJobSummary smart constructor.
Constructors: TranscriptionJobSummary'
Instances
newTranscriptionJobSummary :: TranscriptionJobSummary Source #
Create a value of TranscriptionJobSummary
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:creationTime:TranscriptionJobSummary'
, transcriptionJobSummary_creationTime
- A timestamp that shows when the job was created.
$sel:failureReason:TranscriptionJobSummary'
, transcriptionJobSummary_failureReason
- If the TranscriptionJobStatus field is FAILED, a description of the error.
$sel:contentRedaction:TranscriptionJobSummary'
, transcriptionJobSummary_contentRedaction
- The content redaction settings of the transcription job.
$sel:identifiedLanguageScore:TranscriptionJobSummary'
, transcriptionJobSummary_identifiedLanguageScore
- A value between zero and one that Amazon Transcribe assigned to the
language it identified in the source audio. A higher score indicates
that Amazon Transcribe is more confident in the language it identified.
$sel:languageCode:TranscriptionJobSummary'
, transcriptionJobSummary_languageCode
- The language code for the input speech.
$sel:outputLocationType:TranscriptionJobSummary'
, transcriptionJobSummary_outputLocationType
- Indicates the location of the output of the transcription job.
If the value is CUSTOMER_BUCKET, then the location is the S3 bucket specified in the outputBucketName field when the transcription job was started with the StartTranscriptionJob operation.
If the value is SERVICE_BUCKET, then the output is stored by Amazon Transcribe and can be retrieved using the URI in the GetTranscriptionJob response's TranscriptFileUri field.
$sel:startTime:TranscriptionJobSummary'
, transcriptionJobSummary_startTime
- A timestamp that shows when the job started processing.
$sel:completionTime:TranscriptionJobSummary'
, transcriptionJobSummary_completionTime
- A timestamp that shows when the job was completed.
$sel:modelSettings:TranscriptionJobSummary'
, transcriptionJobSummary_modelSettings
- Undocumented member.
$sel:transcriptionJobStatus:TranscriptionJobSummary'
, transcriptionJobSummary_transcriptionJobStatus
- The status of the transcription job. When the status is COMPLETED, use the GetTranscriptionJob operation to get the results of the transcription.
$sel:transcriptionJobName:TranscriptionJobSummary'
, transcriptionJobSummary_transcriptionJobName
- The name of the transcription job.
$sel:identifyLanguage:TranscriptionJobSummary'
, transcriptionJobSummary_identifyLanguage
- Whether automatic language identification was enabled for a
transcription job.
transcriptionJobSummary_creationTime :: Lens' TranscriptionJobSummary (Maybe UTCTime) Source #
A timestamp that shows when the job was created.
transcriptionJobSummary_failureReason :: Lens' TranscriptionJobSummary (Maybe Text) Source #
If the TranscriptionJobStatus field is FAILED, a description of the error.
transcriptionJobSummary_contentRedaction :: Lens' TranscriptionJobSummary (Maybe ContentRedaction) Source #
The content redaction settings of the transcription job.
transcriptionJobSummary_identifiedLanguageScore :: Lens' TranscriptionJobSummary (Maybe Double) Source #
A value between zero and one that Amazon Transcribe assigned to the language it identified in the source audio. A higher score indicates that Amazon Transcribe is more confident in the language it identified.
transcriptionJobSummary_languageCode :: Lens' TranscriptionJobSummary (Maybe LanguageCode) Source #
The language code for the input speech.
transcriptionJobSummary_outputLocationType :: Lens' TranscriptionJobSummary (Maybe OutputLocationType) Source #
Indicates the location of the output of the transcription job.
If the value is CUSTOMER_BUCKET, then the location is the S3 bucket specified in the outputBucketName field when the transcription job was started with the StartTranscriptionJob operation.
If the value is SERVICE_BUCKET, then the output is stored by Amazon Transcribe and can be retrieved using the URI in the GetTranscriptionJob response's TranscriptFileUri field.
transcriptionJobSummary_startTime :: Lens' TranscriptionJobSummary (Maybe UTCTime) Source #
A timestamp that shows when the job started processing.
transcriptionJobSummary_completionTime :: Lens' TranscriptionJobSummary (Maybe UTCTime) Source #
A timestamp that shows when the job was completed.
transcriptionJobSummary_modelSettings :: Lens' TranscriptionJobSummary (Maybe ModelSettings) Source #
Undocumented member.
transcriptionJobSummary_transcriptionJobStatus :: Lens' TranscriptionJobSummary (Maybe TranscriptionJobStatus) Source #
The status of the transcription job. When the status is COMPLETED, use the GetTranscriptionJob operation to get the results of the transcription.
transcriptionJobSummary_transcriptionJobName :: Lens' TranscriptionJobSummary (Maybe Text) Source #
The name of the transcription job.
transcriptionJobSummary_identifyLanguage :: Lens' TranscriptionJobSummary (Maybe Bool) Source #
Whether automatic language identification was enabled for a transcription job.
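As a sketch of how outputLocationType is typically consumed when listing jobs (the OutputLocationType pattern names below follow the usual amazonka convention for the CUSTOMER_BUCKET and SERVICE_BUCKET values and are assumed here):

{-# LANGUAGE PatternSynonyms #-}

import Amazonka.Transcribe.Types
  ( TranscriptionJobSummary
  , pattern OutputLocationType_CUSTOMER_BUCKET
  , pattern OutputLocationType_SERVICE_BUCKET
  , transcriptionJobSummary_outputLocationType
  )
import Control.Lens ((^.))

-- Where to look for a listed job's transcript output.
data TranscriptLocation = InYourBucket | HeldByTranscribe | UnknownLocation
  deriving (Show, Eq)

transcriptLocation :: TranscriptionJobSummary -> TranscriptLocation
transcriptLocation summary =
  case summary ^. transcriptionJobSummary_outputLocationType of
    Just OutputLocationType_CUSTOMER_BUCKET -> InYourBucket
    Just OutputLocationType_SERVICE_BUCKET  -> HeldByTranscribe
    _                                       -> UnknownLocation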
VocabularyFilterInfo
data VocabularyFilterInfo Source #
Provides information about a vocabulary filter.
See: newVocabularyFilterInfo smart constructor.
Constructors: VocabularyFilterInfo'
Instances
newVocabularyFilterInfo :: VocabularyFilterInfo Source #
Create a value of VocabularyFilterInfo
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:languageCode:VocabularyFilterInfo'
, vocabularyFilterInfo_languageCode
- The language code of the words in the vocabulary filter.
$sel:lastModifiedTime:VocabularyFilterInfo'
, vocabularyFilterInfo_lastModifiedTime
- The date and time that the vocabulary was last updated.
$sel:vocabularyFilterName:VocabularyFilterInfo'
, vocabularyFilterInfo_vocabularyFilterName
- The name of the vocabulary filter. The name must be unique in the
account that holds the filter.
vocabularyFilterInfo_languageCode :: Lens' VocabularyFilterInfo (Maybe LanguageCode) Source #
The language code of the words in the vocabulary filter.
vocabularyFilterInfo_lastModifiedTime :: Lens' VocabularyFilterInfo (Maybe UTCTime) Source #
The date and time that the vocabulary was last updated.
vocabularyFilterInfo_vocabularyFilterName :: Lens' VocabularyFilterInfo (Maybe Text) Source #
The name of the vocabulary filter. The name must be unique in the account that holds the filter.
VocabularyInfo
data VocabularyInfo Source #
Provides information about a custom vocabulary.
See: newVocabularyInfo smart constructor.
Constructors: VocabularyInfo'
Instances
newVocabularyInfo :: VocabularyInfo Source #
Create a value of VocabularyInfo
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:languageCode:VocabularyInfo'
, vocabularyInfo_languageCode
- The language code of the vocabulary entries.
$sel:vocabularyName:VocabularyInfo'
, vocabularyInfo_vocabularyName
- The name of the vocabulary.
$sel:lastModifiedTime:VocabularyInfo'
, vocabularyInfo_lastModifiedTime
- The date and time that the vocabulary was last modified.
$sel:vocabularyState:VocabularyInfo'
, vocabularyInfo_vocabularyState
- The processing state of the vocabulary. If the state is READY, you can use the vocabulary in a StartTranscriptionJob request.
vocabularyInfo_languageCode :: Lens' VocabularyInfo (Maybe LanguageCode) Source #
The language code of the vocabulary entries.
vocabularyInfo_vocabularyName :: Lens' VocabularyInfo (Maybe Text) Source #
The name of the vocabulary.
vocabularyInfo_lastModifiedTime :: Lens' VocabularyInfo (Maybe UTCTime) Source #
The date and time that the vocabulary was last modified.
vocabularyInfo_vocabularyState :: Lens' VocabularyInfo (Maybe VocabularyState) Source #
The processing state of the vocabulary. If the state is READY, you can use the vocabulary in a StartTranscriptionJob request.
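Finally, a small sketch of the READY check described above (the VocabularyState_READY pattern name follows the usual amazonka convention for the READY value and is assumed here): filter a list of VocabularyInfo values down to the names that can safely be referenced from a StartTranscriptionJob request.

{-# LANGUAGE PatternSynonyms #-}

import Amazonka.Transcribe.Types
  ( VocabularyInfo
  , pattern VocabularyState_READY
  , vocabularyInfo_vocabularyName
  , vocabularyInfo_vocabularyState
  )
import Control.Lens ((^.))
import Data.Maybe (mapMaybe)
import Data.Text (Text)

-- Keep only vocabularies whose processing has finished; their names can be
-- used in a StartTranscriptionJob request.
readyVocabularyNames :: [VocabularyInfo] -> [Text]
readyVocabularyNames = mapMaybe readyName
  where
    readyName v
      | v ^. vocabularyInfo_vocabularyState == Just VocabularyState_READY =
          v ^. vocabularyInfo_vocabularyName
      | otherwise = Nothing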