{-# LANGUAGE DeriveGeneric #-}
{-# LANGUAGE DuplicateRecordFields #-}
{-# LANGUAGE NamedFieldPuns #-}
{-# LANGUAGE OverloadedStrings #-}
{-# LANGUAGE RecordWildCards #-}
{-# LANGUAGE StrictData #-}
{-# LANGUAGE TypeFamilies #-}
{-# LANGUAGE NoImplicitPrelude #-}
{-# OPTIONS_GHC -fno-warn-unused-binds #-}
{-# OPTIONS_GHC -fno-warn-unused-imports #-}
{-# OPTIONS_GHC -fno-warn-unused-matches #-}

-- Derived from AWS service descriptions, licensed under Apache 2.0.

-- |
-- Module      : Amazonka.Polly.SynthesizeSpeech
-- Copyright   : (c) 2013-2021 Brendan Hay
-- License     : Mozilla Public License, v. 2.0.
-- Maintainer  : Brendan Hay <brendan.g.hay+amazonka@gmail.com>
-- Stability   : auto-generated
-- Portability : non-portable (GHC extensions)
--
-- Synthesizes UTF-8 input, plain text or SSML, to a stream of bytes. SSML
-- input must be valid, well-formed SSML. Some alphabets might not be
-- available with all the voices (for example, Cyrillic might not be read
-- at all by English voices) unless phoneme mapping is used. For more
-- information, see
-- <https://docs.aws.amazon.com/polly/latest/dg/how-text-to-speech-works.html How it Works>.
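--
-- A minimal end-to-end sketch, not part of the generated documentation and
-- resting on a few assumptions: an @Amazonka.Env@ obtained via
-- @Amazonka.newEnv@ with credential discovery (the exact spelling differs
-- between amazonka versions), the @OverloadedStrings@ extension for the
-- text literal, the @(&)@, @(?~)@ and @(^.)@ operators from a lens library,
-- and pattern synonym names such as @OutputFormat_Mp3@, @VoiceId_Joanna@ and
-- @Engine_Neural@ assumed to be exported from "Amazonka.Polly.Types".
--
-- @
-- import qualified Amazonka
-- import Amazonka.Polly.SynthesizeSpeech
-- import Amazonka.Polly.Types
-- import Control.Lens ((&), (?~), (^.))
-- import Control.Monad.Trans.Resource (runResourceT)
-- import qualified Data.Conduit.Binary as Conduit
--
-- helloPolly :: IO ()
-- helloPolly = do
--   -- Credential discovery; the newEnv argument varies by amazonka version.
--   env <- Amazonka.newEnv Amazonka.discover
--   runResourceT $ do
--     let req =
--           newSynthesizeSpeech OutputFormat_Mp3 "Hello from Amazon Polly!" VoiceId_Joanna
--             & synthesizeSpeech_engine ?~ Engine_Neural
--     resp <- Amazonka.send env req
--     -- The audio arrives as a streaming body; drain it before the ResourceT
--     -- scope closes (@Amazonka.sinkBody@ is assumed here).
--     Amazonka.sinkBody (resp ^. synthesizeSpeechResponse_audioStream) (Conduit.sinkFile "hello.mp3")
-- @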
module Amazonka.Polly.SynthesizeSpeech
  ( -- * Creating a Request
    SynthesizeSpeech (..),
    newSynthesizeSpeech,

    -- * Request Lenses
    synthesizeSpeech_languageCode,
    synthesizeSpeech_engine,
    synthesizeSpeech_speechMarkTypes,
    synthesizeSpeech_sampleRate,
    synthesizeSpeech_textType,
    synthesizeSpeech_lexiconNames,
    synthesizeSpeech_outputFormat,
    synthesizeSpeech_text,
    synthesizeSpeech_voiceId,

    -- * Destructuring the Response
    SynthesizeSpeechResponse (..),
    newSynthesizeSpeechResponse,

    -- * Response Lenses
    synthesizeSpeechResponse_requestCharacters,
    synthesizeSpeechResponse_contentType,
    synthesizeSpeechResponse_httpStatus,
    synthesizeSpeechResponse_audioStream,
  )
where

import qualified Amazonka.Core as Core
import qualified Amazonka.Lens as Lens
import Amazonka.Polly.Types
import qualified Amazonka.Prelude as Prelude
import qualified Amazonka.Request as Request
import qualified Amazonka.Response as Response

-- | /See:/ 'newSynthesizeSpeech' smart constructor.
data SynthesizeSpeech = SynthesizeSpeech'
  { -- | Optional language code for the Synthesize Speech request. This is only
    -- necessary if using a bilingual voice, such as Aditi, which can be used
    -- for either Indian English (en-IN) or Hindi (hi-IN).
    --
    -- If a bilingual voice is used and no language code is specified, Amazon
    -- Polly uses the default language of the bilingual voice. The default
    -- language for any voice is the one returned by the
    -- <https://docs.aws.amazon.com/polly/latest/dg/API_DescribeVoices.html DescribeVoices>
    -- operation for the @LanguageCode@ parameter. For example, if no language
    -- code is specified, Aditi will use Indian English rather than Hindi.
    languageCode :: Prelude.Maybe LanguageCode,
    -- | Specifies the engine (@standard@ or @neural@) for Amazon Polly to use
    -- when processing input text for speech synthesis. For information on
    -- Amazon Polly voices and which voices are available in standard-only,
    -- NTTS-only, and both standard and NTTS formats, see
    -- <https://docs.aws.amazon.com/polly/latest/dg/voicelist.html Available Voices>.
    --
    -- __NTTS-only voices__
    --
    -- When using NTTS-only voices such as Kevin (en-US), this parameter is
    -- required and must be set to @neural@. If the engine is not specified, or
    -- is set to @standard@, this will result in an error.
    --
    -- Type: String
    --
    -- Valid Values: @standard@ | @neural@
    --
    -- Required: Yes
    --
    -- __Standard voices__
    --
    -- For standard voices, this is not required; the engine parameter defaults
    -- to @standard@. If the engine is not specified, or is set to @standard@
    -- and an NTTS-only voice is selected, this will result in an error.
    engine :: Prelude.Maybe Engine,
    -- | The type of speech marks returned for the input text.
    speechMarkTypes :: Prelude.Maybe [SpeechMarkType],
    -- | The audio frequency specified in Hz.
    --
    -- The valid values for mp3 and ogg_vorbis are \"8000\", \"16000\",
    -- \"22050\", and \"24000\". The default value for standard voices is
    -- \"22050\". The default value for neural voices is \"24000\".
    --
    -- Valid values for pcm are \"8000\" and \"16000\". The default value is
    -- \"16000\".
    sampleRate :: Prelude.Maybe Prelude.Text,
    -- | Specifies whether the input text is plain text or SSML. The default
    -- value is plain text. For more information, see
    -- <https://docs.aws.amazon.com/polly/latest/dg/ssml.html Using SSML>.
    textType :: Prelude.Maybe TextType,
    -- | List of one or more pronunciation lexicon names you want the service to
    -- apply during synthesis. Lexicons are applied only if the language of the
    -- lexicon is the same as the language of the voice. For information about
    -- storing lexicons, see
    -- <https://docs.aws.amazon.com/polly/latest/dg/API_PutLexicon.html PutLexicon>.
    lexiconNames :: Prelude.Maybe [Prelude.Text],
    -- | The format in which the returned output will be encoded. For audio
    -- stream, this will be mp3, ogg_vorbis, or pcm. For speech marks, this
    -- will be json.
    --
    -- When pcm is used, the content returned is audio\/pcm in a signed 16-bit,
    -- 1 channel (mono), little-endian format.
    outputFormat :: OutputFormat,
    -- | Input text to synthesize. If you specify @ssml@ as the @TextType@,
    -- follow the SSML format for the input text.
    text :: Prelude.Text,
    -- | Voice ID to use for the synthesis. You can get a list of available voice
    -- IDs by calling the
    -- <https://docs.aws.amazon.com/polly/latest/dg/API_DescribeVoices.html DescribeVoices>
    -- operation.
    voiceId :: VoiceId
  }
  deriving (Prelude.Eq, Prelude.Read, Prelude.Show, Prelude.Generic)

-- |
-- Create a value of 'SynthesizeSpeech' with all optional fields omitted.
--
-- Use <https://hackage.haskell.org/package/generic-lens generic-lens> or <https://hackage.haskell.org/package/optics optics> to modify other optional fields.
--
-- The following record fields are available, with the corresponding lenses provided
-- for backwards compatibility:
--
-- 'languageCode', 'synthesizeSpeech_languageCode' - Optional language code for the Synthesize Speech request. This is only
-- necessary if using a bilingual voice, such as Aditi, which can be used
-- for either Indian English (en-IN) or Hindi (hi-IN).
--
-- If a bilingual voice is used and no language code is specified, Amazon
-- Polly uses the default language of the bilingual voice. The default
-- language for any voice is the one returned by the
-- <https://docs.aws.amazon.com/polly/latest/dg/API_DescribeVoices.html DescribeVoices>
-- operation for the @LanguageCode@ parameter. For example, if no language
-- code is specified, Aditi will use Indian English rather than Hindi.
--
-- 'engine', 'synthesizeSpeech_engine' - Specifies the engine (@standard@ or @neural@) for Amazon Polly to use
-- when processing input text for speech synthesis. For information on
-- Amazon Polly voices and which voices are available in standard-only,
-- NTTS-only, and both standard and NTTS formats, see
-- <https://docs.aws.amazon.com/polly/latest/dg/voicelist.html Available Voices>.
--
-- __NTTS-only voices__
--
-- When using NTTS-only voices such as Kevin (en-US), this parameter is
-- required and must be set to @neural@. If the engine is not specified, or
-- is set to @standard@, this will result in an error.
--
-- Type: String
--
-- Valid Values: @standard@ | @neural@
--
-- Required: Yes
--
-- __Standard voices__
--
-- For standard voices, this is not required; the engine parameter defaults
-- to @standard@. If the engine is not specified, or is set to @standard@
-- and an NTTS-only voice is selected, this will result in an error.
--
-- 'speechMarkTypes', 'synthesizeSpeech_speechMarkTypes' - The type of speech marks returned for the input text.
--
-- 'sampleRate', 'synthesizeSpeech_sampleRate' - The audio frequency specified in Hz.
--
-- The valid values for mp3 and ogg_vorbis are \"8000\", \"16000\",
-- \"22050\", and \"24000\". The default value for standard voices is
-- \"22050\". The default value for neural voices is \"24000\".
--
-- Valid values for pcm are \"8000\" and \"16000\". The default value is
-- \"16000\".
--
-- 'textType', 'synthesizeSpeech_textType' - Specifies whether the input text is plain text or SSML. The default
-- value is plain text. For more information, see
-- <https://docs.aws.amazon.com/polly/latest/dg/ssml.html Using SSML>.
--
-- 'lexiconNames', 'synthesizeSpeech_lexiconNames' - List of one or more pronunciation lexicon names you want the service to
-- apply during synthesis. Lexicons are applied only if the language of the
-- lexicon is the same as the language of the voice. For information about
-- storing lexicons, see
-- <https://docs.aws.amazon.com/polly/latest/dg/API_PutLexicon.html PutLexicon>.
--
-- 'outputFormat', 'synthesizeSpeech_outputFormat' - The format in which the returned output will be encoded. For audio
-- stream, this will be mp3, ogg_vorbis, or pcm. For speech marks, this
-- will be json.
--
-- When pcm is used, the content returned is audio\/pcm in a signed 16-bit,
-- 1 channel (mono), little-endian format.
--
-- 'text', 'synthesizeSpeech_text' - Input text to synthesize. If you specify @ssml@ as the @TextType@,
-- follow the SSML format for the input text.
--
-- 'voiceId', 'synthesizeSpeech_voiceId' - Voice ID to use for the synthesis. You can get a list of available voice
-- IDs by calling the
-- <https://docs.aws.amazon.com/polly/latest/dg/API_DescribeVoices.html DescribeVoices>
-- operation.
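--
-- As a construction sketch (assuming @OverloadedStrings@, the @(&)@ and
-- @(?~)@ operators from a lens library, and pattern synonym names such as
-- @OutputFormat_Mp3@, @VoiceId_Aditi@ and @LanguageCode_En_IN@ from
-- "Amazonka.Polly.Types"): pass the three required fields to the constructor,
-- then fill in optional fields with the request lenses.
--
-- @
-- aditiEnglish :: SynthesizeSpeech
-- aditiEnglish =
--   newSynthesizeSpeech OutputFormat_Mp3 "Hello from Aditi." VoiceId_Aditi
--     & synthesizeSpeech_languageCode ?~ LanguageCode_En_IN
--     & synthesizeSpeech_sampleRate ?~ "22050"
-- @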
newSynthesizeSpeech ::
  -- | 'outputFormat'
  OutputFormat ->
  -- | 'text'
  Prelude.Text ->
  -- | 'voiceId'
  VoiceId ->
  SynthesizeSpeech
newSynthesizeSpeech pOutputFormat_ pText_ pVoiceId_ =
  SynthesizeSpeech'
    { languageCode = Prelude.Nothing,
      engine = Prelude.Nothing,
      speechMarkTypes = Prelude.Nothing,
      sampleRate = Prelude.Nothing,
      textType = Prelude.Nothing,
      lexiconNames = Prelude.Nothing,
      outputFormat = pOutputFormat_,
      text = pText_,
      voiceId = pVoiceId_
    }

-- | Optional language code for the Synthesize Speech request. This is only
-- necessary if using a bilingual voice, such as Aditi, which can be used
-- for either Indian English (en-IN) or Hindi (hi-IN).
--
-- If a bilingual voice is used and no language code is specified, Amazon
-- Polly uses the default language of the bilingual voice. The default
-- language for any voice is the one returned by the
-- <https://docs.aws.amazon.com/polly/latest/dg/API_DescribeVoices.html DescribeVoices>
-- operation for the @LanguageCode@ parameter. For example, if no language
-- code is specified, Aditi will use Indian English rather than Hindi.
synthesizeSpeech_languageCode :: Lens.Lens' SynthesizeSpeech (Prelude.Maybe LanguageCode)
synthesizeSpeech_languageCode = Lens.lens (\SynthesizeSpeech' {languageCode} -> languageCode) (\s@SynthesizeSpeech' {} a -> s {languageCode = a} :: SynthesizeSpeech)

-- | Specifies the engine (@standard@ or @neural@) for Amazon Polly to use
-- when processing input text for speech synthesis. For information on
-- Amazon Polly voices and which voices are available in standard-only,
-- NTTS-only, and both standard and NTTS formats, see
-- <https://docs.aws.amazon.com/polly/latest/dg/voicelist.html Available Voices>.
--
-- __NTTS-only voices__
--
-- When using NTTS-only voices such as Kevin (en-US), this parameter is
-- required and must be set to @neural@. If the engine is not specified, or
-- is set to @standard@, this will result in an error.
--
-- Type: String
--
-- Valid Values: @standard@ | @neural@
--
-- Required: Yes
--
-- __Standard voices__
--
-- For standard voices, this is not required; the engine parameter defaults
-- to @standard@. If the engine is not specified, or is set to @standard@
-- and an NTTS-only voice is selected, this will result in an error.
synthesizeSpeech_engine :: Lens.Lens' SynthesizeSpeech (Prelude.Maybe Engine)
synthesizeSpeech_engine = Lens.lens (\SynthesizeSpeech' {engine} -> engine) (\s@SynthesizeSpeech' {} a -> s {engine = a} :: SynthesizeSpeech)

-- | The type of speech marks returned for the input text.
synthesizeSpeech_speechMarkTypes :: Lens.Lens' SynthesizeSpeech (Prelude.Maybe [SpeechMarkType])
synthesizeSpeech_speechMarkTypes = Lens.lens (\SynthesizeSpeech' {speechMarkTypes} -> speechMarkTypes) (\s@SynthesizeSpeech' {} a -> s {speechMarkTypes = a} :: SynthesizeSpeech) Prelude.. Lens.mapping Lens.coerced

-- | The audio frequency specified in Hz.
--
-- The valid values for mp3 and ogg_vorbis are \"8000\", \"16000\",
-- \"22050\", and \"24000\". The default value for standard voices is
-- \"22050\". The default value for neural voices is \"24000\".
--
-- Valid values for pcm are \"8000\" and \"16000\". The default value is
-- \"16000\".
synthesizeSpeech_sampleRate :: Lens.Lens' SynthesizeSpeech (Prelude.Maybe Prelude.Text)
synthesizeSpeech_sampleRate = Lens.lens (\SynthesizeSpeech' {sampleRate} -> sampleRate) (\s@SynthesizeSpeech' {} a -> s {sampleRate = a} :: SynthesizeSpeech)

-- | Specifies whether the input text is plain text or SSML. The default
-- value is plain text. For more information, see
-- <https://docs.aws.amazon.com/polly/latest/dg/ssml.html Using SSML>.
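--
-- A small SSML sketch (assuming @OverloadedStrings@, the @(&)@ and @(?~)@
-- operators from a lens library, and pattern synonym names such as
-- @TextType_Ssml@, @OutputFormat_Mp3@ and @VoiceId_Matthew@ from
-- "Amazonka.Polly.Types"): when the input is SSML rather than plain text, the
-- text type has to be set explicitly.
--
-- @
-- ssmlRequest :: SynthesizeSpeech
-- ssmlRequest =
--   newSynthesizeSpeech OutputFormat_Mp3 "\<speak>Hello world.\<\/speak>" VoiceId_Matthew
--     & synthesizeSpeech_textType ?~ TextType_Ssml
-- @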
synthesizeSpeech_textType :: Lens.Lens' SynthesizeSpeech (Prelude.Maybe TextType)
synthesizeSpeech_textType = Lens.lens (\SynthesizeSpeech' {textType} -> textType) (\s@SynthesizeSpeech' {} a -> s {textType = a} :: SynthesizeSpeech)

-- | List of one or more pronunciation lexicon names you want the service to
-- apply during synthesis. Lexicons are applied only if the language of the
-- lexicon is the same as the language of the voice. For information about
-- storing lexicons, see
-- <https://docs.aws.amazon.com/polly/latest/dg/API_PutLexicon.html PutLexicon>.
synthesizeSpeech_lexiconNames :: Lens.Lens' SynthesizeSpeech (Prelude.Maybe [Prelude.Text])
synthesizeSpeech_lexiconNames = Lens.lens (\SynthesizeSpeech' {lexiconNames} -> lexiconNames) (\s@SynthesizeSpeech' {} a -> s {lexiconNames = a} :: SynthesizeSpeech) Prelude.. Lens.mapping Lens.coerced

-- | The format in which the returned output will be encoded. For audio
-- stream, this will be mp3, ogg_vorbis, or pcm. For speech marks, this
-- will be json.
--
-- When pcm is used, the content returned is audio\/pcm in a signed 16-bit,
-- 1 channel (mono), little-endian format.
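--
-- A speech-marks sketch (assuming @OverloadedStrings@, the @(&)@ and @(?~)@
-- operators from a lens library, and pattern synonym names such as
-- @OutputFormat_Json@, @SpeechMarkType_Word@ and @VoiceId_Joanna@ from
-- "Amazonka.Polly.Types"): requesting speech marks means selecting the @json@
-- output format and listing the mark types you want.
--
-- @
-- wordMarks :: SynthesizeSpeech
-- wordMarks =
--   newSynthesizeSpeech OutputFormat_Json "Mary had a little lamb." VoiceId_Joanna
--     & synthesizeSpeech_speechMarkTypes ?~ [SpeechMarkType_Word]
-- @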
synthesizeSpeech_outputFormat :: Lens.Lens' SynthesizeSpeech OutputFormat
synthesizeSpeech_outputFormat = Lens.lens (\SynthesizeSpeech' {outputFormat} -> outputFormat) (\s@SynthesizeSpeech' {} a -> s {outputFormat = a} :: SynthesizeSpeech)

-- | Input text to synthesize. If you specify @ssml@ as the @TextType@,
-- follow the SSML format for the input text.
synthesizeSpeech_text :: Lens.Lens' SynthesizeSpeech Prelude.Text
synthesizeSpeech_text = Lens.lens (\SynthesizeSpeech' {text} -> text) (\s@SynthesizeSpeech' {} a -> s {text = a} :: SynthesizeSpeech)

-- | Voice ID to use for the synthesis. You can get a list of available voice
-- IDs by calling the
-- <https://docs.aws.amazon.com/polly/latest/dg/API_DescribeVoices.html DescribeVoices>
-- operation.
synthesizeSpeech_voiceId :: Lens.Lens' SynthesizeSpeech VoiceId
synthesizeSpeech_voiceId = Lens.lens (\SynthesizeSpeech' {voiceId} -> voiceId) (\s@SynthesizeSpeech' {} a -> s {voiceId = a} :: SynthesizeSpeech)

instance Core.AWSRequest SynthesizeSpeech where
  type
    AWSResponse SynthesizeSpeech =
      SynthesizeSpeechResponse
  request = Request.postJSON defaultService
  response =
    Response.receiveBody
      ( \s h x ->
          SynthesizeSpeechResponse'
            Prelude.<$> (h Core..#? "x-amzn-RequestCharacters")
            Prelude.<*> (h Core..#? "Content-Type")
            Prelude.<*> (Prelude.pure (Prelude.fromEnum s))
            Prelude.<*> (Prelude.pure x)
      )

instance Prelude.Hashable SynthesizeSpeech

instance Prelude.NFData SynthesizeSpeech

instance Core.ToHeaders SynthesizeSpeech where
  toHeaders = Prelude.const Prelude.mempty

instance Core.ToJSON SynthesizeSpeech where
  toJSON SynthesizeSpeech' {..} =
    Core.object
      ( Prelude.catMaybes
          [ ("LanguageCode" Core..=) Prelude.<$> languageCode,
            ("Engine" Core..=) Prelude.<$> engine,
            ("SpeechMarkTypes" Core..=)
              Prelude.<$> speechMarkTypes,
            ("SampleRate" Core..=) Prelude.<$> sampleRate,
            ("TextType" Core..=) Prelude.<$> textType,
            ("LexiconNames" Core..=) Prelude.<$> lexiconNames,
            Prelude.Just ("OutputFormat" Core..= outputFormat),
            Prelude.Just ("Text" Core..= text),
            Prelude.Just ("VoiceId" Core..= voiceId)
          ]
      )

instance Core.ToPath SynthesizeSpeech where
  toPath = Prelude.const "/v1/speech"

instance Core.ToQuery SynthesizeSpeech where
  toQuery = Prelude.const Prelude.mempty

-- | /See:/ 'newSynthesizeSpeechResponse' smart constructor.
data SynthesizeSpeechResponse = SynthesizeSpeechResponse'
  { -- | Number of characters synthesized.
    requestCharacters :: Prelude.Maybe Prelude.Int,
    -- | Specifies the type of audio stream. This should reflect the @OutputFormat@
    -- parameter in your request.
    --
    -- -   If you request @mp3@ as the @OutputFormat@, the @ContentType@
    --     returned is audio\/mpeg.
    --
    -- -   If you request @ogg_vorbis@ as the @OutputFormat@, the @ContentType@
    --     returned is audio\/ogg.
    --
    -- -   If you request @pcm@ as the @OutputFormat@, the @ContentType@
    --     returned is audio\/pcm in a signed 16-bit, 1 channel (mono),
    --     little-endian format.
    --
    -- -   If you request @json@ as the @OutputFormat@, the @ContentType@
    --     returned is audio\/json.
    contentType :: Prelude.Maybe Prelude.Text,
    -- | The response's http status code.
    httpStatus :: Prelude.Int,
    -- | Stream containing the synthesized speech.
    audioStream :: Core.ResponseBody
  }
  deriving (Prelude.Show, Prelude.Generic)

-- |
-- Create a value of 'SynthesizeSpeechResponse' with all optional fields omitted.
--
-- Use <https://hackage.haskell.org/package/generic-lens generic-lens> or <https://hackage.haskell.org/package/optics optics> to modify other optional fields.
--
-- The following record fields are available, with the corresponding lenses provided
-- for backwards compatibility:
--
-- 'requestCharacters', 'synthesizeSpeechResponse_requestCharacters' - Number of characters synthesized.
--
-- 'contentType', 'synthesizeSpeechResponse_contentType' - Specifies the type of audio stream. This should reflect the @OutputFormat@
-- parameter in your request.
--
-- -   If you request @mp3@ as the @OutputFormat@, the @ContentType@
--     returned is audio\/mpeg.
--
-- -   If you request @ogg_vorbis@ as the @OutputFormat@, the @ContentType@
--     returned is audio\/ogg.
--
-- -   If you request @pcm@ as the @OutputFormat@, the @ContentType@
--     returned is audio\/pcm in a signed 16-bit, 1 channel (mono),
--     little-endian format.
--
-- -   If you request @json@ as the @OutputFormat@, the @ContentType@
--     returned is audio\/json.
--
-- 'httpStatus', 'synthesizeSpeechResponse_httpStatus' - The response's http status code.
--
-- 'audioStream', 'synthesizeSpeechResponse_audioStream' - Stream containing the synthesized speech.
newSynthesizeSpeechResponse ::
  -- | 'httpStatus'
  Prelude.Int ->
  -- | 'audioStream'
  Core.ResponseBody ->
  SynthesizeSpeechResponse
newSynthesizeSpeechResponse
  pHttpStatus_
  pAudioStream_ =
    SynthesizeSpeechResponse'
      { requestCharacters =
          Prelude.Nothing,
        contentType = Prelude.Nothing,
        httpStatus = pHttpStatus_,
        audioStream = pAudioStream_
      }

-- | Number of characters synthesized.
synthesizeSpeechResponse_requestCharacters :: Lens.Lens' SynthesizeSpeechResponse (Prelude.Maybe Prelude.Int)
synthesizeSpeechResponse_requestCharacters = Lens.lens (\SynthesizeSpeechResponse' {requestCharacters} -> requestCharacters) (\s@SynthesizeSpeechResponse' {} a -> s {requestCharacters = a} :: SynthesizeSpeechResponse)

-- | Specifies the type of audio stream. This should reflect the @OutputFormat@
-- parameter in your request.
--
-- -   If you request @mp3@ as the @OutputFormat@, the @ContentType@
--     returned is audio\/mpeg.
--
-- -   If you request @ogg_vorbis@ as the @OutputFormat@, the @ContentType@
--     returned is audio\/ogg.
--
-- -   If you request @pcm@ as the @OutputFormat@, the @ContentType@
--     returned is audio\/pcm in a signed 16-bit, 1 channel (mono),
--     little-endian format.
--
-- -   If you request @json@ as the @OutputFormat@, the @ContentType@
--     returned is audio\/json.
synthesizeSpeechResponse_contentType :: Lens.Lens' SynthesizeSpeechResponse (Prelude.Maybe Prelude.Text)
synthesizeSpeechResponse_contentType = Lens.lens (\SynthesizeSpeechResponse' {contentType} -> contentType) (\s@SynthesizeSpeechResponse' {} a -> s {contentType = a} :: SynthesizeSpeechResponse)

-- | The response's http status code.
synthesizeSpeechResponse_httpStatus :: Lens.Lens' SynthesizeSpeechResponse Prelude.Int
synthesizeSpeechResponse_httpStatus = Lens.lens (\SynthesizeSpeechResponse' {httpStatus} -> httpStatus) (\s@SynthesizeSpeechResponse' {} a -> s {httpStatus = a} :: SynthesizeSpeechResponse)

-- | Stream containing the synthesized speech.
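--
-- A consumption sketch (assuming @Amazonka.sinkBody@, conduit-extra's
-- @Data.Conduit.Binary.sinkFile@, and the @(^.)@ view operator from a lens
-- library): the body is a streaming 'Core.ResponseBody', so it must be
-- drained inside the same @ResourceT@ scope in which the request was sent.
--
-- @
-- import qualified Amazonka
-- import Control.Lens ((^.))
-- import Control.Monad.Trans.Resource (ResourceT)
-- import qualified Data.Conduit.Binary as Conduit
--
-- saveAudio :: SynthesizeSpeechResponse -> ResourceT IO ()
-- saveAudio resp =
--   Amazonka.sinkBody
--     (resp ^. synthesizeSpeechResponse_audioStream)
--     (Conduit.sinkFile "speech.mp3")
-- @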
synthesizeSpeechResponse_audioStream :: Lens.Lens' SynthesizeSpeechResponse Core.ResponseBody
synthesizeSpeechResponse_audioStream = Lens.lens (\SynthesizeSpeechResponse' {audioStream} -> audioStream) (\s@SynthesizeSpeechResponse' {} a -> s {audioStream = a} :: SynthesizeSpeechResponse)