{-# LANGUAGE DeriveGeneric #-}
{-# LANGUAGE DuplicateRecordFields #-}
{-# LANGUAGE NamedFieldPuns #-}
{-# LANGUAGE OverloadedStrings #-}
{-# LANGUAGE RecordWildCards #-}
{-# LANGUAGE StrictData #-}
{-# LANGUAGE NoImplicitPrelude #-}
{-# OPTIONS_GHC -fno-warn-unused-imports #-}
{-# OPTIONS_GHC -fno-warn-unused-matches #-}

-- Derived from AWS service descriptions, licensed under Apache 2.0.

-- |
-- Module      : Amazonka.Firehose.Types.ParquetSerDe
-- Copyright   : (c) 2013-2021 Brendan Hay
-- License     : Mozilla Public License, v. 2.0.
-- Maintainer  : Brendan Hay <brendan.g.hay+amazonka@gmail.com>
-- Stability   : auto-generated
-- Portability : non-portable (GHC extensions)
module Amazonka.Firehose.Types.ParquetSerDe where

import qualified Amazonka.Core as Core
import Amazonka.Firehose.Types.ParquetCompression
import Amazonka.Firehose.Types.ParquetWriterVersion
import qualified Amazonka.Lens as Lens
import qualified Amazonka.Prelude as Prelude

-- | A serializer to use for converting data to the Parquet format before
-- storing it in Amazon S3. For more information, see
-- <https://parquet.apache.org/documentation/latest/ Apache Parquet>.
--
-- /See:/ 'newParquetSerDe' smart constructor.
data ParquetSerDe = ParquetSerDe'
  { -- | Indicates the version of row format to output. The possible values
    -- are @V1@ and @V2@. The default is @V1@.
    writerVersion :: Prelude.Maybe ParquetWriterVersion,
    -- | The compression codec to use over data blocks. The possible values
    -- are @UNCOMPRESSED@, @SNAPPY@, and @GZIP@, with the default being
    -- @SNAPPY@. Use @SNAPPY@ for higher decompression speed. Use @GZIP@ if
    -- the compression ratio is more important than speed.
    compression :: Prelude.Maybe ParquetCompression,
    -- | The maximum amount of padding to apply. This is useful if you
    -- intend to copy the data from Amazon S3 to HDFS before querying. The
    -- default is 0.
    maxPaddingBytes :: Prelude.Maybe Prelude.Natural,
    -- | Indicates whether to enable dictionary compression.
    enableDictionaryCompression :: Prelude.Maybe Prelude.Bool,
    -- | The Parquet page size. Column chunks are divided into pages. A page
    -- is conceptually an indivisible unit (in terms of compression and
    -- encoding). The minimum value is 64 KiB and the default is 1 MiB.
    pageSizeBytes :: Prelude.Maybe Prelude.Natural,
    -- | The Hadoop Distributed File System (HDFS) block size. This is
    -- useful if you intend to copy the data from Amazon S3 to HDFS before
    -- querying. The default is 256 MiB and the minimum is 64 MiB. Kinesis
    -- Data Firehose uses this value for padding calculations.
    blockSizeBytes :: Prelude.Maybe Prelude.Natural
  }
  deriving (Prelude.Eq, Prelude.Read, Prelude.Show, Prelude.Generic)

-- |
-- Create a value of 'ParquetSerDe' with all optional fields omitted.
--
-- Use <https://hackage.haskell.org/package/generic-lens generic-lens> or <https://hackage.haskell.org/package/optics optics> to modify other optional fields.
--
-- The following record fields are available, with the corresponding lenses provided
-- for backwards compatibility:
--
-- 'writerVersion', 'parquetSerDe_writerVersion' - Indicates the version of row format to output. The possible values are
-- @V1@ and @V2@. The default is @V1@.
--
-- 'compression', 'parquetSerDe_compression' - The compression codec to use over data blocks. The possible values are
-- @UNCOMPRESSED@, @SNAPPY@, and @GZIP@, with the default being @SNAPPY@.
-- Use @SNAPPY@ for higher decompression speed. Use @GZIP@ if the
-- compression ratio is more important than speed.
--
-- 'maxPaddingBytes', 'parquetSerDe_maxPaddingBytes' - The maximum amount of padding to apply. This is useful if you intend to
-- copy the data from Amazon S3 to HDFS before querying. The default is 0.
--
-- 'enableDictionaryCompression', 'parquetSerDe_enableDictionaryCompression' - Indicates whether to enable dictionary compression.
--
-- 'pageSizeBytes', 'parquetSerDe_pageSizeBytes' - The Parquet page size. Column chunks are divided into pages. A page is
-- conceptually an indivisible unit (in terms of compression and encoding).
-- The minimum value is 64 KiB and the default is 1 MiB.
--
-- 'blockSizeBytes', 'parquetSerDe_blockSizeBytes' - The Hadoop Distributed File System (HDFS) block size. This is useful if
-- you intend to copy the data from Amazon S3 to HDFS before querying. The
-- default is 256 MiB and the minimum is 64 MiB. Kinesis Data Firehose uses
-- this value for padding calculations.
newParquetSerDe ::
  ParquetSerDe
newParquetSerDe =
  ParquetSerDe'
    { writerVersion = Prelude.Nothing,
      compression = Prelude.Nothing,
      maxPaddingBytes = Prelude.Nothing,
      enableDictionaryCompression = Prelude.Nothing,
      pageSizeBytes = Prelude.Nothing,
      blockSizeBytes = Prelude.Nothing
    }
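
-- A construction sketch (illustrative, not part of the generated API):
-- start from 'newParquetSerDe' and override fields with a record update.
-- The pattern synonyms 'ParquetCompression_GZIP' and
-- 'ParquetWriterVersion_V2' are assumed to follow the @TypeName_VALUE@
-- naming convention of the enum modules imported above.
--
-- @
-- gzipSerDe :: ParquetSerDe
-- gzipSerDe =
--   newParquetSerDe
--     { compression = Prelude.Just ParquetCompression_GZIP, -- assumed name
--       writerVersion = Prelude.Just ParquetWriterVersion_V2 -- assumed name
--     }
-- @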

-- | Indicates the version of row format to output. The possible values are
-- @V1@ and @V2@. The default is @V1@.
parquetSerDe_writerVersion :: Lens.Lens' ParquetSerDe (Prelude.Maybe ParquetWriterVersion)
parquetSerDe_writerVersion = Lens.lens (\ParquetSerDe' {writerVersion} -> writerVersion) (\s@ParquetSerDe' {} a -> s {writerVersion = a} :: ParquetSerDe)
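
-- A usage sketch for the generated lenses (illustrative only). 'Lens.view'
-- and 'Lens.set' are assumed to be re-exported by "Amazonka.Lens" alongside
-- the 'Lens.lens' used above:
--
-- @
-- Lens.view parquetSerDe_writerVersion newParquetSerDe
-- -- Nothing
--
-- Lens.set parquetSerDe_writerVersion (Prelude.Just v2) newParquetSerDe
-- -- where v2 is any 'ParquetWriterVersion' value (hypothetical binding)
-- @
--
-- The five lenses below follow the same @Lens.lens getter setter@ shape and
-- are used identically.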

-- | The compression codec to use over data blocks. The possible values are
-- @UNCOMPRESSED@, @SNAPPY@, and @GZIP@, with the default being @SNAPPY@.
-- Use @SNAPPY@ for higher decompression speed. Use @GZIP@ if the
-- compression ratio is more important than speed.
parquetSerDe_compression :: Lens.Lens' ParquetSerDe (Prelude.Maybe ParquetCompression)
parquetSerDe_compression = Lens.lens (\ParquetSerDe' {compression} -> compression) (\s@ParquetSerDe' {} a -> s {compression = a} :: ParquetSerDe)

-- | The maximum amount of padding to apply. This is useful if you intend to
-- copy the data from Amazon S3 to HDFS before querying. The default is 0.
parquetSerDe_maxPaddingBytes :: Lens.Lens' ParquetSerDe (Prelude.Maybe Prelude.Natural)
parquetSerDe_maxPaddingBytes = Lens.lens (\ParquetSerDe' {maxPaddingBytes} -> maxPaddingBytes) (\s@ParquetSerDe' {} a -> s {maxPaddingBytes = a} :: ParquetSerDe)

-- | Indicates whether to enable dictionary compression.
parquetSerDe_enableDictionaryCompression :: Lens.Lens' ParquetSerDe (Prelude.Maybe Prelude.Bool)
parquetSerDe_enableDictionaryCompression = Lens.lens (\ParquetSerDe' {enableDictionaryCompression} -> enableDictionaryCompression) (\s@ParquetSerDe' {} a -> s {enableDictionaryCompression = a} :: ParquetSerDe)

-- | The Parquet page size. Column chunks are divided into pages. A page is
-- conceptually an indivisible unit (in terms of compression and encoding).
-- The minimum value is 64 KiB and the default is 1 MiB.
parquetSerDe_pageSizeBytes :: Lens.Lens' ParquetSerDe (Prelude.Maybe Prelude.Natural)
parquetSerDe_pageSizeBytes = Lens.lens (\ParquetSerDe' {pageSizeBytes} -> pageSizeBytes) (\s@ParquetSerDe' {} a -> s {pageSizeBytes = a} :: ParquetSerDe)

-- | The Hadoop Distributed File System (HDFS) block size. This is useful if
-- you intend to copy the data from Amazon S3 to HDFS before querying. The
-- default is 256 MiB and the minimum is 64 MiB. Kinesis Data Firehose uses
-- this value for padding calculations.
parquetSerDe_blockSizeBytes :: Lens.Lens' ParquetSerDe (Prelude.Maybe Prelude.Natural)
parquetSerDe_blockSizeBytes = Lens.lens (\ParquetSerDe' {blockSizeBytes} -> blockSizeBytes) (\s@ParquetSerDe' {} a -> s {blockSizeBytes = a} :: ParquetSerDe)

instance Core.FromJSON ParquetSerDe where
  parseJSON =
    Core.withObject
      "ParquetSerDe"
      ( \x ->
          ParquetSerDe'
            Prelude.<$> (x Core..:? "WriterVersion")
            Prelude.<*> (x Core..:? "Compression")
            Prelude.<*> (x Core..:? "MaxPaddingBytes")
            Prelude.<*> (x Core..:? "EnableDictionaryCompression")
            Prelude.<*> (x Core..:? "PageSizeBytes")
            Prelude.<*> (x Core..:? "BlockSizeBytes")
      )
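
-- A decoding sketch (illustrative only). 'Core.FromJSON' is assumed to be a
-- re-export of aeson's class, so aeson's 'Data.Aeson.eitherDecode' can drive
-- the instance above; absent keys simply parse to 'Prelude.Nothing':
--
-- @
-- Aeson.eitherDecode "{\"Compression\": \"SNAPPY\", \"PageSizeBytes\": 1048576}"
--   :: Either String ParquetSerDe
-- @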

instance Prelude.Hashable ParquetSerDe

instance Prelude.NFData ParquetSerDe

instance Core.ToJSON ParquetSerDe where
  toJSON ParquetSerDe' {..} =
    Core.object
      ( Prelude.catMaybes
          [ ("WriterVersion" Core..=) Prelude.<$> writerVersion,
            ("Compression" Core..=) Prelude.<$> compression,
            ("MaxPaddingBytes" Core..=) Prelude.<$> maxPaddingBytes,
            ("EnableDictionaryCompression" Core..=)
              Prelude.<$> enableDictionaryCompression,
            ("PageSizeBytes" Core..=) Prelude.<$> pageSizeBytes,
            ("BlockSizeBytes" Core..=) Prelude.<$> blockSizeBytes
          ]
      )
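
-- Because serialization goes through 'Prelude.catMaybes', omitted fields
-- emit no JSON keys at all, so the all-defaults value encodes to an empty
-- object. A sketch (illustrative only):
--
-- @
-- Core.toJSON newParquetSerDe
-- -- Object (fromList [])
--
-- Core.toJSON newParquetSerDe {maxPaddingBytes = Prelude.Just 0}
-- -- Object (fromList [("MaxPaddingBytes", Number 0.0)])
-- @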