{-# LANGUAGE DeriveGeneric #-}
{-# LANGUAGE DuplicateRecordFields #-}
{-# LANGUAGE NamedFieldPuns #-}
{-# LANGUAGE OverloadedStrings #-}
{-# LANGUAGE RecordWildCards #-}
{-# LANGUAGE StrictData #-}
{-# LANGUAGE NoImplicitPrelude #-}
{-# OPTIONS_GHC -fno-warn-unused-imports #-}
{-# OPTIONS_GHC -fno-warn-unused-matches #-}

-- Derived from AWS service descriptions, licensed under Apache 2.0.

-- |
-- Module      : Amazonka.Comprehend.Types.ClassifierEvaluationMetrics
-- Copyright   : (c) 2013-2021 Brendan Hay
-- License     : Mozilla Public License, v. 2.0.
-- Maintainer  : Brendan Hay <brendan.g.hay+amazonka@gmail.com>
-- Stability   : auto-generated
-- Portability : non-portable (GHC extensions)
module Amazonka.Comprehend.Types.ClassifierEvaluationMetrics where

import qualified Amazonka.Core as Core
import qualified Amazonka.Lens as Lens
import qualified Amazonka.Prelude as Prelude

-- | Describes the result metrics for the test data associated with a
-- document classifier.
--
-- /See:/ 'newClassifierEvaluationMetrics' smart constructor.
data ClassifierEvaluationMetrics = ClassifierEvaluationMetrics'
  { -- | A measure of the usefulness of the recognizer results in the test data.
    -- High precision means that the recognizer returned substantially more
    -- relevant results than irrelevant ones. Unlike the Precision metric,
    -- which comes from averaging the precision of all available labels, this
    -- is based on the overall score of all precision scores added together.
    microPrecision :: Prelude.Maybe Prelude.Double,
    -- | A measure of how accurate the classifier results are for the test data.
    -- It is a combination of the @Micro Precision@ and @Micro Recall@ values.
    -- The @Micro F1Score@ is the harmonic mean of the two scores. The highest
    -- score is 1, and the worst score is 0.
    microF1Score :: Prelude.Maybe Prelude.Double,
    -- | A measure of how complete the classifier results are for the test data.
    -- High recall means that the classifier returned most of the relevant
    -- results.
    recall :: Prelude.Maybe Prelude.Double,
    -- | A measure of the usefulness of the classifier results in the test data.
    -- High precision means that the classifier returned substantially more
    -- relevant results than irrelevant ones.
    precision :: Prelude.Maybe Prelude.Double,
    -- | A measure of how complete the classifier results are for the test data.
    -- High recall means that the classifier returned most of the relevant
    -- results. Specifically, this indicates how many of the correct
    -- categories in the text the model can predict. It is a percentage of
    -- the correct categories in the text that can be found. Instead of
    -- averaging the recall scores of all labels (as with Recall), micro
    -- Recall is based on the overall score of all recall scores added
    -- together.
    microRecall :: Prelude.Maybe Prelude.Double,
    -- | A measure of how accurate the classifier results are for the test data.
    -- It is derived from the @Precision@ and @Recall@ values. The @F1Score@ is
    -- the harmonic average of the two scores. The highest score is 1, and the
    -- worst score is 0.
    f1Score :: Prelude.Maybe Prelude.Double,
    -- | Indicates the fraction of labels that are incorrectly predicted. Also
    -- seen as the fraction of wrong labels compared to the total number of
    -- labels. Scores closer to zero are better.
    hammingLoss :: Prelude.Maybe Prelude.Double,
    -- | The fraction of the labels that were correctly recognized. It is
    -- computed by dividing the number of labels in the test documents that
    -- were correctly recognized by the total number of labels in the test
    -- documents.
    accuracy :: Prelude.Maybe Prelude.Double
  }
  deriving (Prelude.Eq, Prelude.Read, Prelude.Show, Prelude.Generic)

-- |
-- Create a value of 'ClassifierEvaluationMetrics' with all optional fields omitted.
--
-- Use <https://hackage.haskell.org/package/generic-lens generic-lens> or <https://hackage.haskell.org/package/optics optics> to modify other optional fields.
--
-- The following record fields are available, with the corresponding lenses provided
-- for backwards compatibility:
--
-- 'microPrecision', 'classifierEvaluationMetrics_microPrecision' - A measure of the usefulness of the recognizer results in the test data.
-- High precision means that the recognizer returned substantially more
-- relevant results than irrelevant ones. Unlike the Precision metric,
-- which comes from averaging the precision of all available labels, this
-- is based on the overall score of all precision scores added together.
--
-- 'microF1Score', 'classifierEvaluationMetrics_microF1Score' - A measure of how accurate the classifier results are for the test data.
-- It is a combination of the @Micro Precision@ and @Micro Recall@ values.
-- The @Micro F1Score@ is the harmonic mean of the two scores. The highest
-- score is 1, and the worst score is 0.
--
-- 'recall', 'classifierEvaluationMetrics_recall' - A measure of how complete the classifier results are for the test data.
-- High recall means that the classifier returned most of the relevant
-- results.
--
-- 'precision', 'classifierEvaluationMetrics_precision' - A measure of the usefulness of the classifier results in the test data.
-- High precision means that the classifier returned substantially more
-- relevant results than irrelevant ones.
--
-- 'microRecall', 'classifierEvaluationMetrics_microRecall' - A measure of how complete the classifier results are for the test data.
-- High recall means that the classifier returned most of the relevant
-- results. Specifically, this indicates how many of the correct categories
-- in the text the model can predict. It is a percentage of the correct
-- categories in the text that can be found. Instead of averaging the
-- recall scores of all labels (as with Recall), micro Recall is based on
-- the overall score of all recall scores added together.
--
-- 'f1Score', 'classifierEvaluationMetrics_f1Score' - A measure of how accurate the classifier results are for the test data.
-- It is derived from the @Precision@ and @Recall@ values. The @F1Score@ is
-- the harmonic average of the two scores. The highest score is 1, and the
-- worst score is 0.
--
-- 'hammingLoss', 'classifierEvaluationMetrics_hammingLoss' - Indicates the fraction of labels that are incorrectly predicted. Also
-- seen as the fraction of wrong labels compared to the total number of
-- labels. Scores closer to zero are better.
--
-- 'accuracy', 'classifierEvaluationMetrics_accuracy' - The fraction of the labels that were correctly recognized. It is computed
-- by dividing the number of labels in the test documents that were
-- correctly recognized by the total number of labels in the test
-- documents.
newClassifierEvaluationMetrics ::
  ClassifierEvaluationMetrics
newClassifierEvaluationMetrics =
  ClassifierEvaluationMetrics'
    { microPrecision = Prelude.Nothing,
      microF1Score = Prelude.Nothing,
      recall = Prelude.Nothing,
      precision = Prelude.Nothing,
      microRecall = Prelude.Nothing,
      f1Score = Prelude.Nothing,
      hammingLoss = Prelude.Nothing,
      accuracy = Prelude.Nothing
    }
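
-- A minimal usage sketch (the numbers are illustrative, not real AWS
-- output): start from 'newClassifierEvaluationMetrics' and fill in the
-- fields you need with plain record update syntax.
--
-- @
-- exampleMetrics :: ClassifierEvaluationMetrics
-- exampleMetrics =
--   newClassifierEvaluationMetrics
--     { precision = Prelude.Just 0.92,
--       recall = Prelude.Just 0.88
--     }
-- @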

-- | A measure of the usefulness of the recognizer results in the test data.
-- High precision means that the recognizer returned substantially more
-- relevant results than irrelevant ones. Unlike the Precision metric,
-- which comes from averaging the precision of all available labels, this
-- is based on the overall score of all precision scores added together.
classifierEvaluationMetrics_microPrecision :: Lens.Lens' ClassifierEvaluationMetrics (Prelude.Maybe Prelude.Double)
classifierEvaluationMetrics_microPrecision = Lens.lens (\ClassifierEvaluationMetrics' {microPrecision} -> microPrecision) (\s@ClassifierEvaluationMetrics' {} a -> s {microPrecision = a} :: ClassifierEvaluationMetrics)
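
-- A brief sketch of using the generated lenses, assuming the standard
-- view '(^.)' and set '(.~)' operators are re-exported by 'Amazonka.Lens'
-- (adjust the import if your lens vocabulary comes from elsewhere):
--
-- @
-- getMicroPrecision :: ClassifierEvaluationMetrics -> Prelude.Maybe Prelude.Double
-- getMicroPrecision m = m Lens.^. classifierEvaluationMetrics_microPrecision
--
-- setMicroPrecision :: ClassifierEvaluationMetrics -> ClassifierEvaluationMetrics
-- setMicroPrecision =
--   classifierEvaluationMetrics_microPrecision Lens..~ Prelude.Just 0.9
-- @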

-- | A measure of how accurate the classifier results are for the test data.
-- It is a combination of the @Micro Precision@ and @Micro Recall@ values.
-- The @Micro F1Score@ is the harmonic mean of the two scores. The highest
-- score is 1, and the worst score is 0.
classifierEvaluationMetrics_microF1Score :: Lens.Lens' ClassifierEvaluationMetrics (Prelude.Maybe Prelude.Double)
classifierEvaluationMetrics_microF1Score = Lens.lens (\ClassifierEvaluationMetrics' {microF1Score} -> microF1Score) (\s@ClassifierEvaluationMetrics' {} a -> s {microF1Score = a} :: ClassifierEvaluationMetrics)

-- | A measure of how complete the classifier results are for the test data.
-- High recall means that the classifier returned most of the relevant
-- results.
classifierEvaluationMetrics_recall :: Lens.Lens' ClassifierEvaluationMetrics (Prelude.Maybe Prelude.Double)
classifierEvaluationMetrics_recall = Lens.lens (\ClassifierEvaluationMetrics' {recall} -> recall) (\s@ClassifierEvaluationMetrics' {} a -> s {recall = a} :: ClassifierEvaluationMetrics)

-- | A measure of the usefulness of the classifier results in the test data.
-- High precision means that the classifier returned substantially more
-- relevant results than irrelevant ones.
classifierEvaluationMetrics_precision :: Lens.Lens' ClassifierEvaluationMetrics (Prelude.Maybe Prelude.Double)
classifierEvaluationMetrics_precision = Lens.lens (\ClassifierEvaluationMetrics' {precision} -> precision) (\s@ClassifierEvaluationMetrics' {} a -> s {precision = a} :: ClassifierEvaluationMetrics)

-- | A measure of how complete the classifier results are for the test data.
-- High recall means that the classifier returned most of the relevant
-- results. Specifically, this indicates how many of the correct categories
-- in the text the model can predict. It is a percentage of the correct
-- categories in the text that can be found. Instead of averaging the
-- recall scores of all labels (as with Recall), micro Recall is based on
-- the overall score of all recall scores added together.
classifierEvaluationMetrics_microRecall :: Lens.Lens' ClassifierEvaluationMetrics (Prelude.Maybe Prelude.Double)
classifierEvaluationMetrics_microRecall = Lens.lens (\ClassifierEvaluationMetrics' {microRecall} -> microRecall) (\s@ClassifierEvaluationMetrics' {} a -> s {microRecall = a} :: ClassifierEvaluationMetrics)

-- | A measure of how accurate the classifier results are for the test data.
-- It is derived from the @Precision@ and @Recall@ values. The @F1Score@ is
-- the harmonic average of the two scores. The highest score is 1, and the
-- worst score is 0.
classifierEvaluationMetrics_f1Score :: Lens.Lens' ClassifierEvaluationMetrics (Prelude.Maybe Prelude.Double)
classifierEvaluationMetrics_f1Score = Lens.lens (\ClassifierEvaluationMetrics' {f1Score} -> f1Score) (\s@ClassifierEvaluationMetrics' {} a -> s {f1Score = a} :: ClassifierEvaluationMetrics)

-- | Indicates the fraction of labels that are incorrectly predicted. Also
-- seen as the fraction of wrong labels compared to the total number of
-- labels. Scores closer to zero are better.
classifierEvaluationMetrics_hammingLoss :: Lens.Lens' ClassifierEvaluationMetrics (Prelude.Maybe Prelude.Double)
classifierEvaluationMetrics_hammingLoss = Lens.lens (\ClassifierEvaluationMetrics' {hammingLoss} -> hammingLoss) (\s@ClassifierEvaluationMetrics' {} a -> s {hammingLoss = a} :: ClassifierEvaluationMetrics)

-- | The fraction of the labels that were correctly recognized. It is
-- computed by dividing the number of labels in the test documents that
-- were correctly recognized by the total number of labels in the test
-- documents.
classifierEvaluationMetrics_accuracy :: Lens.Lens' ClassifierEvaluationMetrics (Prelude.Maybe Prelude.Double)
classifierEvaluationMetrics_accuracy = Lens.lens (\ClassifierEvaluationMetrics' {accuracy} -> accuracy) (\s@ClassifierEvaluationMetrics' {} a -> s {accuracy = a} :: ClassifierEvaluationMetrics)

instance Core.FromJSON ClassifierEvaluationMetrics where
  parseJSON =
    Core.withObject
      "ClassifierEvaluationMetrics"
      ( \x ->
          ClassifierEvaluationMetrics'
            Prelude.<$> (x Core..:? "MicroPrecision")
            Prelude.<*> (x Core..:? "MicroF1Score")
            Prelude.<*> (x Core..:? "Recall")
            Prelude.<*> (x Core..:? "Precision")
            Prelude.<*> (x Core..:? "MicroRecall")
            Prelude.<*> (x Core..:? "F1Score")
            Prelude.<*> (x Core..:? "HammingLoss")
            Prelude.<*> (x Core..:? "Accuracy")
      )
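
-- Every wire field is optional and upper-camel-cased, so a partial JSON
-- object decodes with the missing fields left as 'Prelude.Nothing'. A
-- sketch, assuming 'Core.FromJSON' is aeson's class re-exported (which the
-- parser combinators above suggest):
--
-- @
-- import qualified Data.Aeson as Aeson
--
-- decoded :: Prelude.Maybe ClassifierEvaluationMetrics
-- decoded = Aeson.decode "{\"Precision\": 0.91, \"Recall\": 0.84}"
-- @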

instance Prelude.Hashable ClassifierEvaluationMetrics

instance Prelude.NFData ClassifierEvaluationMetrics