Copyright   : (c) 2013-2021 Brendan Hay
License     : Mozilla Public License, v. 2.0.
Maintainer  : Brendan Hay <brendan.g.hay+amazonka@gmail.com>
Stability   : auto-generated
Portability : non-portable (GHC extensions)
Safe Haskell: None
- Service Configuration
- Errors
- Algorithm
- BatchPredictionFilterVariable
- DataSourceFilterVariable
- DetailsAttributes
- EntityStatus
- EvaluationFilterVariable
- MLModelFilterVariable
- MLModelType
- RealtimeEndpointStatus
- SortOrder
- TaggableResourceType
- BatchPrediction
- DataSource
- Evaluation
- MLModel
- PerformanceMetrics
- Prediction
- RDSDataSpec
- RDSDatabase
- RDSDatabaseCredentials
- RDSMetadata
- RealtimeEndpointInfo
- RedshiftDataSpec
- RedshiftDatabase
- RedshiftDatabaseCredentials
- RedshiftMetadata
- S3DataSpec
- Tag
Synopsis
- defaultService :: Service
- _InvalidTagException :: AsError a => Getting (First ServiceError) a ServiceError
- _InternalServerException :: AsError a => Getting (First ServiceError) a ServiceError
- _InvalidInputException :: AsError a => Getting (First ServiceError) a ServiceError
- _IdempotentParameterMismatchException :: AsError a => Getting (First ServiceError) a ServiceError
- _TagLimitExceededException :: AsError a => Getting (First ServiceError) a ServiceError
- _PredictorNotMountedException :: AsError a => Getting (First ServiceError) a ServiceError
- _ResourceNotFoundException :: AsError a => Getting (First ServiceError) a ServiceError
- _LimitExceededException :: AsError a => Getting (First ServiceError) a ServiceError
- newtype Algorithm where
- Algorithm' { }
- pattern Algorithm_Sgd :: Algorithm
- newtype BatchPredictionFilterVariable where
- BatchPredictionFilterVariable' { }
- pattern BatchPredictionFilterVariable_CreatedAt :: BatchPredictionFilterVariable
- pattern BatchPredictionFilterVariable_DataSourceId :: BatchPredictionFilterVariable
- pattern BatchPredictionFilterVariable_DataURI :: BatchPredictionFilterVariable
- pattern BatchPredictionFilterVariable_IAMUser :: BatchPredictionFilterVariable
- pattern BatchPredictionFilterVariable_LastUpdatedAt :: BatchPredictionFilterVariable
- pattern BatchPredictionFilterVariable_MLModelId :: BatchPredictionFilterVariable
- pattern BatchPredictionFilterVariable_Name :: BatchPredictionFilterVariable
- pattern BatchPredictionFilterVariable_Status :: BatchPredictionFilterVariable
- newtype DataSourceFilterVariable where
- DataSourceFilterVariable' { }
- pattern DataSourceFilterVariable_CreatedAt :: DataSourceFilterVariable
- pattern DataSourceFilterVariable_DataLocationS3 :: DataSourceFilterVariable
- pattern DataSourceFilterVariable_IAMUser :: DataSourceFilterVariable
- pattern DataSourceFilterVariable_LastUpdatedAt :: DataSourceFilterVariable
- pattern DataSourceFilterVariable_Name :: DataSourceFilterVariable
- pattern DataSourceFilterVariable_Status :: DataSourceFilterVariable
- newtype DetailsAttributes where
- newtype EntityStatus where
- EntityStatus' { }
- pattern EntityStatus_COMPLETED :: EntityStatus
- pattern EntityStatus_DELETED :: EntityStatus
- pattern EntityStatus_FAILED :: EntityStatus
- pattern EntityStatus_INPROGRESS :: EntityStatus
- pattern EntityStatus_PENDING :: EntityStatus
- newtype EvaluationFilterVariable where
- EvaluationFilterVariable' { }
- pattern EvaluationFilterVariable_CreatedAt :: EvaluationFilterVariable
- pattern EvaluationFilterVariable_DataSourceId :: EvaluationFilterVariable
- pattern EvaluationFilterVariable_DataURI :: EvaluationFilterVariable
- pattern EvaluationFilterVariable_IAMUser :: EvaluationFilterVariable
- pattern EvaluationFilterVariable_LastUpdatedAt :: EvaluationFilterVariable
- pattern EvaluationFilterVariable_MLModelId :: EvaluationFilterVariable
- pattern EvaluationFilterVariable_Name :: EvaluationFilterVariable
- pattern EvaluationFilterVariable_Status :: EvaluationFilterVariable
- newtype MLModelFilterVariable where
- MLModelFilterVariable' { }
- pattern MLModelFilterVariable_Algorithm :: MLModelFilterVariable
- pattern MLModelFilterVariable_CreatedAt :: MLModelFilterVariable
- pattern MLModelFilterVariable_IAMUser :: MLModelFilterVariable
- pattern MLModelFilterVariable_LastUpdatedAt :: MLModelFilterVariable
- pattern MLModelFilterVariable_MLModelType :: MLModelFilterVariable
- pattern MLModelFilterVariable_Name :: MLModelFilterVariable
- pattern MLModelFilterVariable_RealtimeEndpointStatus :: MLModelFilterVariable
- pattern MLModelFilterVariable_Status :: MLModelFilterVariable
- pattern MLModelFilterVariable_TrainingDataSourceId :: MLModelFilterVariable
- pattern MLModelFilterVariable_TrainingDataURI :: MLModelFilterVariable
- newtype MLModelType where
- MLModelType' { }
- pattern MLModelType_BINARY :: MLModelType
- pattern MLModelType_MULTICLASS :: MLModelType
- pattern MLModelType_REGRESSION :: MLModelType
- newtype RealtimeEndpointStatus where
- newtype SortOrder where
- SortOrder' { }
- pattern SortOrder_Asc :: SortOrder
- pattern SortOrder_Dsc :: SortOrder
- newtype TaggableResourceType where
- data BatchPrediction = BatchPrediction' {
- status :: Maybe EntityStatus
- lastUpdatedAt :: Maybe POSIX
- createdAt :: Maybe POSIX
- computeTime :: Maybe Integer
- inputDataLocationS3 :: Maybe Text
- mLModelId :: Maybe Text
- batchPredictionDataSourceId :: Maybe Text
- totalRecordCount :: Maybe Integer
- startedAt :: Maybe POSIX
- batchPredictionId :: Maybe Text
- finishedAt :: Maybe POSIX
- invalidRecordCount :: Maybe Integer
- createdByIamUser :: Maybe Text
- name :: Maybe Text
- message :: Maybe Text
- outputUri :: Maybe Text
- newBatchPrediction :: BatchPrediction
- batchPrediction_status :: Lens' BatchPrediction (Maybe EntityStatus)
- batchPrediction_lastUpdatedAt :: Lens' BatchPrediction (Maybe UTCTime)
- batchPrediction_createdAt :: Lens' BatchPrediction (Maybe UTCTime)
- batchPrediction_computeTime :: Lens' BatchPrediction (Maybe Integer)
- batchPrediction_inputDataLocationS3 :: Lens' BatchPrediction (Maybe Text)
- batchPrediction_mLModelId :: Lens' BatchPrediction (Maybe Text)
- batchPrediction_batchPredictionDataSourceId :: Lens' BatchPrediction (Maybe Text)
- batchPrediction_totalRecordCount :: Lens' BatchPrediction (Maybe Integer)
- batchPrediction_startedAt :: Lens' BatchPrediction (Maybe UTCTime)
- batchPrediction_batchPredictionId :: Lens' BatchPrediction (Maybe Text)
- batchPrediction_finishedAt :: Lens' BatchPrediction (Maybe UTCTime)
- batchPrediction_invalidRecordCount :: Lens' BatchPrediction (Maybe Integer)
- batchPrediction_createdByIamUser :: Lens' BatchPrediction (Maybe Text)
- batchPrediction_name :: Lens' BatchPrediction (Maybe Text)
- batchPrediction_message :: Lens' BatchPrediction (Maybe Text)
- batchPrediction_outputUri :: Lens' BatchPrediction (Maybe Text)
- data DataSource = DataSource' {
- status :: Maybe EntityStatus
- numberOfFiles :: Maybe Integer
- lastUpdatedAt :: Maybe POSIX
- createdAt :: Maybe POSIX
- computeTime :: Maybe Integer
- dataSourceId :: Maybe Text
- rDSMetadata :: Maybe RDSMetadata
- dataSizeInBytes :: Maybe Integer
- startedAt :: Maybe POSIX
- finishedAt :: Maybe POSIX
- createdByIamUser :: Maybe Text
- name :: Maybe Text
- dataLocationS3 :: Maybe Text
- computeStatistics :: Maybe Bool
- message :: Maybe Text
- redshiftMetadata :: Maybe RedshiftMetadata
- dataRearrangement :: Maybe Text
- roleARN :: Maybe Text
- newDataSource :: DataSource
- dataSource_status :: Lens' DataSource (Maybe EntityStatus)
- dataSource_numberOfFiles :: Lens' DataSource (Maybe Integer)
- dataSource_lastUpdatedAt :: Lens' DataSource (Maybe UTCTime)
- dataSource_createdAt :: Lens' DataSource (Maybe UTCTime)
- dataSource_computeTime :: Lens' DataSource (Maybe Integer)
- dataSource_dataSourceId :: Lens' DataSource (Maybe Text)
- dataSource_rDSMetadata :: Lens' DataSource (Maybe RDSMetadata)
- dataSource_dataSizeInBytes :: Lens' DataSource (Maybe Integer)
- dataSource_startedAt :: Lens' DataSource (Maybe UTCTime)
- dataSource_finishedAt :: Lens' DataSource (Maybe UTCTime)
- dataSource_createdByIamUser :: Lens' DataSource (Maybe Text)
- dataSource_name :: Lens' DataSource (Maybe Text)
- dataSource_dataLocationS3 :: Lens' DataSource (Maybe Text)
- dataSource_computeStatistics :: Lens' DataSource (Maybe Bool)
- dataSource_message :: Lens' DataSource (Maybe Text)
- dataSource_redshiftMetadata :: Lens' DataSource (Maybe RedshiftMetadata)
- dataSource_dataRearrangement :: Lens' DataSource (Maybe Text)
- dataSource_roleARN :: Lens' DataSource (Maybe Text)
- data Evaluation = Evaluation' {
- status :: Maybe EntityStatus
- performanceMetrics :: Maybe PerformanceMetrics
- lastUpdatedAt :: Maybe POSIX
- createdAt :: Maybe POSIX
- computeTime :: Maybe Integer
- inputDataLocationS3 :: Maybe Text
- mLModelId :: Maybe Text
- startedAt :: Maybe POSIX
- finishedAt :: Maybe POSIX
- createdByIamUser :: Maybe Text
- name :: Maybe Text
- evaluationId :: Maybe Text
- message :: Maybe Text
- evaluationDataSourceId :: Maybe Text
- newEvaluation :: Evaluation
- evaluation_status :: Lens' Evaluation (Maybe EntityStatus)
- evaluation_performanceMetrics :: Lens' Evaluation (Maybe PerformanceMetrics)
- evaluation_lastUpdatedAt :: Lens' Evaluation (Maybe UTCTime)
- evaluation_createdAt :: Lens' Evaluation (Maybe UTCTime)
- evaluation_computeTime :: Lens' Evaluation (Maybe Integer)
- evaluation_inputDataLocationS3 :: Lens' Evaluation (Maybe Text)
- evaluation_mLModelId :: Lens' Evaluation (Maybe Text)
- evaluation_startedAt :: Lens' Evaluation (Maybe UTCTime)
- evaluation_finishedAt :: Lens' Evaluation (Maybe UTCTime)
- evaluation_createdByIamUser :: Lens' Evaluation (Maybe Text)
- evaluation_name :: Lens' Evaluation (Maybe Text)
- evaluation_evaluationId :: Lens' Evaluation (Maybe Text)
- evaluation_message :: Lens' Evaluation (Maybe Text)
- evaluation_evaluationDataSourceId :: Lens' Evaluation (Maybe Text)
- data MLModel = MLModel' {
- status :: Maybe EntityStatus
- lastUpdatedAt :: Maybe POSIX
- trainingParameters :: Maybe (HashMap Text Text)
- scoreThresholdLastUpdatedAt :: Maybe POSIX
- createdAt :: Maybe POSIX
- computeTime :: Maybe Integer
- inputDataLocationS3 :: Maybe Text
- mLModelId :: Maybe Text
- sizeInBytes :: Maybe Integer
- startedAt :: Maybe POSIX
- scoreThreshold :: Maybe Double
- finishedAt :: Maybe POSIX
- algorithm :: Maybe Algorithm
- createdByIamUser :: Maybe Text
- name :: Maybe Text
- endpointInfo :: Maybe RealtimeEndpointInfo
- trainingDataSourceId :: Maybe Text
- message :: Maybe Text
- mLModelType :: Maybe MLModelType
- newMLModel :: MLModel
- mLModel_status :: Lens' MLModel (Maybe EntityStatus)
- mLModel_lastUpdatedAt :: Lens' MLModel (Maybe UTCTime)
- mLModel_trainingParameters :: Lens' MLModel (Maybe (HashMap Text Text))
- mLModel_scoreThresholdLastUpdatedAt :: Lens' MLModel (Maybe UTCTime)
- mLModel_createdAt :: Lens' MLModel (Maybe UTCTime)
- mLModel_computeTime :: Lens' MLModel (Maybe Integer)
- mLModel_inputDataLocationS3 :: Lens' MLModel (Maybe Text)
- mLModel_mLModelId :: Lens' MLModel (Maybe Text)
- mLModel_sizeInBytes :: Lens' MLModel (Maybe Integer)
- mLModel_startedAt :: Lens' MLModel (Maybe UTCTime)
- mLModel_scoreThreshold :: Lens' MLModel (Maybe Double)
- mLModel_finishedAt :: Lens' MLModel (Maybe UTCTime)
- mLModel_algorithm :: Lens' MLModel (Maybe Algorithm)
- mLModel_createdByIamUser :: Lens' MLModel (Maybe Text)
- mLModel_name :: Lens' MLModel (Maybe Text)
- mLModel_endpointInfo :: Lens' MLModel (Maybe RealtimeEndpointInfo)
- mLModel_trainingDataSourceId :: Lens' MLModel (Maybe Text)
- mLModel_message :: Lens' MLModel (Maybe Text)
- mLModel_mLModelType :: Lens' MLModel (Maybe MLModelType)
- data PerformanceMetrics = PerformanceMetrics' {
- properties :: Maybe (HashMap Text Text)
- newPerformanceMetrics :: PerformanceMetrics
- performanceMetrics_properties :: Lens' PerformanceMetrics (Maybe (HashMap Text Text))
- data Prediction = Prediction' {}
- newPrediction :: Prediction
- prediction_predictedValue :: Lens' Prediction (Maybe Double)
- prediction_predictedLabel :: Lens' Prediction (Maybe Text)
- prediction_predictedScores :: Lens' Prediction (Maybe (HashMap Text Double))
- prediction_details :: Lens' Prediction (Maybe (HashMap DetailsAttributes Text))
- data RDSDataSpec = RDSDataSpec' {}
- newRDSDataSpec :: RDSDatabase -> Text -> RDSDatabaseCredentials -> Text -> Text -> Text -> Text -> RDSDataSpec
- rDSDataSpec_dataSchemaUri :: Lens' RDSDataSpec (Maybe Text)
- rDSDataSpec_dataSchema :: Lens' RDSDataSpec (Maybe Text)
- rDSDataSpec_dataRearrangement :: Lens' RDSDataSpec (Maybe Text)
- rDSDataSpec_databaseInformation :: Lens' RDSDataSpec RDSDatabase
- rDSDataSpec_selectSqlQuery :: Lens' RDSDataSpec Text
- rDSDataSpec_databaseCredentials :: Lens' RDSDataSpec RDSDatabaseCredentials
- rDSDataSpec_s3StagingLocation :: Lens' RDSDataSpec Text
- rDSDataSpec_resourceRole :: Lens' RDSDataSpec Text
- rDSDataSpec_serviceRole :: Lens' RDSDataSpec Text
- rDSDataSpec_subnetId :: Lens' RDSDataSpec Text
- rDSDataSpec_securityGroupIds :: Lens' RDSDataSpec [Text]
- data RDSDatabase = RDSDatabase' {}
- newRDSDatabase :: Text -> Text -> RDSDatabase
- rDSDatabase_instanceIdentifier :: Lens' RDSDatabase Text
- rDSDatabase_databaseName :: Lens' RDSDatabase Text
- data RDSDatabaseCredentials = RDSDatabaseCredentials' {}
- newRDSDatabaseCredentials :: Text -> Text -> RDSDatabaseCredentials
- rDSDatabaseCredentials_username :: Lens' RDSDatabaseCredentials Text
- rDSDatabaseCredentials_password :: Lens' RDSDatabaseCredentials Text
- data RDSMetadata = RDSMetadata' {}
- newRDSMetadata :: RDSMetadata
- rDSMetadata_selectSqlQuery :: Lens' RDSMetadata (Maybe Text)
- rDSMetadata_dataPipelineId :: Lens' RDSMetadata (Maybe Text)
- rDSMetadata_database :: Lens' RDSMetadata (Maybe RDSDatabase)
- rDSMetadata_databaseUserName :: Lens' RDSMetadata (Maybe Text)
- rDSMetadata_resourceRole :: Lens' RDSMetadata (Maybe Text)
- rDSMetadata_serviceRole :: Lens' RDSMetadata (Maybe Text)
- data RealtimeEndpointInfo = RealtimeEndpointInfo' {}
- newRealtimeEndpointInfo :: RealtimeEndpointInfo
- realtimeEndpointInfo_createdAt :: Lens' RealtimeEndpointInfo (Maybe UTCTime)
- realtimeEndpointInfo_endpointUrl :: Lens' RealtimeEndpointInfo (Maybe Text)
- realtimeEndpointInfo_endpointStatus :: Lens' RealtimeEndpointInfo (Maybe RealtimeEndpointStatus)
- realtimeEndpointInfo_peakRequestsPerSecond :: Lens' RealtimeEndpointInfo (Maybe Int)
- data RedshiftDataSpec = RedshiftDataSpec' {}
- newRedshiftDataSpec :: RedshiftDatabase -> Text -> RedshiftDatabaseCredentials -> Text -> RedshiftDataSpec
- redshiftDataSpec_dataSchemaUri :: Lens' RedshiftDataSpec (Maybe Text)
- redshiftDataSpec_dataSchema :: Lens' RedshiftDataSpec (Maybe Text)
- redshiftDataSpec_dataRearrangement :: Lens' RedshiftDataSpec (Maybe Text)
- redshiftDataSpec_databaseInformation :: Lens' RedshiftDataSpec RedshiftDatabase
- redshiftDataSpec_selectSqlQuery :: Lens' RedshiftDataSpec Text
- redshiftDataSpec_databaseCredentials :: Lens' RedshiftDataSpec RedshiftDatabaseCredentials
- redshiftDataSpec_s3StagingLocation :: Lens' RedshiftDataSpec Text
- data RedshiftDatabase = RedshiftDatabase' {}
- newRedshiftDatabase :: Text -> Text -> RedshiftDatabase
- redshiftDatabase_databaseName :: Lens' RedshiftDatabase Text
- redshiftDatabase_clusterIdentifier :: Lens' RedshiftDatabase Text
- data RedshiftDatabaseCredentials = RedshiftDatabaseCredentials' {}
- newRedshiftDatabaseCredentials :: Text -> Text -> RedshiftDatabaseCredentials
- redshiftDatabaseCredentials_username :: Lens' RedshiftDatabaseCredentials Text
- redshiftDatabaseCredentials_password :: Lens' RedshiftDatabaseCredentials Text
- data RedshiftMetadata = RedshiftMetadata' {}
- newRedshiftMetadata :: RedshiftMetadata
- redshiftMetadata_selectSqlQuery :: Lens' RedshiftMetadata (Maybe Text)
- redshiftMetadata_redshiftDatabase :: Lens' RedshiftMetadata (Maybe RedshiftDatabase)
- redshiftMetadata_databaseUserName :: Lens' RedshiftMetadata (Maybe Text)
- data S3DataSpec = S3DataSpec' {}
- newS3DataSpec :: Text -> S3DataSpec
- s3DataSpec_dataSchema :: Lens' S3DataSpec (Maybe Text)
- s3DataSpec_dataSchemaLocationS3 :: Lens' S3DataSpec (Maybe Text)
- s3DataSpec_dataRearrangement :: Lens' S3DataSpec (Maybe Text)
- s3DataSpec_dataLocationS3 :: Lens' S3DataSpec Text
- data Tag = Tag' {}
- newTag :: Tag
- tag_value :: Lens' Tag (Maybe Text)
- tag_key :: Lens' Tag (Maybe Text)
Service Configuration
defaultService :: Service Source #
API version 2014-12-12
of the Amazon Machine Learning SDK configuration.
Errors
_InvalidTagException :: AsError a => Getting (First ServiceError) a ServiceError Source #
Prism for InvalidTagException' errors.
_InternalServerException :: AsError a => Getting (First ServiceError) a ServiceError Source #
An error on the server occurred when trying to process a request.
_InvalidInputException :: AsError a => Getting (First ServiceError) a ServiceError Source #
An error on the client occurred. Typically, the cause is an invalid input value.
_IdempotentParameterMismatchException :: AsError a => Getting (First ServiceError) a ServiceError Source #
A second request to use or change an object was not allowed. This can result from retrying a request using a parameter that was not present in the original request.
_TagLimitExceededException :: AsError a => Getting (First ServiceError) a ServiceError Source #
Prism for TagLimitExceededException' errors.
_PredictorNotMountedException :: AsError a => Getting (First ServiceError) a ServiceError Source #
The exception is thrown when a predict request is made to an unmounted
MLModel
.
_ResourceNotFoundException :: AsError a => Getting (First ServiceError) a ServiceError Source #
A specified resource cannot be located.
_LimitExceededException :: AsError a => Getting (First ServiceError) a ServiceError Source #
The subscriber exceeded the maximum number of operations. This exception
can occur when listing objects such as DataSource
.
Algorithm
The function used to train an MLModel
. Training choices supported by
Amazon ML include the following:
- SGD - Stochastic Gradient Descent.
- RandomForest - Random forest of decision trees.
pattern Algorithm_Sgd :: Algorithm |
Instances
BatchPredictionFilterVariable
newtype BatchPredictionFilterVariable Source #
A list of the variables to use in searching or filtering
BatchPrediction
.
- CreatedAt - Sets the search criteria to the BatchPrediction creation date.
- Status - Sets the search criteria to the BatchPrediction status.
- Name - Sets the search criteria to the contents of the BatchPrediction Name.
- IAMUser - Sets the search criteria to the user account that invoked the BatchPrediction creation.
- MLModelId - Sets the search criteria to the MLModel used in the BatchPrediction.
- DataSourceId - Sets the search criteria to the DataSource used in the BatchPrediction.
- DataURI - Sets the search criteria to the data file(s) used in the BatchPrediction. The URL can identify either a file or an Amazon Simple Storage Service (Amazon S3) bucket or directory.
Instances
DataSourceFilterVariable
newtype DataSourceFilterVariable Source #
A list of the variables to use in searching or filtering DataSource
.
- CreatedAt - Sets the search criteria to DataSource creation date.
- Status - Sets the search criteria to DataSource status.
- Name - Sets the search criteria to the contents of DataSource Name.
- DataUri - Sets the search criteria to the URI of data files used to create the DataSource. The URI can identify either a file or an Amazon Simple Storage Service (Amazon S3) bucket or directory.
- IAMUser - Sets the search criteria to the user account that invoked the DataSource creation.

Note: The variable names should match the variable names in the DataSource.
Instances
DetailsAttributes
newtype DetailsAttributes Source #
Contains the key values of DetailsMap
:
- PredictiveModelType - Indicates the type of the MLModel.
- Algorithm - Indicates the algorithm that was used for the MLModel.
pattern DetailsAttributes_Algorithm :: DetailsAttributes | |
pattern DetailsAttributes_PredictiveModelType :: DetailsAttributes |
Instances
EntityStatus
newtype EntityStatus Source #
Object status with the following possible values:
PENDING
INPROGRESS
FAILED
COMPLETED
DELETED
pattern EntityStatus_COMPLETED :: EntityStatus | |
pattern EntityStatus_DELETED :: EntityStatus | |
pattern EntityStatus_FAILED :: EntityStatus | |
pattern EntityStatus_INPROGRESS :: EntityStatus | |
pattern EntityStatus_PENDING :: EntityStatus |
Instances
EvaluationFilterVariable
newtype EvaluationFilterVariable Source #
A list of the variables to use in searching or filtering Evaluation
.
- CreatedAt - Sets the search criteria to Evaluation creation date.
- Status - Sets the search criteria to Evaluation status.
- Name - Sets the search criteria to the contents of Evaluation Name.
- IAMUser - Sets the search criteria to the user account that invoked an evaluation.
- MLModelId - Sets the search criteria to the Predictor that was evaluated.
- DataSourceId - Sets the search criteria to the DataSource used in evaluation.
- DataUri - Sets the search criteria to the data file(s) used in evaluation. The URL can identify either a file or an Amazon Simple Storage Service (Amazon S3) bucket or directory.
Instances
MLModelFilterVariable
newtype MLModelFilterVariable Source #
Instances
MLModelType
newtype MLModelType Source #
pattern MLModelType_BINARY :: MLModelType | |
pattern MLModelType_MULTICLASS :: MLModelType | |
pattern MLModelType_REGRESSION :: MLModelType |
Instances
RealtimeEndpointStatus
newtype RealtimeEndpointStatus Source #
pattern RealtimeEndpointStatus_FAILED :: RealtimeEndpointStatus | |
pattern RealtimeEndpointStatus_NONE :: RealtimeEndpointStatus | |
pattern RealtimeEndpointStatus_READY :: RealtimeEndpointStatus | |
pattern RealtimeEndpointStatus_UPDATING :: RealtimeEndpointStatus |
Instances
SortOrder
The sort order specified in a listing condition. Possible values include the following:
- asc - Present the information in ascending order (from A-Z).
- dsc - Present the information in descending order (from Z-A).
pattern SortOrder_Asc :: SortOrder | |
pattern SortOrder_Dsc :: SortOrder |
Instances
TaggableResourceType
newtype TaggableResourceType Source #
Instances
BatchPrediction
data BatchPrediction Source #
Represents the output of a GetBatchPrediction
operation.
The content consists of the detailed metadata, the status, and the data
file information of a Batch Prediction
.
See: newBatchPrediction
smart constructor.
BatchPrediction' | |
|
Instances
newBatchPrediction :: BatchPrediction Source #
Create a value of BatchPrediction
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:status:BatchPrediction'
, batchPrediction_status
- The status of the BatchPrediction
. This element can have one of the
following values:
- PENDING - Amazon Machine Learning (Amazon ML) submitted a request to generate predictions for a batch of observations.
- INPROGRESS - The process is underway.
- FAILED - The request to perform a batch prediction did not run to completion. It is not usable.
- COMPLETED - The batch prediction process completed successfully.
- DELETED - The BatchPrediction is marked as deleted. It is not usable.
$sel:lastUpdatedAt:BatchPrediction'
, batchPrediction_lastUpdatedAt
- The time of the most recent edit to the BatchPrediction
. The time is
expressed in epoch time.
$sel:createdAt:BatchPrediction'
, batchPrediction_createdAt
- The time that the BatchPrediction
was created. The time is expressed
in epoch time.
$sel:computeTime:BatchPrediction'
, batchPrediction_computeTime
- Undocumented member.
$sel:inputDataLocationS3:BatchPrediction'
, batchPrediction_inputDataLocationS3
- The location of the data file or directory in Amazon Simple Storage
Service (Amazon S3).
$sel:mLModelId:BatchPrediction'
, batchPrediction_mLModelId
- The ID of the MLModel
that generated predictions for the
BatchPrediction
request.
$sel:batchPredictionDataSourceId:BatchPrediction'
, batchPrediction_batchPredictionDataSourceId
- The ID of the DataSource
that points to the group of observations to
predict.
$sel:totalRecordCount:BatchPrediction'
, batchPrediction_totalRecordCount
- Undocumented member.
$sel:startedAt:BatchPrediction'
, batchPrediction_startedAt
- Undocumented member.
$sel:batchPredictionId:BatchPrediction'
, batchPrediction_batchPredictionId
- The ID assigned to the BatchPrediction
at creation. This value should
be identical to the value of the BatchPredictionID
in the request.
$sel:finishedAt:BatchPrediction'
, batchPrediction_finishedAt
- Undocumented member.
$sel:invalidRecordCount:BatchPrediction'
, batchPrediction_invalidRecordCount
- Undocumented member.
$sel:createdByIamUser:BatchPrediction'
, batchPrediction_createdByIamUser
- The AWS user account that invoked the BatchPrediction
. The account
type can be either an AWS root account or an AWS Identity and Access
Management (IAM) user account.
$sel:name:BatchPrediction'
, batchPrediction_name
- A user-supplied name or description of the BatchPrediction
.
$sel:message:BatchPrediction'
, batchPrediction_message
- A description of the most recent details about processing the batch
prediction request.
$sel:outputUri:BatchPrediction'
, batchPrediction_outputUri
- The location of an Amazon S3 bucket or directory to receive the
operation results. The following substrings are not allowed in the
s3 key
portion of the outputURI
field: ':', '//', '/./',
'/../'.
batchPrediction_status :: Lens' BatchPrediction (Maybe EntityStatus) Source #
The status of the BatchPrediction
. This element can have one of the
following values:
- PENDING - Amazon Machine Learning (Amazon ML) submitted a request to generate predictions for a batch of observations.
- INPROGRESS - The process is underway.
- FAILED - The request to perform a batch prediction did not run to completion. It is not usable.
- COMPLETED - The batch prediction process completed successfully.
- DELETED - The BatchPrediction is marked as deleted. It is not usable.
batchPrediction_lastUpdatedAt :: Lens' BatchPrediction (Maybe UTCTime) Source #
The time of the most recent edit to the BatchPrediction
. The time is
expressed in epoch time.
batchPrediction_createdAt :: Lens' BatchPrediction (Maybe UTCTime) Source #
The time that the BatchPrediction
was created. The time is expressed
in epoch time.
batchPrediction_computeTime :: Lens' BatchPrediction (Maybe Integer) Source #
Undocumented member.
batchPrediction_inputDataLocationS3 :: Lens' BatchPrediction (Maybe Text) Source #
The location of the data file or directory in Amazon Simple Storage Service (Amazon S3).
batchPrediction_mLModelId :: Lens' BatchPrediction (Maybe Text) Source #
The ID of the MLModel
that generated predictions for the
BatchPrediction
request.
batchPrediction_batchPredictionDataSourceId :: Lens' BatchPrediction (Maybe Text) Source #
The ID of the DataSource
that points to the group of observations to
predict.
batchPrediction_totalRecordCount :: Lens' BatchPrediction (Maybe Integer) Source #
Undocumented member.
batchPrediction_startedAt :: Lens' BatchPrediction (Maybe UTCTime) Source #
Undocumented member.
batchPrediction_batchPredictionId :: Lens' BatchPrediction (Maybe Text) Source #
The ID assigned to the BatchPrediction
at creation. This value should
be identical to the value of the BatchPredictionID
in the request.
batchPrediction_finishedAt :: Lens' BatchPrediction (Maybe UTCTime) Source #
Undocumented member.
batchPrediction_invalidRecordCount :: Lens' BatchPrediction (Maybe Integer) Source #
Undocumented member.
batchPrediction_createdByIamUser :: Lens' BatchPrediction (Maybe Text) Source #
The AWS user account that invoked the BatchPrediction
. The account
type can be either an AWS root account or an AWS Identity and Access
Management (IAM) user account.
batchPrediction_name :: Lens' BatchPrediction (Maybe Text) Source #
A user-supplied name or description of the BatchPrediction
.
batchPrediction_message :: Lens' BatchPrediction (Maybe Text) Source #
A description of the most recent details about processing the batch prediction request.
batchPrediction_outputUri :: Lens' BatchPrediction (Maybe Text) Source #
The location of an Amazon S3 bucket or directory to receive the
operation results. The following substrings are not allowed in the
s3 key
portion of the outputURI
field: ':', '//', '/./',
'/../'.
DataSource
data DataSource Source #
Represents the output of the GetDataSource
operation.
The content consists of the detailed metadata and data file information
and the current status of the DataSource
.
See: newDataSource
smart constructor.
DataSource' | |
|
Instances
newDataSource :: DataSource Source #
Create a value of DataSource
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:status:DataSource'
, dataSource_status
- The current status of the DataSource
. This element can have one of the
following values:
- PENDING - Amazon Machine Learning (Amazon ML) submitted a request to
create a
DataSource
. - INPROGRESS - The creation process is underway.
- FAILED - The request to create a
DataSource
did not run to completion. It is not usable. - COMPLETED - The creation process completed successfully.
- DELETED - The
DataSource
is marked as deleted. It is not usable.
$sel:numberOfFiles:DataSource'
, dataSource_numberOfFiles
- The number of data files referenced by the DataSource
.
$sel:lastUpdatedAt:DataSource'
, dataSource_lastUpdatedAt
- The time of the most recent edit to the DataSource
. The time is
expressed in epoch time.
$sel:createdAt:DataSource'
, dataSource_createdAt
- The time that the DataSource
was created. The time is expressed in
epoch time.
$sel:computeTime:DataSource'
, dataSource_computeTime
- Undocumented member.
$sel:dataSourceId:DataSource'
, dataSource_dataSourceId
- The ID that is assigned to the DataSource
during creation.
$sel:rDSMetadata:DataSource'
, dataSource_rDSMetadata
- Undocumented member.
$sel:dataSizeInBytes:DataSource'
, dataSource_dataSizeInBytes
- The total number of observations contained in the data files that the
DataSource
references.
$sel:startedAt:DataSource'
, dataSource_startedAt
- Undocumented member.
$sel:finishedAt:DataSource'
, dataSource_finishedAt
- Undocumented member.
$sel:createdByIamUser:DataSource'
, dataSource_createdByIamUser
- The AWS user account from which the DataSource
was created. The
account type can be either an AWS root account or an AWS Identity and
Access Management (IAM) user account.
$sel:name:DataSource'
, dataSource_name
- A user-supplied name or description of the DataSource
.
$sel:dataLocationS3:DataSource'
, dataSource_dataLocationS3
- The location and name of the data in Amazon Simple Storage Service
(Amazon S3) that is used by a DataSource
.
$sel:computeStatistics:DataSource'
, dataSource_computeStatistics
- The parameter is true
if statistics need to be generated from the
observation data.
$sel:message:DataSource'
, dataSource_message
- A description of the most recent details about creating the
DataSource
.
$sel:redshiftMetadata:DataSource'
, dataSource_redshiftMetadata
- Undocumented member.
$sel:dataRearrangement:DataSource'
, dataSource_dataRearrangement
- A JSON string that represents the splitting and rearrangement
requirement used when this DataSource
was created.
$sel:roleARN:DataSource'
, dataSource_roleARN
- Undocumented member.
dataSource_status :: Lens' DataSource (Maybe EntityStatus) Source #
The current status of the DataSource
. This element can have one of the
following values:
- PENDING - Amazon Machine Learning (Amazon ML) submitted a request to
create a
DataSource
. - INPROGRESS - The creation process is underway.
- FAILED - The request to create a
DataSource
did not run to completion. It is not usable. - COMPLETED - The creation process completed successfully.
- DELETED - The
DataSource
is marked as deleted. It is not usable.
dataSource_numberOfFiles :: Lens' DataSource (Maybe Integer) Source #
The number of data files referenced by the DataSource
.
dataSource_lastUpdatedAt :: Lens' DataSource (Maybe UTCTime) Source #
The time of the most recent edit to the DataSource
. The time is
expressed in epoch time.
dataSource_createdAt :: Lens' DataSource (Maybe UTCTime) Source #
The time that the DataSource
was created. The time is expressed in
epoch time.
dataSource_computeTime :: Lens' DataSource (Maybe Integer) Source #
Undocumented member.
dataSource_dataSourceId :: Lens' DataSource (Maybe Text) Source #
The ID that is assigned to the DataSource
during creation.
dataSource_rDSMetadata :: Lens' DataSource (Maybe RDSMetadata) Source #
Undocumented member.
dataSource_dataSizeInBytes :: Lens' DataSource (Maybe Integer) Source #
The total number of observations contained in the data files that the
DataSource
references.
dataSource_startedAt :: Lens' DataSource (Maybe UTCTime) Source #
Undocumented member.
dataSource_finishedAt :: Lens' DataSource (Maybe UTCTime) Source #
Undocumented member.
dataSource_createdByIamUser :: Lens' DataSource (Maybe Text) Source #
The AWS user account from which the DataSource
was created. The
account type can be either an AWS root account or an AWS Identity and
Access Management (IAM) user account.
dataSource_name :: Lens' DataSource (Maybe Text) Source #
A user-supplied name or description of the DataSource
.
dataSource_dataLocationS3 :: Lens' DataSource (Maybe Text) Source #
The location and name of the data in Amazon Simple Storage Service
(Amazon S3) that is used by a DataSource
.
dataSource_computeStatistics :: Lens' DataSource (Maybe Bool) Source #
The parameter is true
if statistics need to be generated from the
observation data.
dataSource_message :: Lens' DataSource (Maybe Text) Source #
A description of the most recent details about creating the
DataSource
.
dataSource_redshiftMetadata :: Lens' DataSource (Maybe RedshiftMetadata) Source #
Undocumented member.
dataSource_dataRearrangement :: Lens' DataSource (Maybe Text) Source #
A JSON string that represents the splitting and rearrangement
requirement used when this DataSource
was created.
dataSource_roleARN :: Lens' DataSource (Maybe Text) Source #
Undocumented member.
Evaluation
data Evaluation Source #
Represents the output of a GetEvaluation
operation.
The content consists of the detailed metadata and data file information
and the current status of the Evaluation
.
See: newEvaluation
smart constructor.
Evaluation' | |
|
Instances
newEvaluation :: Evaluation Source #
Create a value of Evaluation
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:status:Evaluation'
, evaluation_status
- The status of the evaluation. This element can have one of the following
values:
PENDING
- Amazon Machine Learning (Amazon ML) submitted a request to evaluate an MLModel.
INPROGRESS
- The evaluation is underway.
FAILED
- The request to evaluate an MLModel did not run to completion. It is not usable.
COMPLETED
- The evaluation process completed successfully.
DELETED
- The Evaluation is marked as deleted. It is not usable.
$sel:performanceMetrics:Evaluation'
, evaluation_performanceMetrics
- Measurements of how well the MLModel
performed, using observations
referenced by the DataSource
. One of the following metrics is
returned, based on the type of the MLModel
:
- BinaryAUC: A binary
MLModel
uses the Area Under the Curve (AUC) technique to measure performance. - RegressionRMSE: A regression
MLModel
uses the Root Mean Square Error (RMSE) technique to measure performance. RMSE measures the difference between predicted and actual values for a single variable. - MulticlassAvgFScore: A multiclass
MLModel
uses the F1 score technique to measure performance.
For more information about performance metrics, please see the Amazon Machine Learning Developer Guide.
$sel:lastUpdatedAt:Evaluation'
, evaluation_lastUpdatedAt
- The time of the most recent edit to the Evaluation
. The time is
expressed in epoch time.
$sel:createdAt:Evaluation'
, evaluation_createdAt
- The time that the Evaluation
was created. The time is expressed in
epoch time.
$sel:computeTime:Evaluation'
, evaluation_computeTime
- Undocumented member.
$sel:inputDataLocationS3:Evaluation'
, evaluation_inputDataLocationS3
- The location and name of the data in Amazon Simple Storage Service
(Amazon S3) that is used in the evaluation.
$sel:mLModelId:Evaluation'
, evaluation_mLModelId
- The ID of the MLModel
that is the focus of the evaluation.
$sel:startedAt:Evaluation'
, evaluation_startedAt
- Undocumented member.
$sel:finishedAt:Evaluation'
, evaluation_finishedAt
- Undocumented member.
$sel:createdByIamUser:Evaluation'
, evaluation_createdByIamUser
- The AWS user account that invoked the evaluation. The account type can
be either an AWS root account or an AWS Identity and Access Management
(IAM) user account.
$sel:name:Evaluation'
, evaluation_name
- A user-supplied name or description of the Evaluation
.
$sel:evaluationId:Evaluation'
, evaluation_evaluationId
- The ID that is assigned to the Evaluation
at creation.
$sel:message:Evaluation'
, evaluation_message
- A description of the most recent details about evaluating the MLModel
.
$sel:evaluationDataSourceId:Evaluation'
, evaluation_evaluationDataSourceId
- The ID of the DataSource
that is used to evaluate the MLModel
.
evaluation_status :: Lens' Evaluation (Maybe EntityStatus) Source #
The status of the evaluation. This element can have one of the following values:
PENDING
- Amazon Machine Learning (Amazon ML) submitted a request to evaluate an MLModel.
INPROGRESS
- The evaluation is underway.
FAILED
- The request to evaluate an MLModel did not run to completion. It is not usable.
COMPLETED
- The evaluation process completed successfully.
DELETED
- The Evaluation is marked as deleted. It is not usable.
evaluation_performanceMetrics :: Lens' Evaluation (Maybe PerformanceMetrics) Source #
Measurements of how well the MLModel
performed, using observations
referenced by the DataSource
. One of the following metrics is
returned, based on the type of the MLModel
:
- BinaryAUC: A binary
MLModel
uses the Area Under the Curve (AUC) technique to measure performance. - RegressionRMSE: A regression
MLModel
uses the Root Mean Square Error (RMSE) technique to measure performance. RMSE measures the difference between predicted and actual values for a single variable. - MulticlassAvgFScore: A multiclass
MLModel
uses the F1 score technique to measure performance.
For more information about performance metrics, please see the Amazon Machine Learning Developer Guide.
evaluation_lastUpdatedAt :: Lens' Evaluation (Maybe UTCTime) Source #
The time of the most recent edit to the Evaluation
. The time is
expressed in epoch time.
evaluation_createdAt :: Lens' Evaluation (Maybe UTCTime) Source #
The time that the Evaluation
was created. The time is expressed in
epoch time.
evaluation_computeTime :: Lens' Evaluation (Maybe Integer) Source #
Undocumented member.
evaluation_inputDataLocationS3 :: Lens' Evaluation (Maybe Text) Source #
The location and name of the data in Amazon Simple Storage Service (Amazon S3) that is used in the evaluation.
evaluation_mLModelId :: Lens' Evaluation (Maybe Text) Source #
The ID of the MLModel
that is the focus of the evaluation.
evaluation_startedAt :: Lens' Evaluation (Maybe UTCTime) Source #
Undocumented member.
evaluation_finishedAt :: Lens' Evaluation (Maybe UTCTime) Source #
Undocumented member.
evaluation_createdByIamUser :: Lens' Evaluation (Maybe Text) Source #
The AWS user account that invoked the evaluation. The account type can be either an AWS root account or an AWS Identity and Access Management (IAM) user account.
evaluation_name :: Lens' Evaluation (Maybe Text) Source #
A user-supplied name or description of the Evaluation
.
evaluation_evaluationId :: Lens' Evaluation (Maybe Text) Source #
The ID that is assigned to the Evaluation
at creation.
evaluation_message :: Lens' Evaluation (Maybe Text) Source #
A description of the most recent details about evaluating the MLModel
.
evaluation_evaluationDataSourceId :: Lens' Evaluation (Maybe Text) Source #
The ID of the DataSource
that is used to evaluate the MLModel
.
MLModel
Represents the output of a GetMLModel
operation.
The content consists of the detailed metadata and the current status of
the MLModel
.
See: newMLModel
smart constructor.
MLModel' | |
|
Instances
newMLModel :: MLModel Source #
Create a value of MLModel
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:status:MLModel'
, mLModel_status
- The current status of an MLModel
. This element can have one of the
following values:
PENDING
- Amazon Machine Learning (Amazon ML) submitted a request to create an MLModel.
INPROGRESS
- The creation process is underway.
FAILED
- The request to create an MLModel didn't run to completion. The model isn't usable.
COMPLETED
- The creation process completed successfully.
DELETED
- The MLModel is marked as deleted. It isn't usable.
$sel:lastUpdatedAt:MLModel'
, mLModel_lastUpdatedAt
- The time of the most recent edit to the MLModel
. The time is expressed
in epoch time.
$sel:trainingParameters:MLModel'
, mLModel_trainingParameters
- A list of the training parameters in the MLModel
. The list is
implemented as a map of key-value pairs.
The following is the current set of training parameters:
sgd.maxMLModelSizeInBytes
- The maximum allowed size of the model. Depending on the input data, the size of the model might affect its performance.The value is an integer that ranges from
100000
to2147483648
. The default value is33554432
.sgd.maxPasses
- The number of times that the training process traverses the observations to build theMLModel
. The value is an integer that ranges from1
to10000
. The default value is10
.sgd.shuffleType
- Whether Amazon ML shuffles the training data. Shuffling the data improves a model's ability to find the optimal solution for a variety of data types. The valid values areauto
andnone
. The default value isnone
.sgd.l1RegularizationAmount
- The coefficient regularization L1 norm, which controls overfitting the data by penalizing large coefficients. This parameter tends to drive coefficients to zero, resulting in sparse feature set. If you use this parameter, start by specifying a small value, such as1.0E-08
.The value is a double that ranges from
0
toMAX_DOUBLE
. The default is to not use L1 normalization. This parameter can't be used whenL2
is specified. Use this parameter sparingly.sgd.l2RegularizationAmount
- The coefficient regularization L2 norm, which controls overfitting the data by penalizing large coefficients. This tends to drive coefficients to small, nonzero values. If you use this parameter, start by specifying a small value, such as1.0E-08
.The value is a double that ranges from
0
toMAX_DOUBLE
. The default is to not use L2 normalization. This parameter can't be used whenL1
is specified. Use this parameter sparingly.
$sel:scoreThresholdLastUpdatedAt:MLModel'
, mLModel_scoreThresholdLastUpdatedAt
- The time of the most recent edit to the ScoreThreshold
. The time is
expressed in epoch time.
$sel:createdAt:MLModel'
, mLModel_createdAt
- The time that the MLModel
was created. The time is expressed in epoch
time.
$sel:computeTime:MLModel'
, mLModel_computeTime
- Undocumented member.
$sel:inputDataLocationS3:MLModel'
, mLModel_inputDataLocationS3
- The location of the data file or directory in Amazon Simple Storage
Service (Amazon S3).
$sel:mLModelId:MLModel'
, mLModel_mLModelId
- The ID assigned to the MLModel
at creation.
$sel:sizeInBytes:MLModel'
, mLModel_sizeInBytes
- Undocumented member.
$sel:startedAt:MLModel'
, mLModel_startedAt
- Undocumented member.
$sel:scoreThreshold:MLModel'
, mLModel_scoreThreshold
- Undocumented member.
$sel:finishedAt:MLModel'
, mLModel_finishedAt
- Undocumented member.
$sel:algorithm:MLModel'
, mLModel_algorithm
- The algorithm used to train the MLModel
. The following algorithm is
supported:
SGD
-- Stochastic gradient descent. The goal of SGD
is to minimize the gradient of the loss function.
$sel:createdByIamUser:MLModel'
, mLModel_createdByIamUser
- The AWS user account from which the MLModel
was created. The account
type can be either an AWS root account or an AWS Identity and Access
Management (IAM) user account.
$sel:name:MLModel'
, mLModel_name
- A user-supplied name or description of the MLModel
.
$sel:endpointInfo:MLModel'
, mLModel_endpointInfo
- The current endpoint of the MLModel
.
$sel:trainingDataSourceId:MLModel'
, mLModel_trainingDataSourceId
- The ID of the training DataSource
. The CreateMLModel
operation uses
the TrainingDataSourceId
.
$sel:message:MLModel'
, mLModel_message
- A description of the most recent details about accessing the MLModel
.
$sel:mLModelType:MLModel'
, mLModel_mLModelType
- Identifies the MLModel
category. The following are the available
types:
REGRESSION
- Produces a numeric result. For example, "What price should a house be listed at?"BINARY
- Produces one of two possible results. For example, "Is this a child-friendly web site?".MULTICLASS
- Produces one of several possible results. For example, "Is this a HIGH-, LOW-, or MEDIUM-risk trade?".
mLModel_status :: Lens' MLModel (Maybe EntityStatus) Source #
The current status of an MLModel
. This element can have one of the
following values:
PENDING
- Amazon Machine Learning (Amazon ML) submitted a request to create an MLModel.
INPROGRESS
- The creation process is underway.
FAILED
- The request to create an MLModel didn't run to completion. The model isn't usable.
COMPLETED
- The creation process completed successfully.
DELETED
- The MLModel is marked as deleted. It isn't usable.
mLModel_lastUpdatedAt :: Lens' MLModel (Maybe UTCTime) Source #
The time of the most recent edit to the MLModel
. The time is expressed
in epoch time.
mLModel_trainingParameters :: Lens' MLModel (Maybe (HashMap Text Text)) Source #
A list of the training parameters in the MLModel
. The list is
implemented as a map of key-value pairs.
The following is the current set of training parameters:
sgd.maxMLModelSizeInBytes
- The maximum allowed size of the model. Depending on the input data, the size of the model might affect its performance.The value is an integer that ranges from
100000
to2147483648
. The default value is33554432
.sgd.maxPasses
- The number of times that the training process traverses the observations to build theMLModel
. The value is an integer that ranges from1
to10000
. The default value is10
.sgd.shuffleType
- Whether Amazon ML shuffles the training data. Shuffling the data improves a model's ability to find the optimal solution for a variety of data types. The valid values areauto
andnone
. The default value isnone
.sgd.l1RegularizationAmount
- The coefficient regularization L1 norm, which controls overfitting the data by penalizing large coefficients. This parameter tends to drive coefficients to zero, resulting in sparse feature set. If you use this parameter, start by specifying a small value, such as1.0E-08
.The value is a double that ranges from
0
toMAX_DOUBLE
. The default is to not use L1 normalization. This parameter can't be used whenL2
is specified. Use this parameter sparingly.sgd.l2RegularizationAmount
- The coefficient regularization L2 norm, which controls overfitting the data by penalizing large coefficients. This tends to drive coefficients to small, nonzero values. If you use this parameter, start by specifying a small value, such as1.0E-08
.The value is a double that ranges from
0
toMAX_DOUBLE
. The default is to not use L2 normalization. This parameter can't be used whenL1
is specified. Use this parameter sparingly.
mLModel_scoreThresholdLastUpdatedAt :: Lens' MLModel (Maybe UTCTime) Source #
The time of the most recent edit to the ScoreThreshold
. The time is
expressed in epoch time.
mLModel_createdAt :: Lens' MLModel (Maybe UTCTime) Source #
The time that the MLModel
was created. The time is expressed in epoch
time.
mLModel_inputDataLocationS3 :: Lens' MLModel (Maybe Text) Source #
The location of the data file or directory in Amazon Simple Storage Service (Amazon S3).
mLModel_algorithm :: Lens' MLModel (Maybe Algorithm) Source #
The algorithm used to train the MLModel
. The following algorithm is
supported:
SGD
-- Stochastic gradient descent. The goal of SGD
is to minimize the gradient of the loss function.
mLModel_createdByIamUser :: Lens' MLModel (Maybe Text) Source #
The AWS user account from which the MLModel
was created. The account
type can be either an AWS root account or an AWS Identity and Access
Management (IAM) user account.
mLModel_name :: Lens' MLModel (Maybe Text) Source #
A user-supplied name or description of the MLModel
.
mLModel_endpointInfo :: Lens' MLModel (Maybe RealtimeEndpointInfo) Source #
The current endpoint of the MLModel
.
mLModel_trainingDataSourceId :: Lens' MLModel (Maybe Text) Source #
The ID of the training DataSource
. The CreateMLModel
operation uses
the TrainingDataSourceId
.
mLModel_message :: Lens' MLModel (Maybe Text) Source #
A description of the most recent details about accessing the MLModel
.
mLModel_mLModelType :: Lens' MLModel (Maybe MLModelType) Source #
Identifies the MLModel
category. The following are the available
types:
REGRESSION
- Produces a numeric result. For example, "What price should a house be listed at?"BINARY
- Produces one of two possible results. For example, "Is this a child-friendly web site?".MULTICLASS
- Produces one of several possible results. For example, "Is this a HIGH-, LOW-, or MEDIUM-risk trade?".
PerformanceMetrics
data PerformanceMetrics Source #
Measurements of how well the MLModel
performed on known observations.
One of the following metrics is returned, based on the type of the
MLModel
:
- BinaryAUC: The binary
MLModel
uses the Area Under the Curve (AUC) technique to measure performance. - RegressionRMSE: The regression
MLModel
uses the Root Mean Square Error (RMSE) technique to measure performance. RMSE measures the difference between predicted and actual values for a single variable. - MulticlassAvgFScore: The multiclass
MLModel
uses the F1 score technique to measure performance.
For more information about performance metrics, please see the Amazon Machine Learning Developer Guide.
See: newPerformanceMetrics
smart constructor.
Instances
newPerformanceMetrics :: PerformanceMetrics Source #
Create a value of PerformanceMetrics
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:properties:PerformanceMetrics'
, performanceMetrics_properties
- Undocumented member.
performanceMetrics_properties :: Lens' PerformanceMetrics (Maybe (HashMap Text Text)) Source #
Undocumented member.
Prediction
data Prediction Source #
The output from a Predict
operation:
Details
- Contains the following attributes:DetailsAttributes.PREDICTIVE_MODEL_TYPE - REGRESSION | BINARY | MULTICLASS
DetailsAttributes.ALGORITHM - SGD
PredictedLabel
- Present for either a BINARY
orMULTICLASS
MLModel
request.PredictedScores
- Contains the raw classification score corresponding to each label.PredictedValue
- Present for a REGRESSION
MLModel
request.
See: newPrediction
smart constructor.
Prediction' | |
|
Instances
newPrediction :: Prediction Source #
Create a value of Prediction
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:predictedValue:Prediction'
, prediction_predictedValue
- The prediction value for REGRESSION
MLModel
.
$sel:predictedLabel:Prediction'
, prediction_predictedLabel
- The prediction label for either a BINARY
or MULTICLASS
MLModel
.
$sel:predictedScores:Prediction'
, prediction_predictedScores
- Undocumented member.
$sel:details:Prediction'
, prediction_details
- Undocumented member.
prediction_predictedValue :: Lens' Prediction (Maybe Double) Source #
The prediction value for REGRESSION
MLModel
.
prediction_predictedLabel :: Lens' Prediction (Maybe Text) Source #
The prediction label for either a BINARY
or MULTICLASS
MLModel
.
prediction_predictedScores :: Lens' Prediction (Maybe (HashMap Text Double)) Source #
Undocumented member.
prediction_details :: Lens' Prediction (Maybe (HashMap DetailsAttributes Text)) Source #
Undocumented member.
RDSDataSpec
data RDSDataSpec Source #
The data specification of an Amazon Relational Database Service (Amazon
RDS) DataSource
.
See: newRDSDataSpec
smart constructor.
RDSDataSpec' | |
|
Instances
:: RDSDatabase | |
-> Text | |
-> RDSDatabaseCredentials | |
-> Text | |
-> Text | |
-> Text | |
-> Text | |
-> RDSDataSpec |
Create a value of RDSDataSpec
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:dataSchemaUri:RDSDataSpec'
, rDSDataSpec_dataSchemaUri
- The Amazon S3 location of the DataSchema
.
$sel:dataSchema:RDSDataSpec'
, rDSDataSpec_dataSchema
- A JSON string that represents the schema for an Amazon RDS DataSource
.
The DataSchema
defines the structure of the observation data in the
data file(s) referenced in the DataSource
.
A DataSchema
is not required if you specify a DataSchemaUri.
Define your DataSchema
as a series of key-value pairs. attributes
and excludedVariableNames
have an array of key-value pairs for their
value. Use the following format to define your DataSchema
.
{ "version": "1.0",
"recordAnnotationFieldName": "F1",
"recordWeightFieldName": "F2",
"targetFieldName": "F3",
"dataFormat": "CSV",
"dataFileContainsHeader": true,
"attributes": [
{ "fieldName": "F1", "fieldType": "TEXT" }, { "fieldName": "F2", "fieldType": "NUMERIC" }, { "fieldName": "F3", "fieldType": "CATEGORICAL" }, { "fieldName": "F4", "fieldType": "NUMERIC" }, { "fieldName": "F5", "fieldType": "CATEGORICAL" }, { "fieldName": "F6", "fieldType": "TEXT" }, { "fieldName": "F7", "fieldType": "WEIGHTED_INT_SEQUENCE" }, { "fieldName": "F8", "fieldType": "WEIGHTED_STRING_SEQUENCE" } ],
"excludedVariableNames": [ "F6" ] }
$sel:dataRearrangement:RDSDataSpec'
, rDSDataSpec_dataRearrangement
- A JSON string that represents the splitting and rearrangement processing
to be applied to a DataSource
. If the DataRearrangement
parameter is
not provided, all of the input data is used to create the Datasource
.
There are multiple parameters that control what data is used to create a datasource:
percentBegin
Use
percentBegin
to indicate the beginning of the range of the data used to create the Datasource. If you do not includepercentBegin
andpercentEnd
, Amazon ML includes all of the data when creating the datasource.percentEnd
Use
percentEnd
to indicate the end of the range of the data used to create the Datasource. If you do not includepercentBegin
andpercentEnd
, Amazon ML includes all of the data when creating the datasource.complement
The
complement
parameter instructs Amazon ML to use the data that is not included in the range ofpercentBegin
topercentEnd
to create a datasource. Thecomplement
parameter is useful if you need to create complementary datasources for training and evaluation. To create a complementary datasource, use the same values forpercentBegin
andpercentEnd
, along with thecomplement
parameter.For example, the following two datasources do not share any data, and can be used to train and evaluate a model. The first datasource has 25 percent of the data, and the second one has 75 percent of the data.
Datasource for evaluation:
{"splitting":{"percentBegin":0, "percentEnd":25}}
Datasource for training:
{"splitting":{"percentBegin":0, "percentEnd":25, "complement":"true"}}
strategy
To change how Amazon ML splits the data for a datasource, use the
strategy
parameter.The default value for the
strategy
parameter issequential
, meaning that Amazon ML takes all of the data records between thepercentBegin
andpercentEnd
parameters for the datasource, in the order that the records appear in the input data.The following two
DataRearrangement
lines are examples of sequentially ordered training and evaluation datasources:Datasource for evaluation:
{"splitting":{"percentBegin":70, "percentEnd":100, "strategy":"sequential"}}
Datasource for training:
{"splitting":{"percentBegin":70, "percentEnd":100, "strategy":"sequential", "complement":"true"}}
To randomly split the input data into the proportions indicated by the percentBegin and percentEnd parameters, set the
strategy
parameter torandom
and provide a string that is used as the seed value for the random data splitting (for example, you can use the S3 path to your data as the random seed string). If you choose the random split strategy, Amazon ML assigns each row of data a pseudo-random number between 0 and 100, and then selects the rows that have an assigned number betweenpercentBegin
andpercentEnd
. Pseudo-random numbers are assigned using both the input seed string value and the byte offset as a seed, so changing the data results in a different split. Any existing ordering is preserved. The random splitting strategy ensures that variables in the training and evaluation data are distributed similarly. It is useful in the cases where the input data may have an implicit sort order, which would otherwise result in training and evaluation datasources containing non-similar data records.The following two
DataRearrangement
lines are examples of non-sequentially ordered training and evaluation datasources:Datasource for evaluation:
{"splitting":{"percentBegin":70, "percentEnd":100, "strategy":"random", "randomSeed":"s3://my_s3_path/bucket/file.csv"}}
Datasource for training:
{"splitting":{"percentBegin":70, "percentEnd":100, "strategy":"random", "randomSeed":"s3://my_s3_path/bucket/file.csv", "complement":"true"}}
$sel:databaseInformation:RDSDataSpec'
, rDSDataSpec_databaseInformation
- Describes the DatabaseName
and InstanceIdentifier
of an Amazon RDS
database.
$sel:selectSqlQuery:RDSDataSpec'
, rDSDataSpec_selectSqlQuery
- The query that is used to retrieve the observation data for the
DataSource
.
$sel:databaseCredentials:RDSDataSpec'
, rDSDataSpec_databaseCredentials
- The AWS Identity and Access Management (IAM) credentials that are used
to connect to the Amazon RDS database.
$sel:s3StagingLocation:RDSDataSpec'
, rDSDataSpec_s3StagingLocation
- The Amazon S3 location for staging Amazon RDS data. The data retrieved
from Amazon RDS using SelectSqlQuery
is stored in this location.
$sel:resourceRole:RDSDataSpec'
, rDSDataSpec_resourceRole
- The role (DataPipelineDefaultResourceRole) assumed by an Amazon Elastic
Compute Cloud (Amazon EC2) instance to carry out the copy operation from
Amazon RDS to an Amazon S3 task. For more information, see
Role templates
for data pipelines.
$sel:serviceRole:RDSDataSpec'
, rDSDataSpec_serviceRole
- The role (DataPipelineDefaultRole) assumed by AWS Data Pipeline service
to monitor the progress of the copy task from Amazon RDS to Amazon S3.
For more information, see
Role templates
for data pipelines.
$sel:subnetId:RDSDataSpec'
, rDSDataSpec_subnetId
- The subnet ID to be used to access a VPC-based RDS DB instance. This
attribute is used by Data Pipeline to carry out the copy task from
Amazon RDS to Amazon S3.
$sel:securityGroupIds:RDSDataSpec'
, rDSDataSpec_securityGroupIds
- The security group IDs to be used to access a VPC-based RDS DB instance.
Ensure that there are appropriate ingress rules set up to allow access
to the RDS DB instance. This attribute is used by Data Pipeline to carry
out the copy operation from Amazon RDS to an Amazon S3 task.
rDSDataSpec_dataSchemaUri :: Lens' RDSDataSpec (Maybe Text) Source #
The Amazon S3 location of the DataSchema
.
rDSDataSpec_dataSchema :: Lens' RDSDataSpec (Maybe Text) Source #
A JSON string that represents the schema for an Amazon RDS DataSource
.
The DataSchema
defines the structure of the observation data in the
data file(s) referenced in the DataSource
.
A DataSchema
is not required if you specify a DataSchemaUri.
Define your DataSchema
as a series of key-value pairs. attributes
and excludedVariableNames
have an array of key-value pairs for their
value. Use the following format to define your DataSchema
.
{ "version": "1.0",
"recordAnnotationFieldName": "F1",
"recordWeightFieldName": "F2",
"targetFieldName": "F3",
"dataFormat": "CSV",
"dataFileContainsHeader": true,
"attributes": [
{ "fieldName": "F1", "fieldType": "TEXT" }, { "fieldName": "F2", "fieldType": "NUMERIC" }, { "fieldName": "F3", "fieldType": "CATEGORICAL" }, { "fieldName": "F4", "fieldType": "NUMERIC" }, { "fieldName": "F5", "fieldType": "CATEGORICAL" }, { "fieldName": "F6", "fieldType": "TEXT" }, { "fieldName": "F7", "fieldType": "WEIGHTED_INT_SEQUENCE" }, { "fieldName": "F8", "fieldType": "WEIGHTED_STRING_SEQUENCE" } ],
"excludedVariableNames": [ "F6" ] }
rDSDataSpec_dataRearrangement :: Lens' RDSDataSpec (Maybe Text) Source #
A JSON string that represents the splitting and rearrangement processing
to be applied to a DataSource
. If the DataRearrangement
parameter is
not provided, all of the input data is used to create the Datasource
.
There are multiple parameters that control what data is used to create a datasource:
percentBegin
Use percentBegin to indicate the beginning of the range of the data used to create the Datasource. If you do not include percentBegin and percentEnd, Amazon ML includes all of the data when creating the datasource.
percentEnd
Use percentEnd to indicate the end of the range of the data used to create the Datasource. If you do not include percentBegin and percentEnd, Amazon ML includes all of the data when creating the datasource.
complement
The complement parameter instructs Amazon ML to use the data that is not included in the range of percentBegin to percentEnd to create a datasource. The complement parameter is useful if you need to create complementary datasources for training and evaluation. To create a complementary datasource, use the same values for percentBegin and percentEnd, along with the complement parameter.
For example, the following two datasources do not share any data, and can be used to train and evaluate a model. The first datasource has 25 percent of the data, and the second one has 75 percent of the data.
Datasource for evaluation:
{"splitting":{"percentBegin":0, "percentEnd":25}}
Datasource for training:
{"splitting":{"percentBegin":0, "percentEnd":25, "complement":"true"}}
strategy
To change how Amazon ML splits the data for a datasource, use the strategy parameter.
The default value for the strategy parameter is sequential, meaning that Amazon ML takes all of the data records between the percentBegin and percentEnd parameters for the datasource, in the order that the records appear in the input data.
The following two DataRearrangement lines are examples of sequentially ordered training and evaluation datasources:
Datasource for evaluation:
{"splitting":{"percentBegin":70, "percentEnd":100, "strategy":"sequential"}}
Datasource for training:
{"splitting":{"percentBegin":70, "percentEnd":100, "strategy":"sequential", "complement":"true"}}
To randomly split the input data into the proportions indicated by the percentBegin and percentEnd parameters, set the strategy parameter to random and provide a string that is used as the seed value for the random data splitting (for example, you can use the S3 path to your data as the random seed string). If you choose the random split strategy, Amazon ML assigns each row of data a pseudo-random number between 0 and 100, and then selects the rows that have an assigned number between percentBegin and percentEnd. Pseudo-random numbers are assigned using both the input seed string value and the byte offset as a seed, so changing the data results in a different split. Any existing ordering is preserved. The random splitting strategy ensures that variables in the training and evaluation data are distributed similarly. It is useful in the cases where the input data may have an implicit sort order, which would otherwise result in training and evaluation datasources containing non-similar data records.
The following two DataRearrangement lines are examples of non-sequentially ordered training and evaluation datasources:
Datasource for evaluation:
{"splitting":{"percentBegin":70, "percentEnd":100, "strategy":"random", "randomSeed":"s3://my_s3_path/bucket/file.csv"}}
Datasource for training:
{"splitting":{"percentBegin":70, "percentEnd":100, "strategy":"random", "randomSeed":"s3://my_s3_path/bucket/file.csv", "complement":"true"}}
rDSDataSpec_databaseInformation :: Lens' RDSDataSpec RDSDatabase Source #
Describes the DatabaseName
and InstanceIdentifier
of an Amazon RDS
database.
rDSDataSpec_selectSqlQuery :: Lens' RDSDataSpec Text Source #
The query that is used to retrieve the observation data for the
DataSource
.
rDSDataSpec_databaseCredentials :: Lens' RDSDataSpec RDSDatabaseCredentials Source #
The AWS Identity and Access Management (IAM) credentials that are used to connect to the Amazon RDS database.
rDSDataSpec_s3StagingLocation :: Lens' RDSDataSpec Text Source #
The Amazon S3 location for staging Amazon RDS data. The data retrieved
from Amazon RDS using SelectSqlQuery
is stored in this location.
rDSDataSpec_resourceRole :: Lens' RDSDataSpec Text Source #
The role (DataPipelineDefaultResourceRole) assumed by an Amazon Elastic Compute Cloud (Amazon EC2) instance to carry out the copy operation from Amazon RDS to an Amazon S3 task. For more information, see Role templates for data pipelines.
rDSDataSpec_serviceRole :: Lens' RDSDataSpec Text Source #
The role (DataPipelineDefaultRole) assumed by AWS Data Pipeline service to monitor the progress of the copy task from Amazon RDS to Amazon S3. For more information, see Role templates for data pipelines.
rDSDataSpec_subnetId :: Lens' RDSDataSpec Text Source #
The subnet ID to be used to access a VPC-based RDS DB instance. This attribute is used by Data Pipeline to carry out the copy task from Amazon RDS to Amazon S3.
rDSDataSpec_securityGroupIds :: Lens' RDSDataSpec [Text] Source #
The security group IDs to be used to access a VPC-based RDS DB instance. Ensure that there are appropriate ingress rules set up to allow access to the RDS DB instance. This attribute is used by Data Pipeline to carry out the copy operation from Amazon RDS to an Amazon S3 task.
RDSDatabase
data RDSDatabase Source #
The database details of an Amazon RDS database.
See: newRDSDatabase
smart constructor.
RDSDatabase' | |
|
Instances
Create a value of RDSDatabase
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:instanceIdentifier:RDSDatabase'
, rDSDatabase_instanceIdentifier
- The ID of an RDS DB instance.
$sel:databaseName:RDSDatabase'
, rDSDatabase_databaseName
- Undocumented member.
rDSDatabase_instanceIdentifier :: Lens' RDSDatabase Text Source #
The ID of an RDS DB instance.
rDSDatabase_databaseName :: Lens' RDSDatabase Text Source #
Undocumented member.
RDSDatabaseCredentials
data RDSDatabaseCredentials Source #
The database credentials to connect to a database on an RDS DB instance.
See: newRDSDatabaseCredentials
smart constructor.
Instances
newRDSDatabaseCredentials Source #
:: Text | |
-> Text | |
-> RDSDatabaseCredentials |
Create a value of RDSDatabaseCredentials
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:username:RDSDatabaseCredentials'
, rDSDatabaseCredentials_username
- Undocumented member.
$sel:password:RDSDatabaseCredentials'
, rDSDatabaseCredentials_password
- Undocumented member.
rDSDatabaseCredentials_username :: Lens' RDSDatabaseCredentials Text Source #
Undocumented member.
rDSDatabaseCredentials_password :: Lens' RDSDatabaseCredentials Text Source #
Undocumented member.
RDSMetadata
data RDSMetadata Source #
The datasource details that are specific to Amazon RDS.
See: newRDSMetadata
smart constructor.
RDSMetadata' | |
|
Instances
newRDSMetadata :: RDSMetadata Source #
Create a value of RDSMetadata
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:selectSqlQuery:RDSMetadata'
, rDSMetadata_selectSqlQuery
- The SQL query that is supplied during CreateDataSourceFromRDS. Returns
only if Verbose
is true in GetDataSourceInput
.
$sel:dataPipelineId:RDSMetadata'
, rDSMetadata_dataPipelineId
- The ID of the Data Pipeline instance that is used to copy data
from Amazon RDS to Amazon S3. You can use the ID to find details about
the instance in the Data Pipeline console.
$sel:database:RDSMetadata'
, rDSMetadata_database
- The database details required to connect to an Amazon RDS database.
$sel:databaseUserName:RDSMetadata'
, rDSMetadata_databaseUserName
- Undocumented member.
$sel:resourceRole:RDSMetadata'
, rDSMetadata_resourceRole
- The role (DataPipelineDefaultResourceRole) assumed by an Amazon EC2
instance to carry out the copy task from Amazon RDS to Amazon S3. For
more information, see
Role templates
for data pipelines.
$sel:serviceRole:RDSMetadata'
, rDSMetadata_serviceRole
- The role (DataPipelineDefaultRole) assumed by the Data Pipeline service
to monitor the progress of the copy task from Amazon RDS to Amazon S3.
For more information, see
Role templates
for data pipelines.
rDSMetadata_selectSqlQuery :: Lens' RDSMetadata (Maybe Text) Source #
The SQL query that is supplied during CreateDataSourceFromRDS. Returns
only if Verbose
is true in GetDataSourceInput
.
rDSMetadata_dataPipelineId :: Lens' RDSMetadata (Maybe Text) Source #
The ID of the Data Pipeline instance that is used to copy data from Amazon RDS to Amazon S3. You can use the ID to find details about the instance in the Data Pipeline console.
rDSMetadata_database :: Lens' RDSMetadata (Maybe RDSDatabase) Source #
The database details required to connect to an Amazon RDS database.
rDSMetadata_databaseUserName :: Lens' RDSMetadata (Maybe Text) Source #
Undocumented member.
rDSMetadata_resourceRole :: Lens' RDSMetadata (Maybe Text) Source #
The role (DataPipelineDefaultResourceRole) assumed by an Amazon EC2 instance to carry out the copy task from Amazon RDS to Amazon S3. For more information, see Role templates for data pipelines.
rDSMetadata_serviceRole :: Lens' RDSMetadata (Maybe Text) Source #
The role (DataPipelineDefaultRole) assumed by the Data Pipeline service to monitor the progress of the copy task from Amazon RDS to Amazon S3. For more information, see Role templates for data pipelines.
RealtimeEndpointInfo
data RealtimeEndpointInfo Source #
Describes the real-time endpoint information for an MLModel
.
See: newRealtimeEndpointInfo
smart constructor.
RealtimeEndpointInfo' | |
|
Instances
newRealtimeEndpointInfo :: RealtimeEndpointInfo Source #
Create a value of RealtimeEndpointInfo
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:createdAt:RealtimeEndpointInfo'
, realtimeEndpointInfo_createdAt
- The time that the request to create the real-time endpoint for the
MLModel
was received. The time is expressed in epoch time.
$sel:endpointUrl:RealtimeEndpointInfo'
, realtimeEndpointInfo_endpointUrl
- The URI that specifies where to send real-time prediction requests for
the MLModel
.
Note: The application must wait until the real-time endpoint is ready before using this URI.
$sel:endpointStatus:RealtimeEndpointInfo'
, realtimeEndpointInfo_endpointStatus
- The current status of the real-time endpoint for the MLModel
. This
element can have one of the following values:
NONE
- Endpoint does not exist or was previously deleted.READY
- Endpoint is ready to be used for real-time predictions.UPDATING
- Updating/creating the endpoint.
$sel:peakRequestsPerSecond:RealtimeEndpointInfo'
, realtimeEndpointInfo_peakRequestsPerSecond
- The maximum processing rate for the real-time endpoint for MLModel
,
measured in incoming requests per second.
realtimeEndpointInfo_createdAt :: Lens' RealtimeEndpointInfo (Maybe UTCTime) Source #
The time that the request to create the real-time endpoint for the
MLModel
was received. The time is expressed in epoch time.
realtimeEndpointInfo_endpointUrl :: Lens' RealtimeEndpointInfo (Maybe Text) Source #
The URI that specifies where to send real-time prediction requests for
the MLModel
.
Note: The application must wait until the real-time endpoint is ready before using this URI.
realtimeEndpointInfo_endpointStatus :: Lens' RealtimeEndpointInfo (Maybe RealtimeEndpointStatus) Source #
The current status of the real-time endpoint for the MLModel
. This
element can have one of the following values:
NONE
- Endpoint does not exist or was previously deleted.READY
- Endpoint is ready to be used for real-time predictions.UPDATING
- Updating/creating the endpoint.
realtimeEndpointInfo_peakRequestsPerSecond :: Lens' RealtimeEndpointInfo (Maybe Int) Source #
The maximum processing rate for the real-time endpoint for MLModel
,
measured in incoming requests per second.
RedshiftDataSpec
data RedshiftDataSpec Source #
Describes the data specification of an Amazon Redshift DataSource
.
See: newRedshiftDataSpec
smart constructor.
RedshiftDataSpec' | |
|
Instances
Create a value of RedshiftDataSpec
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:dataSchemaUri:RedshiftDataSpec'
, redshiftDataSpec_dataSchemaUri
- Describes the schema location for an Amazon Redshift DataSource
.
$sel:dataSchema:RedshiftDataSpec'
, redshiftDataSpec_dataSchema
- A JSON string that represents the schema for an Amazon Redshift
DataSource
. The DataSchema
defines the structure of the observation
data in the data file(s) referenced in the DataSource
.
A DataSchema
is not required if you specify a DataSchemaUri
.
Define your DataSchema
as a series of key-value pairs. attributes
and excludedVariableNames
have an array of key-value pairs for their
value. Use the following format to define your DataSchema
.
{ "version": "1.0",
"recordAnnotationFieldName": "F1",
"recordWeightFieldName": "F2",
"targetFieldName": "F3",
"dataFormat": "CSV",
"dataFileContainsHeader": true,
"attributes": [
{ "fieldName": "F1", "fieldType": "TEXT" }, { "fieldName": "F2", "fieldType": "NUMERIC" }, { "fieldName": "F3", "fieldType": "CATEGORICAL" }, { "fieldName": "F4", "fieldType": "NUMERIC" }, { "fieldName": "F5", "fieldType": "CATEGORICAL" }, { "fieldName": "F6", "fieldType": "TEXT" }, { "fieldName": "F7", "fieldType": "WEIGHTED_INT_SEQUENCE" }, { "fieldName": "F8", "fieldType": "WEIGHTED_STRING_SEQUENCE" } ],
"excludedVariableNames": [ "F6" ] }
$sel:dataRearrangement:RedshiftDataSpec'
, redshiftDataSpec_dataRearrangement
- A JSON string that represents the splitting and rearrangement processing
to be applied to a DataSource
. If the DataRearrangement
parameter is
not provided, all of the input data is used to create the Datasource
.
There are multiple parameters that control what data is used to create a datasource:
percentBegin
Use percentBegin to indicate the beginning of the range of the data used to create the Datasource. If you do not include percentBegin and percentEnd, Amazon ML includes all of the data when creating the datasource.
percentEnd
Use percentEnd to indicate the end of the range of the data used to create the Datasource. If you do not include percentBegin and percentEnd, Amazon ML includes all of the data when creating the datasource.
complement
The complement parameter instructs Amazon ML to use the data that is not included in the range of percentBegin to percentEnd to create a datasource. The complement parameter is useful if you need to create complementary datasources for training and evaluation. To create a complementary datasource, use the same values for percentBegin and percentEnd, along with the complement parameter.
For example, the following two datasources do not share any data, and can be used to train and evaluate a model. The first datasource has 25 percent of the data, and the second one has 75 percent of the data.
Datasource for evaluation:
{"splitting":{"percentBegin":0, "percentEnd":25}}
Datasource for training:
{"splitting":{"percentBegin":0, "percentEnd":25, "complement":"true"}}
strategy
To change how Amazon ML splits the data for a datasource, use the strategy parameter.
The default value for the strategy parameter is sequential, meaning that Amazon ML takes all of the data records between the percentBegin and percentEnd parameters for the datasource, in the order that the records appear in the input data.
The following two DataRearrangement lines are examples of sequentially ordered training and evaluation datasources:
Datasource for evaluation:
{"splitting":{"percentBegin":70, "percentEnd":100, "strategy":"sequential"}}
Datasource for training:
{"splitting":{"percentBegin":70, "percentEnd":100, "strategy":"sequential", "complement":"true"}}
To randomly split the input data into the proportions indicated by the percentBegin and percentEnd parameters, set the strategy parameter to random and provide a string that is used as the seed value for the random data splitting (for example, you can use the S3 path to your data as the random seed string). If you choose the random split strategy, Amazon ML assigns each row of data a pseudo-random number between 0 and 100, and then selects the rows that have an assigned number between percentBegin and percentEnd. Pseudo-random numbers are assigned using both the input seed string value and the byte offset as a seed, so changing the data results in a different split. Any existing ordering is preserved. The random splitting strategy ensures that variables in the training and evaluation data are distributed similarly. It is useful in the cases where the input data may have an implicit sort order, which would otherwise result in training and evaluation datasources containing non-similar data records.
The following two DataRearrangement lines are examples of non-sequentially ordered training and evaluation datasources:
Datasource for evaluation:
{"splitting":{"percentBegin":70, "percentEnd":100, "strategy":"random", "randomSeed":"s3://my_s3_path/bucket/file.csv"}}
Datasource for training:
{"splitting":{"percentBegin":70, "percentEnd":100, "strategy":"random", "randomSeed":"s3://my_s3_path/bucket/file.csv", "complement":"true"}}
$sel:databaseInformation:RedshiftDataSpec'
, redshiftDataSpec_databaseInformation
- Describes the DatabaseName
and ClusterIdentifier
for an Amazon
Redshift DataSource
.
$sel:selectSqlQuery:RedshiftDataSpec'
, redshiftDataSpec_selectSqlQuery
- Describes the SQL Query to execute on an Amazon Redshift database for an
Amazon Redshift DataSource
.
$sel:databaseCredentials:RedshiftDataSpec'
, redshiftDataSpec_databaseCredentials
- Describes AWS Identity and Access Management (IAM) credentials that are
used to connect to the Amazon Redshift database.
$sel:s3StagingLocation:RedshiftDataSpec'
, redshiftDataSpec_s3StagingLocation
- Describes an Amazon S3 location to store the result set of the
SelectSqlQuery
query.
redshiftDataSpec_dataSchemaUri :: Lens' RedshiftDataSpec (Maybe Text) Source #
Describes the schema location for an Amazon Redshift DataSource
.
redshiftDataSpec_dataSchema :: Lens' RedshiftDataSpec (Maybe Text) Source #
A JSON string that represents the schema for an Amazon Redshift
DataSource
. The DataSchema
defines the structure of the observation
data in the data file(s) referenced in the DataSource
.
A DataSchema
is not required if you specify a DataSchemaUri
.
Define your DataSchema
as a series of key-value pairs. attributes
and excludedVariableNames
have an array of key-value pairs for their
value. Use the following format to define your DataSchema
.
{ "version": "1.0",
"recordAnnotationFieldName": "F1",
"recordWeightFieldName": "F2",
"targetFieldName": "F3",
"dataFormat": "CSV",
"dataFileContainsHeader": true,
"attributes": [
{ "fieldName": "F1", "fieldType": "TEXT" }, { "fieldName": "F2", "fieldType": "NUMERIC" }, { "fieldName": "F3", "fieldType": "CATEGORICAL" }, { "fieldName": "F4", "fieldType": "NUMERIC" }, { "fieldName": "F5", "fieldType": "CATEGORICAL" }, { "fieldName": "F6", "fieldType": "TEXT" }, { "fieldName": "F7", "fieldType": "WEIGHTED_INT_SEQUENCE" }, { "fieldName": "F8", "fieldType": "WEIGHTED_STRING_SEQUENCE" } ],
"excludedVariableNames": [ "F6" ] }
redshiftDataSpec_dataRearrangement :: Lens' RedshiftDataSpec (Maybe Text) Source #
A JSON string that represents the splitting and rearrangement processing
to be applied to a DataSource
. If the DataRearrangement
parameter is
not provided, all of the input data is used to create the Datasource
.
There are multiple parameters that control what data is used to create a datasource:
percentBegin
Use percentBegin to indicate the beginning of the range of the data used to create the Datasource. If you do not include percentBegin and percentEnd, Amazon ML includes all of the data when creating the datasource.
percentEnd
Use percentEnd to indicate the end of the range of the data used to create the Datasource. If you do not include percentBegin and percentEnd, Amazon ML includes all of the data when creating the datasource.
complement
The complement parameter instructs Amazon ML to use the data that is not included in the range of percentBegin to percentEnd to create a datasource. The complement parameter is useful if you need to create complementary datasources for training and evaluation. To create a complementary datasource, use the same values for percentBegin and percentEnd, along with the complement parameter.
For example, the following two datasources do not share any data, and can be used to train and evaluate a model. The first datasource has 25 percent of the data, and the second one has 75 percent of the data.
Datasource for evaluation:
{"splitting":{"percentBegin":0, "percentEnd":25}}
Datasource for training:
{"splitting":{"percentBegin":0, "percentEnd":25, "complement":"true"}}
strategy
To change how Amazon ML splits the data for a datasource, use the strategy parameter.
The default value for the strategy parameter is sequential, meaning that Amazon ML takes all of the data records between the percentBegin and percentEnd parameters for the datasource, in the order that the records appear in the input data.
The following two DataRearrangement lines are examples of sequentially ordered training and evaluation datasources:
Datasource for evaluation:
{"splitting":{"percentBegin":70, "percentEnd":100, "strategy":"sequential"}}
Datasource for training:
{"splitting":{"percentBegin":70, "percentEnd":100, "strategy":"sequential", "complement":"true"}}
To randomly split the input data into the proportions indicated by the percentBegin and percentEnd parameters, set the strategy parameter to random and provide a string that is used as the seed value for the random data splitting (for example, you can use the S3 path to your data as the random seed string). If you choose the random split strategy, Amazon ML assigns each row of data a pseudo-random number between 0 and 100, and then selects the rows that have an assigned number between percentBegin and percentEnd. Pseudo-random numbers are assigned using both the input seed string value and the byte offset as a seed, so changing the data results in a different split. Any existing ordering is preserved. The random splitting strategy ensures that variables in the training and evaluation data are distributed similarly. It is useful in the cases where the input data may have an implicit sort order, which would otherwise result in training and evaluation datasources containing non-similar data records.
The following two DataRearrangement lines are examples of non-sequentially ordered training and evaluation datasources:
Datasource for evaluation:
{"splitting":{"percentBegin":70, "percentEnd":100, "strategy":"random", "randomSeed":"s3://my_s3_path/bucket/file.csv"}}
Datasource for training:
{"splitting":{"percentBegin":70, "percentEnd":100, "strategy":"random", "randomSeed":"s3://my_s3_path/bucket/file.csv", "complement":"true"}}
redshiftDataSpec_databaseInformation :: Lens' RedshiftDataSpec RedshiftDatabase Source #
Describes the DatabaseName
and ClusterIdentifier
for an Amazon
Redshift DataSource
.
redshiftDataSpec_selectSqlQuery :: Lens' RedshiftDataSpec Text Source #
Describes the SQL Query to execute on an Amazon Redshift database for an
Amazon Redshift DataSource
.
redshiftDataSpec_databaseCredentials :: Lens' RedshiftDataSpec RedshiftDatabaseCredentials Source #
Describes AWS Identity and Access Management (IAM) credentials that are used to connect to the Amazon Redshift database.
redshiftDataSpec_s3StagingLocation :: Lens' RedshiftDataSpec Text Source #
Describes an Amazon S3 location to store the result set of the
SelectSqlQuery
query.
RedshiftDatabase
data RedshiftDatabase Source #
Describes the database details required to connect to an Amazon Redshift database.
See: newRedshiftDatabase
smart constructor.
Instances
:: Text | |
-> Text | |
-> RedshiftDatabase |
Create a value of RedshiftDatabase
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:databaseName:RedshiftDatabase'
, redshiftDatabase_databaseName
- Undocumented member.
$sel:clusterIdentifier:RedshiftDatabase'
, redshiftDatabase_clusterIdentifier
- Undocumented member.
redshiftDatabase_databaseName :: Lens' RedshiftDatabase Text Source #
Undocumented member.
redshiftDatabase_clusterIdentifier :: Lens' RedshiftDatabase Text Source #
Undocumented member.
RedshiftDatabaseCredentials
data RedshiftDatabaseCredentials Source #
Describes the database credentials for connecting to a database on an Amazon Redshift cluster.
See: newRedshiftDatabaseCredentials
smart constructor.
Instances
Eq RedshiftDatabaseCredentials Source # | |
Read RedshiftDatabaseCredentials Source # | |
Show RedshiftDatabaseCredentials Source # | |
Generic RedshiftDatabaseCredentials Source # | |
NFData RedshiftDatabaseCredentials Source # | |
Defined in Amazonka.MachineLearning.Types.RedshiftDatabaseCredentials rnf :: RedshiftDatabaseCredentials -> () # | |
Hashable RedshiftDatabaseCredentials Source # | |
ToJSON RedshiftDatabaseCredentials Source # | |
type Rep RedshiftDatabaseCredentials Source # | |
Defined in Amazonka.MachineLearning.Types.RedshiftDatabaseCredentials type Rep RedshiftDatabaseCredentials = D1 ('MetaData "RedshiftDatabaseCredentials" "Amazonka.MachineLearning.Types.RedshiftDatabaseCredentials" "libZSservicesZSamazonka-mlZSamazonka-ml" 'False) (C1 ('MetaCons "RedshiftDatabaseCredentials'" 'PrefixI 'True) (S1 ('MetaSel ('Just "username") 'NoSourceUnpackedness 'NoSourceStrictness 'DecidedStrict) (Rec0 Text) :*: S1 ('MetaSel ('Just "password") 'NoSourceUnpackedness 'NoSourceStrictness 'DecidedStrict) (Rec0 Text))) |
newRedshiftDatabaseCredentials Source #
:: Text | |
-> Text | |
-> RedshiftDatabaseCredentials |
Create a value of RedshiftDatabaseCredentials
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:username:RedshiftDatabaseCredentials'
, redshiftDatabaseCredentials_username
- Undocumented member.
$sel:password:RedshiftDatabaseCredentials'
, redshiftDatabaseCredentials_password
- Undocumented member.
redshiftDatabaseCredentials_username :: Lens' RedshiftDatabaseCredentials Text Source #
Undocumented member.
redshiftDatabaseCredentials_password :: Lens' RedshiftDatabaseCredentials Text Source #
Undocumented member.
RedshiftMetadata
data RedshiftMetadata Source #
Describes the DataSource
details specific to Amazon Redshift.
See: newRedshiftMetadata
smart constructor.
RedshiftMetadata' | |
|
Instances
newRedshiftMetadata :: RedshiftMetadata Source #
Create a value of RedshiftMetadata
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:selectSqlQuery:RedshiftMetadata'
, redshiftMetadata_selectSqlQuery
- The SQL query that is specified during CreateDataSourceFromRedshift.
Returns only if Verbose
is true in GetDataSourceInput.
$sel:redshiftDatabase:RedshiftMetadata'
, redshiftMetadata_redshiftDatabase
- Undocumented member.
$sel:databaseUserName:RedshiftMetadata'
, redshiftMetadata_databaseUserName
- Undocumented member.
redshiftMetadata_selectSqlQuery :: Lens' RedshiftMetadata (Maybe Text) Source #
The SQL query that is specified during CreateDataSourceFromRedshift.
Returns only if Verbose
is true in GetDataSourceInput.
redshiftMetadata_redshiftDatabase :: Lens' RedshiftMetadata (Maybe RedshiftDatabase) Source #
Undocumented member.
redshiftMetadata_databaseUserName :: Lens' RedshiftMetadata (Maybe Text) Source #
Undocumented member.
S3DataSpec
data S3DataSpec Source #
Describes the data specification of a DataSource
.
See: newS3DataSpec
smart constructor.
S3DataSpec' | |
|
Instances
Create a value of S3DataSpec
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:dataSchema:S3DataSpec'
, s3DataSpec_dataSchema
- A JSON string that represents the schema for an Amazon S3 DataSource
.
The DataSchema
defines the structure of the observation data in the
data file(s) referenced in the DataSource
.
You must provide either the DataSchema
or the DataSchemaLocationS3
.
Define your DataSchema
as a series of key-value pairs. attributes
and excludedVariableNames
have an array of key-value pairs for their
value. Use the following format to define your DataSchema
.
{ "version": "1.0",
"recordAnnotationFieldName": "F1",
"recordWeightFieldName": "F2",
"targetFieldName": "F3",
"dataFormat": "CSV",
"dataFileContainsHeader": true,
"attributes": [
{ "fieldName": "F1", "fieldType": "TEXT" }, { "fieldName": "F2", "fieldType": "NUMERIC" }, { "fieldName": "F3", "fieldType": "CATEGORICAL" }, { "fieldName": "F4", "fieldType": "NUMERIC" }, { "fieldName": "F5", "fieldType": "CATEGORICAL" }, { "fieldName": "F6", "fieldType": "TEXT" }, { "fieldName": "F7", "fieldType": "WEIGHTED_INT_SEQUENCE" }, { "fieldName": "F8", "fieldType": "WEIGHTED_STRING_SEQUENCE" } ],
"excludedVariableNames": [ "F6" ] }
$sel:dataSchemaLocationS3:S3DataSpec'
, s3DataSpec_dataSchemaLocationS3
- Describes the schema location in Amazon S3. You must provide either the
DataSchema
or the DataSchemaLocationS3
.
$sel:dataRearrangement:S3DataSpec'
, s3DataSpec_dataRearrangement
- A JSON string that represents the splitting and rearrangement processing
to be applied to a DataSource
. If the DataRearrangement
parameter is
not provided, all of the input data is used to create the Datasource
.
There are multiple parameters that control what data is used to create a datasource:
percentBegin
Use percentBegin to indicate the beginning of the range of the data used to
create the Datasource. If you do not include percentBegin and percentEnd,
Amazon ML includes all of the data when creating the datasource.

percentEnd

Use percentEnd to indicate the end of the range of the data used to create
the Datasource. If you do not include percentBegin and percentEnd, Amazon ML
includes all of the data when creating the datasource.

complement

The complement parameter instructs Amazon ML to use the data that is not
included in the range of percentBegin to percentEnd to create a datasource.
The complement parameter is useful if you need to create complementary
datasources for training and evaluation. To create a complementary
datasource, use the same values for percentBegin and percentEnd, along with
the complement parameter.

For example, the following two datasources do not share any data, and can be
used to train and evaluate a model. The first datasource has 25 percent of
the data, and the second one has 75 percent of the data.
Datasource for evaluation:
{"splitting":{"percentBegin":0, "percentEnd":25}}
Datasource for training:
{"splitting":{"percentBegin":0, "percentEnd":25, "complement":"true"}}
strategy
To change how Amazon ML splits the data for a datasource, use the strategy
parameter.

The default value for the strategy parameter is sequential, meaning that
Amazon ML takes all of the data records between the percentBegin and
percentEnd parameters for the datasource, in the order that the records
appear in the input data.

The following two DataRearrangement lines are examples of sequentially
ordered training and evaluation datasources:

Datasource for evaluation:
{"splitting":{"percentBegin":70, "percentEnd":100, "strategy":"sequential"}}
Datasource for training:
{"splitting":{"percentBegin":70, "percentEnd":100, "strategy":"sequential", "complement":"true"}}
To randomly split the input data into the proportions indicated by the
percentBegin and percentEnd parameters, set the strategy parameter to random
and provide a string that is used as the seed value for the random data
splitting (for example, you can use the S3 path to your data as the random
seed string). If you choose the random split strategy, Amazon ML assigns
each row of data a pseudo-random number between 0 and 100, and then selects
the rows that have an assigned number between percentBegin and percentEnd.
Pseudo-random numbers are assigned using both the input seed string value
and the byte offset as a seed, so changing the data results in a different
split. Any existing ordering is preserved. The random splitting strategy
ensures that variables in the training and evaluation data are distributed
similarly. It is useful in the cases where the input data may have an
implicit sort order, which would otherwise result in training and evaluation
datasources containing non-similar data records.

The following two DataRearrangement lines are examples of non-sequentially
ordered training and evaluation datasources:

Datasource for evaluation:
{"splitting":{"percentBegin":70, "percentEnd":100, "strategy":"random", "randomSeed"="s3://my_s3_path/bucket/file.csv"}}
Datasource for training:
{"splitting":{"percentBegin":70, "percentEnd":100, "strategy":"random", "randomSeed"="s3://my_s3_path/bucket/file.csv", "complement":"true"}}
$sel:dataLocationS3:S3DataSpec'
, s3DataSpec_dataLocationS3
- The location of the data file(s) used by a DataSource
. The URI
specifies a data file or an Amazon Simple Storage Service (Amazon S3)
directory or bucket containing data files.
s3DataSpec_dataSchema :: Lens' S3DataSpec (Maybe Text) Source #
A JSON string that represents the schema for an Amazon S3 DataSource
.
The DataSchema
defines the structure of the observation data in the
data file(s) referenced in the DataSource
.
You must provide either the DataSchema
or the DataSchemaLocationS3
.
Define your DataSchema
as a series of key-value pairs. attributes
and excludedVariableNames
have an array of key-value pairs for their
value. Use the following format to define your DataSchema
.
{ "version": "1.0",
"recordAnnotationFieldName": "F1",
"recordWeightFieldName": "F2",
"targetFieldName": "F3",
"dataFormat": "CSV",
"dataFileContainsHeader": true,
"attributes": [
{ "fieldName": "F1", "fieldType": "TEXT" }, { "fieldName": "F2", "fieldType": "NUMERIC" }, { "fieldName": "F3", "fieldType": "CATEGORICAL" }, { "fieldName": "F4", "fieldType": "NUMERIC" }, { "fieldName": "F5", "fieldType": "CATEGORICAL" }, { "fieldName": "F6", "fieldType": "TEXT" }, { "fieldName": "F7", "fieldType": "WEIGHTED_INT_SEQUENCE" }, { "fieldName": "F8", "fieldType": "WEIGHTED_STRING_SEQUENCE" } ],
"excludedVariableNames": [ "F6" ] }
s3DataSpec_dataSchemaLocationS3 :: Lens' S3DataSpec (Maybe Text) Source #
Describes the schema location in Amazon S3. You must provide either the
DataSchema
or the DataSchemaLocationS3
.
s3DataSpec_dataRearrangement :: Lens' S3DataSpec (Maybe Text) Source #
A JSON string that represents the splitting and rearrangement processing
to be applied to a DataSource
. If the DataRearrangement
parameter is
not provided, all of the input data is used to create the Datasource
.
There are multiple parameters that control what data is used to create a datasource:
percentBegin
Use percentBegin to indicate the beginning of the range of the data used to
create the Datasource. If you do not include percentBegin and percentEnd,
Amazon ML includes all of the data when creating the datasource.

percentEnd

Use percentEnd to indicate the end of the range of the data used to create
the Datasource. If you do not include percentBegin and percentEnd, Amazon ML
includes all of the data when creating the datasource.

complement

The complement parameter instructs Amazon ML to use the data that is not
included in the range of percentBegin to percentEnd to create a datasource.
The complement parameter is useful if you need to create complementary
datasources for training and evaluation. To create a complementary
datasource, use the same values for percentBegin and percentEnd, along with
the complement parameter.

For example, the following two datasources do not share any data, and can be
used to train and evaluate a model. The first datasource has 25 percent of
the data, and the second one has 75 percent of the data.
Datasource for evaluation:
{"splitting":{"percentBegin":0, "percentEnd":25}}
Datasource for training:
{"splitting":{"percentBegin":0, "percentEnd":25, "complement":"true"}}
strategy
To change how Amazon ML splits the data for a datasource, use the strategy
parameter.

The default value for the strategy parameter is sequential, meaning that
Amazon ML takes all of the data records between the percentBegin and
percentEnd parameters for the datasource, in the order that the records
appear in the input data.

The following two DataRearrangement lines are examples of sequentially
ordered training and evaluation datasources:

Datasource for evaluation:
{"splitting":{"percentBegin":70, "percentEnd":100, "strategy":"sequential"}}
Datasource for training:
{"splitting":{"percentBegin":70, "percentEnd":100, "strategy":"sequential", "complement":"true"}}
To randomly split the input data into the proportions indicated by the
percentBegin and percentEnd parameters, set the strategy parameter to random
and provide a string that is used as the seed value for the random data
splitting (for example, you can use the S3 path to your data as the random
seed string). If you choose the random split strategy, Amazon ML assigns
each row of data a pseudo-random number between 0 and 100, and then selects
the rows that have an assigned number between percentBegin and percentEnd.
Pseudo-random numbers are assigned using both the input seed string value
and the byte offset as a seed, so changing the data results in a different
split. Any existing ordering is preserved. The random splitting strategy
ensures that variables in the training and evaluation data are distributed
similarly. It is useful in the cases where the input data may have an
implicit sort order, which would otherwise result in training and evaluation
datasources containing non-similar data records.

The following two DataRearrangement lines are examples of non-sequentially
ordered training and evaluation datasources:

Datasource for evaluation:
{"splitting":{"percentBegin":70, "percentEnd":100, "strategy":"random", "randomSeed"="s3://my_s3_path/bucket/file.csv"}}
Datasource for training:
{"splitting":{"percentBegin":70, "percentEnd":100, "strategy":"random", "randomSeed"="s3://my_s3_path/bucket/file.csv", "complement":"true"}}
s3DataSpec_dataLocationS3 :: Lens' S3DataSpec Text Source #
The location of the data file(s) used by a DataSource
. The URI
specifies a data file or an Amazon Simple Storage Service (Amazon S3)
directory or bucket containing data files.
Tag
A custom key-value pair associated with an ML object, such as an ML model.
See: newTag
smart constructor.
Tag' | |
|
Instances
Eq Tag Source # | |
Read Tag Source # | |
Show Tag Source # | |
Generic Tag Source # | |
NFData Tag Source # | |
Defined in Amazonka.MachineLearning.Types.Tag | |
Hashable Tag Source # | |
Defined in Amazonka.MachineLearning.Types.Tag | |
ToJSON Tag Source # | |
Defined in Amazonka.MachineLearning.Types.Tag | |
FromJSON Tag Source # | |
type Rep Tag Source # | |
Defined in Amazonka.MachineLearning.Types.Tag type Rep Tag = D1 ('MetaData "Tag" "Amazonka.MachineLearning.Types.Tag" "libZSservicesZSamazonka-mlZSamazonka-ml" 'False) (C1 ('MetaCons "Tag'" 'PrefixI 'True) (S1 ('MetaSel ('Just "value") 'NoSourceUnpackedness 'NoSourceStrictness 'DecidedStrict) (Rec0 (Maybe Text)) :*: S1 ('MetaSel ('Just "key") 'NoSourceUnpackedness 'NoSourceStrictness 'DecidedStrict) (Rec0 (Maybe Text)))) |
Create a value of Tag
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:value:Tag'
, tag_value
- An optional string, typically used to describe or define the tag. Valid
characters include Unicode letters, digits, white space, _, ., /, =, +,
-, %, and @.
$sel:key:Tag'
, tag_key
- A unique identifier for the tag. Valid characters include Unicode
letters, digits, white space, _, ., /, =, +, -, %, and @.