Source code for synapse.ml.recommendation.SAR

# Copyright (C) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See LICENSE in project root for information.


import sys
if sys.version >= '3':
    basestring = str

from pyspark import SparkContext, SQLContext
from pyspark.sql import DataFrame
from pyspark.ml.param.shared import *
from pyspark import keyword_only
from pyspark.ml.util import JavaMLReadable, JavaMLWritable
from synapse.ml.core.serialize.java_params_patch import *
from pyspark.ml.wrapper import JavaTransformer, JavaEstimator, JavaModel
from pyspark.ml.evaluation import JavaEvaluator
from pyspark.ml.common import inherit_doc
from synapse.ml.core.schema.Utils import *
from pyspark.ml.param import TypeConverters
from synapse.ml.core.schema.TypeConversionUtils import generateTypeConverter, complexTypeConverter
from synapse.ml.recommendation.SARModel import SARModel

@inherit_doc
class SAR(ComplexParamsMixin, JavaMLReadable, JavaMLWritable, JavaEstimator):
    """
    Args:
        activityTimeFormat (object): Time format for events, default: yyyy/MM/dd'T'h:mm:ss
        alpha (float): alpha for implicit preference
        blockSize (int): block size for stacking input data in matrices. Data is stacked within partitions. If block size is more than remaining data in a partition then it is adjusted to the size of this data.
        checkpointInterval (int): set checkpoint interval (>= 1) or disable checkpoint (-1). E.g. 10 means that the cache will get checkpointed every 10 iterations. Note: this setting will be ignored if the checkpoint directory is not set in the SparkContext
        coldStartStrategy (object): strategy for dealing with unknown or new users/items at prediction time. This may be useful in cross-validation or production scenarios, for handling user/item ids the model has not seen in the training data. Supported values: nan, drop.
        finalStorageLevel (object): StorageLevel for ALS model factors.
        implicitPrefs (bool): whether to use implicit preference
        intermediateStorageLevel (object): StorageLevel for intermediate datasets. Cannot be 'NONE'.
        itemCol (object): column name for item ids. Ids must be within the integer value range.
        maxIter (int): maximum number of iterations (>= 0)
        nonnegative (bool): whether to use nonnegative constraint for least squares
        numItemBlocks (int): number of item blocks
        numUserBlocks (int): number of user blocks
        predictionCol (object): prediction column name
        rank (int): rank of the factorization
        ratingCol (object): column name for ratings
        regParam (float): regularization parameter (>= 0)
        seed (long): random seed
        similarityFunction (object): Defines the similarity function to be used by the model. Lift favors serendipity, Co-occurrence favors predictability, and Jaccard is a nice compromise between the two.
        startTime (object): Set a custom 'now' time if using historical data
        startTimeFormat (object): Format for start time
        supportThreshold (int): Minimum number of ratings per item
        timeCol (object): Time of activity
        timeDecayCoeff (int): Used to scale the time decay coefficient to a different half-life duration
        userCol (object): column name for user ids. Ids must be within the integer value range.
    """

    activityTimeFormat = Param(Params._dummy(), "activityTimeFormat", "Time format for events, default: yyyy/MM/dd'T'h:mm:ss")

    alpha = Param(Params._dummy(), "alpha", "alpha for implicit preference", typeConverter=TypeConverters.toFloat)

    blockSize = Param(Params._dummy(), "blockSize", "block size for stacking input data in matrices. Data is stacked within partitions. If block size is more than remaining data in a partition then it is adjusted to the size of this data.", typeConverter=TypeConverters.toInt)

    checkpointInterval = Param(Params._dummy(), "checkpointInterval", "set checkpoint interval (>= 1) or disable checkpoint (-1). E.g. 10 means that the cache will get checkpointed every 10 iterations. Note: this setting will be ignored if the checkpoint directory is not set in the SparkContext", typeConverter=TypeConverters.toInt)

    coldStartStrategy = Param(Params._dummy(), "coldStartStrategy", "strategy for dealing with unknown or new users/items at prediction time. This may be useful in cross-validation or production scenarios, for handling user/item ids the model has not seen in the training data. Supported values: nan, drop.")

    finalStorageLevel = Param(Params._dummy(), "finalStorageLevel", "StorageLevel for ALS model factors.")

    implicitPrefs = Param(Params._dummy(), "implicitPrefs", "whether to use implicit preference", typeConverter=TypeConverters.toBoolean)

    intermediateStorageLevel = Param(Params._dummy(), "intermediateStorageLevel", "StorageLevel for intermediate datasets. Cannot be 'NONE'.")

    itemCol = Param(Params._dummy(), "itemCol", "column name for item ids. Ids must be within the integer value range.")

    maxIter = Param(Params._dummy(), "maxIter", "maximum number of iterations (>= 0)", typeConverter=TypeConverters.toInt)

    nonnegative = Param(Params._dummy(), "nonnegative", "whether to use nonnegative constraint for least squares", typeConverter=TypeConverters.toBoolean)

    numItemBlocks = Param(Params._dummy(), "numItemBlocks", "number of item blocks", typeConverter=TypeConverters.toInt)

    numUserBlocks = Param(Params._dummy(), "numUserBlocks", "number of user blocks", typeConverter=TypeConverters.toInt)

    predictionCol = Param(Params._dummy(), "predictionCol", "prediction column name")

    rank = Param(Params._dummy(), "rank", "rank of the factorization", typeConverter=TypeConverters.toInt)

    ratingCol = Param(Params._dummy(), "ratingCol", "column name for ratings")

    regParam = Param(Params._dummy(), "regParam", "regularization parameter (>= 0)", typeConverter=TypeConverters.toFloat)

    seed = Param(Params._dummy(), "seed", "random seed")

    similarityFunction = Param(Params._dummy(), "similarityFunction", "Defines the similarity function to be used by the model. Lift favors serendipity, Co-occurrence favors predictability, and Jaccard is a nice compromise between the two.")

    startTime = Param(Params._dummy(), "startTime", "Set a custom 'now' time if using historical data")

    startTimeFormat = Param(Params._dummy(), "startTimeFormat", "Format for start time")

    supportThreshold = Param(Params._dummy(), "supportThreshold", "Minimum number of ratings per item", typeConverter=TypeConverters.toInt)

    timeCol = Param(Params._dummy(), "timeCol", "Time of activity")

    timeDecayCoeff = Param(Params._dummy(), "timeDecayCoeff", "Used to scale the time decay coefficient to a different half-life duration", typeConverter=TypeConverters.toInt)

    userCol = Param(Params._dummy(), "userCol", "column name for user ids. Ids must be within the integer value range.")

    @keyword_only
    def __init__(
        self,
        java_obj=None,
        activityTimeFormat="yyyy/MM/dd'T'h:mm:ss",
        alpha=1.0,
        blockSize=4096,
        checkpointInterval=10,
        coldStartStrategy="nan",
        finalStorageLevel="MEMORY_AND_DISK",
        implicitPrefs=False,
        intermediateStorageLevel="MEMORY_AND_DISK",
        itemCol="item",
        maxIter=10,
        nonnegative=False,
        numItemBlocks=10,
        numUserBlocks=10,
        predictionCol="prediction",
        rank=10,
        ratingCol="rating",
        regParam=0.1,
        seed=356704333,
        similarityFunction="jaccard",
        startTime=None,
        startTimeFormat="EEE MMM dd HH:mm:ss Z yyyy",
        supportThreshold=4,
        timeCol="time",
        timeDecayCoeff=30,
        userCol="user"
    ):
        super(SAR, self).__init__()
        if java_obj is None:
            self._java_obj = self._new_java_obj("com.microsoft.azure.synapse.ml.recommendation.SAR", self.uid)
        else:
            self._java_obj = java_obj
        self._setDefault(activityTimeFormat="yyyy/MM/dd'T'h:mm:ss")
        self._setDefault(alpha=1.0)
        self._setDefault(blockSize=4096)
        self._setDefault(checkpointInterval=10)
        self._setDefault(coldStartStrategy="nan")
        self._setDefault(finalStorageLevel="MEMORY_AND_DISK")
        self._setDefault(implicitPrefs=False)
        self._setDefault(intermediateStorageLevel="MEMORY_AND_DISK")
        self._setDefault(itemCol="item")
        self._setDefault(maxIter=10)
        self._setDefault(nonnegative=False)
        self._setDefault(numItemBlocks=10)
        self._setDefault(numUserBlocks=10)
        self._setDefault(predictionCol="prediction")
        self._setDefault(rank=10)
        self._setDefault(ratingCol="rating")
        self._setDefault(regParam=0.1)
        self._setDefault(seed=356704333)
        self._setDefault(similarityFunction="jaccard")
        self._setDefault(startTimeFormat="EEE MMM dd HH:mm:ss Z yyyy")
        self._setDefault(supportThreshold=4)
        self._setDefault(timeCol="time")
        self._setDefault(timeDecayCoeff=30)
        self._setDefault(userCol="user")
        if hasattr(self, "_input_kwargs"):
            kwargs = self._input_kwargs
        else:
            kwargs = self.__init__._input_kwargs
        if java_obj is None:
            for k, v in kwargs.items():
                if v is not None:
                    getattr(self, "set" + k[0].upper() + k[1:])(v)
    @keyword_only
    def setParams(
        self,
        activityTimeFormat="yyyy/MM/dd'T'h:mm:ss",
        alpha=1.0,
        blockSize=4096,
        checkpointInterval=10,
        coldStartStrategy="nan",
        finalStorageLevel="MEMORY_AND_DISK",
        implicitPrefs=False,
        intermediateStorageLevel="MEMORY_AND_DISK",
        itemCol="item",
        maxIter=10,
        nonnegative=False,
        numItemBlocks=10,
        numUserBlocks=10,
        predictionCol="prediction",
        rank=10,
        ratingCol="rating",
        regParam=0.1,
        seed=356704333,
        similarityFunction="jaccard",
        startTime=None,
        startTimeFormat="EEE MMM dd HH:mm:ss Z yyyy",
        supportThreshold=4,
        timeCol="time",
        timeDecayCoeff=30,
        userCol="user"
    ):
        """
        Set the (keyword only) parameters
        """
        if hasattr(self, "_input_kwargs"):
            kwargs = self._input_kwargs
        else:
            kwargs = self.__init__._input_kwargs
        return self._set(**kwargs)
    @classmethod
    def read(cls):
        """Returns an MLReader instance for this class."""
        return JavaMMLReader(cls)

    @staticmethod
    def getJavaPackage():
        """Returns package name String."""
        return "com.microsoft.azure.synapse.ml.recommendation.SAR"

    @staticmethod
    def _from_java(java_stage):
        module_name = SAR.__module__
        module_name = module_name.rsplit(".", 1)[0] + ".SAR"
        return from_java(java_stage, module_name)
    def setActivityTimeFormat(self, value):
        """
        Args:
            activityTimeFormat: Time format for events, default: yyyy/MM/dd'T'h:mm:ss
        """
        self._set(activityTimeFormat=value)
        return self

    def setAlpha(self, value):
        """
        Args:
            alpha: alpha for implicit preference
        """
        self._set(alpha=value)
        return self

    def setBlockSize(self, value):
        """
        Args:
            blockSize: block size for stacking input data in matrices. Data is stacked within partitions. If block size is more than remaining data in a partition then it is adjusted to the size of this data.
        """
        self._set(blockSize=value)
        return self

    def setCheckpointInterval(self, value):
        """
        Args:
            checkpointInterval: set checkpoint interval (>= 1) or disable checkpoint (-1). E.g. 10 means that the cache will get checkpointed every 10 iterations. Note: this setting will be ignored if the checkpoint directory is not set in the SparkContext
        """
        self._set(checkpointInterval=value)
        return self

    def setColdStartStrategy(self, value):
        """
        Args:
            coldStartStrategy: strategy for dealing with unknown or new users/items at prediction time. This may be useful in cross-validation or production scenarios, for handling user/item ids the model has not seen in the training data. Supported values: nan, drop.
        """
        self._set(coldStartStrategy=value)
        return self

    def setFinalStorageLevel(self, value):
        """
        Args:
            finalStorageLevel: StorageLevel for ALS model factors.
        """
        self._set(finalStorageLevel=value)
        return self

    def setImplicitPrefs(self, value):
        """
        Args:
            implicitPrefs: whether to use implicit preference
        """
        self._set(implicitPrefs=value)
        return self

    def setIntermediateStorageLevel(self, value):
        """
        Args:
            intermediateStorageLevel: StorageLevel for intermediate datasets. Cannot be 'NONE'.
        """
        self._set(intermediateStorageLevel=value)
        return self

    def setItemCol(self, value):
        """
        Args:
            itemCol: column name for item ids. Ids must be within the integer value range.
        """
        self._set(itemCol=value)
        return self

    def setMaxIter(self, value):
        """
        Args:
            maxIter: maximum number of iterations (>= 0)
        """
        self._set(maxIter=value)
        return self

    def setNonnegative(self, value):
        """
        Args:
            nonnegative: whether to use nonnegative constraint for least squares
        """
        self._set(nonnegative=value)
        return self

    def setNumItemBlocks(self, value):
        """
        Args:
            numItemBlocks: number of item blocks
        """
        self._set(numItemBlocks=value)
        return self

    def setNumUserBlocks(self, value):
        """
        Args:
            numUserBlocks: number of user blocks
        """
        self._set(numUserBlocks=value)
        return self

    def setPredictionCol(self, value):
        """
        Args:
            predictionCol: prediction column name
        """
        self._set(predictionCol=value)
        return self

    def setRank(self, value):
        """
        Args:
            rank: rank of the factorization
        """
        self._set(rank=value)
        return self

    def setRatingCol(self, value):
        """
        Args:
            ratingCol: column name for ratings
        """
        self._set(ratingCol=value)
        return self

    def setRegParam(self, value):
        """
        Args:
            regParam: regularization parameter (>= 0)
        """
        self._set(regParam=value)
        return self

    def setSeed(self, value):
        """
        Args:
            seed: random seed
        """
        self._set(seed=value)
        return self

    def setSimilarityFunction(self, value):
        """
        Args:
            similarityFunction: Defines the similarity function to be used by the model. Lift favors serendipity, Co-occurrence favors predictability, and Jaccard is a nice compromise between the two.
        """
        self._set(similarityFunction=value)
        return self
    def setStartTime(self, value):
        """
        Args:
            startTime: Set a custom 'now' time if using historical data
        """
        self._set(startTime=value)
        return self
    def setStartTimeFormat(self, value):
        """
        Args:
            startTimeFormat: Format for start time
        """
        self._set(startTimeFormat=value)
        return self

    def setSupportThreshold(self, value):
        """
        Args:
            supportThreshold: Minimum number of ratings per item
        """
        self._set(supportThreshold=value)
        return self

    def setTimeCol(self, value):
        """
        Args:
            timeCol: Time of activity
        """
        self._set(timeCol=value)
        return self
    def setTimeDecayCoeff(self, value):
        """
        Args:
            timeDecayCoeff: Used to scale the time decay coefficient to a different half-life duration
        """
        self._set(timeDecayCoeff=value)
        return self
    def setUserCol(self, value):
        """
        Args:
            userCol: column name for user ids. Ids must be within the integer value range.
        """
        self._set(userCol=value)
        return self
    def getActivityTimeFormat(self):
        """
        Returns:
            activityTimeFormat: Time format for events, default: yyyy/MM/dd'T'h:mm:ss
        """
        return self.getOrDefault(self.activityTimeFormat)

    def getAlpha(self):
        """
        Returns:
            alpha: alpha for implicit preference
        """
        return self.getOrDefault(self.alpha)

    def getBlockSize(self):
        """
        Returns:
            blockSize: block size for stacking input data in matrices. Data is stacked within partitions. If block size is more than remaining data in a partition then it is adjusted to the size of this data.
        """
        return self.getOrDefault(self.blockSize)

    def getCheckpointInterval(self):
        """
        Returns:
            checkpointInterval: set checkpoint interval (>= 1) or disable checkpoint (-1). E.g. 10 means that the cache will get checkpointed every 10 iterations. Note: this setting will be ignored if the checkpoint directory is not set in the SparkContext
        """
        return self.getOrDefault(self.checkpointInterval)

    def getColdStartStrategy(self):
        """
        Returns:
            coldStartStrategy: strategy for dealing with unknown or new users/items at prediction time. This may be useful in cross-validation or production scenarios, for handling user/item ids the model has not seen in the training data. Supported values: nan, drop.
        """
        return self.getOrDefault(self.coldStartStrategy)

    def getFinalStorageLevel(self):
        """
        Returns:
            finalStorageLevel: StorageLevel for ALS model factors.
        """
        return self.getOrDefault(self.finalStorageLevel)

    def getImplicitPrefs(self):
        """
        Returns:
            implicitPrefs: whether to use implicit preference
        """
        return self.getOrDefault(self.implicitPrefs)

    def getIntermediateStorageLevel(self):
        """
        Returns:
            intermediateStorageLevel: StorageLevel for intermediate datasets. Cannot be 'NONE'.
        """
        return self.getOrDefault(self.intermediateStorageLevel)

    def getItemCol(self):
        """
        Returns:
            itemCol: column name for item ids. Ids must be within the integer value range.
        """
        return self.getOrDefault(self.itemCol)

    def getMaxIter(self):
        """
        Returns:
            maxIter: maximum number of iterations (>= 0)
        """
        return self.getOrDefault(self.maxIter)

    def getNonnegative(self):
        """
        Returns:
            nonnegative: whether to use nonnegative constraint for least squares
        """
        return self.getOrDefault(self.nonnegative)

    def getNumItemBlocks(self):
        """
        Returns:
            numItemBlocks: number of item blocks
        """
        return self.getOrDefault(self.numItemBlocks)

    def getNumUserBlocks(self):
        """
        Returns:
            numUserBlocks: number of user blocks
        """
        return self.getOrDefault(self.numUserBlocks)

    def getPredictionCol(self):
        """
        Returns:
            predictionCol: prediction column name
        """
        return self.getOrDefault(self.predictionCol)

    def getRank(self):
        """
        Returns:
            rank: rank of the factorization
        """
        return self.getOrDefault(self.rank)

    def getRatingCol(self):
        """
        Returns:
            ratingCol: column name for ratings
        """
        return self.getOrDefault(self.ratingCol)

    def getRegParam(self):
        """
        Returns:
            regParam: regularization parameter (>= 0)
        """
        return self.getOrDefault(self.regParam)

    def getSeed(self):
        """
        Returns:
            seed: random seed
        """
        return self.getOrDefault(self.seed)

    def getSimilarityFunction(self):
        """
        Returns:
            similarityFunction: Defines the similarity function to be used by the model. Lift favors serendipity, Co-occurrence favors predictability, and Jaccard is a nice compromise between the two.
        """
        return self.getOrDefault(self.similarityFunction)
    def getStartTime(self):
        """
        Returns:
            startTime: Set a custom 'now' time if using historical data
        """
        return self.getOrDefault(self.startTime)
    def getStartTimeFormat(self):
        """
        Returns:
            startTimeFormat: Format for start time
        """
        return self.getOrDefault(self.startTimeFormat)

    def getSupportThreshold(self):
        """
        Returns:
            supportThreshold: Minimum number of ratings per item
        """
        return self.getOrDefault(self.supportThreshold)

    def getTimeCol(self):
        """
        Returns:
            timeCol: Time of activity
        """
        return self.getOrDefault(self.timeCol)
    def getTimeDecayCoeff(self):
        """
        Returns:
            timeDecayCoeff: Used to scale the time decay coefficient to a different half-life duration
        """
        return self.getOrDefault(self.timeDecayCoeff)
    def getUserCol(self):
        """
        Returns:
            userCol: column name for user ids. Ids must be within the integer value range.
        """
        return self.getOrDefault(self.userCol)
    def _create_model(self, java_model):
        # Prefer wrapping the fitted Java model directly; fall back to
        # _from_java if the constructor does not accept a java_obj keyword.
        try:
            model = SARModel(java_obj=java_model)
            model._transfer_params_from_java()
        except TypeError:
            model = SARModel._from_java(java_model)
        return model

    def _fit(self, dataset):
        # Fit on the JVM side, then wrap the result in a Python SARModel.
        java_model = self._fit_java(dataset)
        return self._create_model(java_model)
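
# ---------------------------------------------------------------------------
# Usage sketch (illustrative only, not part of the generated source above).
# It shows one plausible way to fit SAR on a small implicit-feedback
# DataFrame. The SparkSession setup and the toy data are assumptions; the
# column names match the estimator defaults, the timestamps follow the
# default activityTimeFormat ("yyyy/MM/dd'T'h:mm:ss"), supportThreshold is
# lowered so the tiny dataset clears the per-item rating minimum, and
# SARModel is assumed to expose recommendForAllUsers, mirroring the
# recommendation API of Spark's ALSModel.
#
#     from pyspark.sql import SparkSession
#     from synapse.ml.recommendation import SAR
#
#     spark = SparkSession.builder.getOrCreate()
#     ratings = spark.createDataFrame(
#         [(0, 1, 4.0, "2019/01/01T10:00:00"),
#          (0, 2, 1.0, "2019/01/02T10:00:00"),
#          (1, 1, 5.0, "2019/01/03T10:00:00"),
#          (1, 3, 3.0, "2019/01/04T10:00:00")],
#         ["user", "item", "rating", "time"],
#     )
#
#     sar = (SAR()
#            .setUserCol("user")
#            .setItemCol("item")
#            .setRatingCol("rating")
#            .setTimeCol("time")
#            .setSimilarityFunction("jaccard")
#            .setSupportThreshold(1))
#
#     model = sar.fit(ratings)               # returns a SARModel
#     model.recommendForAllUsers(2).show()   # top-2 items per user
# ---------------------------------------------------------------------------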