src.model.reward.accuracyMap module
- src.model.reward.accuracyMap.cFunc(confusion_matrix: Dict[str, int], current_params: float, previous_accuracy: float | None = None, previous_params: float | None = None, best_r: float = 1.0, not_your_fault_r: float = 0.8, lucky_r: float = 0.5, bad_choice_r: float = 0.2, **kwargs) → Tuple[int | float, float]
- src.model.reward.accuracyMap.convert(val: Any, input_type: Type, Transformer: Callable) → Any
- src.model.reward.accuracyMap.emulatedBlackHole(confusion_matrix: Dict[str, int], current_params: float, previous_accuracy: float | None = None, previous_params: float | None = None, gamma: float = 0.99, current_step: int = 0, **kwargs) → Tuple[int | float, float]
- src.model.reward.accuracyMap.fastBlackHole(confusion_matrix: Dict[str, int], current_params: float, previous_accuracy: float | None = None, previous_params: float | None = None, gamma: float = 0.99, current_step: int = 0, **kwargs) → Tuple[int | float, float]
- src.model.reward.accuracyMap.find_zone(value: float, zone_delimiter: List[float] | str = [0.0, 0.5, 0.6, 0.7, 0.8, 0.85, 0.9]) → int
- src.model.reward.accuracyMap.incdec(confusion_matrix: Dict[str, int], *args, previous_accuracy: float | None = None, bonus: int = 1, neutral: int = 0, malus: int = 0, delta: float = 0.0, **kwargs) → Tuple[int, float]
- src.model.reward.accuracyMap.normalize(cm: Dict[str, int]) → Dict[str, float]
- src.model.reward.accuracyMap.paramChange(confusion_matrix: Dict[str, int], current_params: float, previous_accuracy: float | None = None, previous_params: float | None = None, same_params_r: float = 1.0, improved_zone_r: float = 1.0, worst_zone_r: float = 0.2, otherwise_r: float = 0.8, **kwargs) → Tuple[int | float, float]
- src.model.reward.accuracyMap.paramChangeEmu(confusion_matrix: Dict[str, int], current_params: float, previous_accuracy: float | None = None, previous_params: float | None = None, same_params_r: float = 1.0, improved_zone_r: float = 1.0, worst_zone_r: float = 0.25, otherwise_r: float = 0.5, **kwargs) → Tuple[int | float, float]
- src.model.reward.accuracyMap.platoKeeper(confusion_matrix: Dict[str, int], current_params: float, previous_accuracy: float | None = None, previous_params: float | None = None, **kwargs) → Tuple[int | float, float]
- src.model.reward.accuracyMap.uRewardLimit(confusion_matrix: Dict[str, int], TP_factor: int = 1, FP_factor: int = -1, TN_factor: int = 1, FN_factor: int = -1, Undecided_factor: int = -1, **kwargs) → float
- src.model.reward.accuracyMap.uRewardLimitPositive(confusion_matrix: Dict[str, int], TP_factor: int | str = 1, FP_factor: int | str = -2, TN_factor: int | str = 1, FN_factor: int | str = -1, Undecided_factor: int | str = -1, **kwargs) → float
- src.model.reward.accuracyMap.weightAccuracy(confusion_matrix: Dict[str, int], TP_factor: int | str = 1, FP_factor: int | str = 2, TN_factor: int | str = 1, FN_factor: int | str = 1, Undecided_factor: int | str = 1, **kwargs) → float
- src.model.reward.accuracyMap.zoneAcc(accuracy: float, zone_bonus: List[int | float] = [-1, 0.0, 0.2, 0.4, 0.8, 1.0, 2.0], **kwargs) → Tuple[int | float, float]
- src.model.reward.accuracyMap.zoneBlackHole(confusion_matrix: Dict[str, int], current_params: float, previous_accuracy: float | None = None, previous_params: float | None = None, **kwargs) → Tuple[int | float, float]
- src.model.reward.accuracyMap.zoneBonusMalus(confusion_matrix: Dict[str, int], previous_accuracy: float | None = None, zone_delimiter: List[float] | str = [0.0, 0.5, 0.7, 0.8, 0.9, 0.95], zone_bonus: List[int | float] | str = [0, 1, 2, 3, 4, 5], zone_malus: List[int | float] | str = [-100, -100, -100, -100, -100, -100], **kwargs) → Tuple[float, float]
- src.model.reward.accuracyMap.zoneCM(confusion_matrix: Dict[str, int], **kwargs) → Tuple[int | float, float]
- src.model.reward.accuracyMap.zoneParams(param: float, zone_delimiter: List[float] | str = [0.0, 1.0, 3.0, 8.0], zone_bonus: List[int | float] = [0, 1, 2, 3], **kwargs) → Tuple[int | float, float]
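A minimal usage sketch of the listing above. The confusion-matrix key names ("TP", "FP", "TN", "FN", "Undecided") and the example values are assumptions inferred from the *_factor parameter names, not taken from the module source; check the implementation before relying on them.

```python
# Assumed usage of src.model.reward.accuracyMap; key names and semantics
# are inferred from the signatures above, not confirmed by the module docs.
from src.model.reward.accuracyMap import find_zone, uRewardLimit, zoneBonusMalus

# Hypothetical confusion matrix for one evaluation step
# (keys assumed from the TP_factor/FP_factor/... parameter names).
cm = {"TP": 40, "FP": 5, "TN": 45, "FN": 8, "Undecided": 2}

# Scalar reward weighted by the per-outcome factors.
reward = uRewardLimit(cm, TP_factor=1, FP_factor=-1, TN_factor=1, FN_factor=-1)

# Zone index (int) for an accuracy value, using the default delimiters
# [0.0, 0.5, 0.6, 0.7, 0.8, 0.85, 0.9] from the signature.
zone = find_zone(0.87)

# Zone-based bonus/malus reward; returns a (reward, accuracy) tuple
# per the Tuple[float, float] return annotation.
r, acc = zoneBonusMalus(cm, previous_accuracy=0.82)
```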