object NN extends NN
Linear Supertypes
Ordering
- Alphabetic
- By Inheritance
Inherited
- NN
- NN
- AnyRef
- Any
- Hide All
- Show All
Visibility
- Public
- Protected
Type Members
- sealed trait CNNDataFormat extends AnyRef
- sealed trait ConvPaddingMode extends AnyRef
Value Members
- final def !=(arg0: Any): Boolean
- Definition Classes
- AnyRef → Any
- final def ##: Int
- Definition Classes
- AnyRef → Any
- final def ==(arg0: Any): Boolean
- Definition Classes
- AnyRef → Any
- def addBias[T](value: Output[T], bias: Output[T], cNNDataFormat: CNNDataFormat = CNNDataFormat.default, name: String = "AddBias")(implicit arg0: core.types.TF[T], arg1: core.types.IsNumeric[T]): Output[T]
- Definition Classes
- NN
- def addBiasGradient[T](op: Op[(Output[T], Output[T]), Output[T]], outputGradient: Output[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsNumeric[T]): (Output[T], Output[T])
- Attributes
- protected
- Definition Classes
- NN
- def addBiasHessian[T](op: Op[Output[T], Output[T]], outputGradient: Output[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsNumeric[T]): Output[T]
- Attributes
- protected
- Definition Classes
- NN
- final def asInstanceOf[T0]: T0
- Definition Classes
- Any
- def batchNormalization[T](x: Output[T], mean: Output[T], variance: Output[T], offset: Option[Output[T]] = None, scale: Option[Output[T]] = None, epsilon: Output[T], name: String = "BatchNormalization")(implicit arg0: core.types.TF[T], arg1: core.types.IsDecimal[T]): Output[T]
- Definition Classes
- NN
- def clone(): AnyRef
- Attributes
- protected[lang]
- Definition Classes
- AnyRef
- Annotations
- @throws(classOf[java.lang.CloneNotSupportedException]) @native() @HotSpotIntrinsicCandidate()
- def conv2D[T](input: Output[T], filter: Output[T], stride1: Long, stride2: Long, padding: ConvPaddingMode, dataFormat: CNNDataFormat = CNNDataFormat.default, dilations: (Int, Int, Int, Int) = (1, 1, 1, 1), useCuDNNOnGPU: Boolean = true, name: String = "Conv2D")(implicit arg0: core.types.TF[T], arg1: core.types.IsDecimal[T]): Output[T]
- Definition Classes
- NN
- def conv2DBackpropFilter[T](input: Output[T], filterSizes: Output[Int], outputGradient: Output[T], stride1: Long, stride2: Long, padding: ConvPaddingMode, dataFormat: CNNDataFormat = CNNDataFormat.default, dilations: (Int, Int, Int, Int) = (1, 1, 1, 1), useCuDNNOnGPU: Boolean = true, name: String = "Conv2DBackpropFilter")(implicit arg0: core.types.TF[T], arg1: core.types.IsDecimal[T]): Output[T]
- Definition Classes
- NN
- def conv2DBackpropInput[T](inputSizes: Output[Int], filter: Output[T], outputGradient: Output[T], stride1: Long, stride2: Long, padding: ConvPaddingMode, dataFormat: CNNDataFormat = CNNDataFormat.default, dilations: (Int, Int, Int, Int) = (1, 1, 1, 1), useCuDNNOnGPU: Boolean = true, name: String = "Conv2DBackpropInput")(implicit arg0: core.types.TF[T], arg1: core.types.IsDecimal[T]): Output[T]
- Definition Classes
- NN
- def conv2DGradient[T](op: Op[(Output[T], Output[T]), Output[T]], outputGradient: Output[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsDecimal[T]): (Output[T], Output[T])
- Attributes
- protected
- Definition Classes
- NN
- def crelu[T](input: Output[T], axis: Output[Int] = -1, name: String = "CReLU")(implicit arg0: core.types.TF[T], arg1: core.types.IsReal[T]): Output[T]
- Definition Classes
- NN
- def dropout[T, I](input: Output[T], keepProbability: Float, scaleOutput: Boolean = true, noiseShape: Output[I] = null, seed: Option[Int] = None, name: String = "Dropout")(implicit arg0: core.types.TF[T], arg1: core.types.IsHalfOrFloatOrDouble[T], arg2: IntDefault[I], arg3: core.types.TF[I], arg4: core.types.IsIntOrLong[I]): Output[T]
- Definition Classes
- NN
- Annotations
- @throws(classOf[java.lang.IllegalArgumentException])
- def dynamicDropout[T, I](input: Output[T], keepProbability: Output[T], scaleOutput: Boolean = true, noiseShape: Output[I] = null, seed: Option[Int] = None, name: String = "Dropout")(implicit arg0: core.types.TF[T], arg1: core.types.IsHalfOrFloatOrDouble[T], arg2: IntDefault[I], arg3: core.types.TF[I], arg4: core.types.IsIntOrLong[I]): Output[T]
- Definition Classes
- NN
- def elu[T, OL[A] <: OutputLike[A]](input: OL[T], name: String = "ELU")(implicit arg0: core.types.TF[T], arg1: core.types.IsReal[T], ev: Aux[OL, T]): OL[T]
- Definition Classes
- NN
- def eluGradient[T](op: Op[Output[T], Output[T]], outputGradient: Output[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsReal[T]): Output[T]
- Attributes
- protected
- Definition Classes
- NN
- def eluHessian[T](op: Op[(Output[T], Output[T]), Output[T]], outputGradient: Output[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsReal[T]): (Output[T], Output[T])
- Attributes
- protected
- Definition Classes
- NN
- final def eq(arg0: AnyRef): Boolean
- Definition Classes
- AnyRef
- def equals(arg0: AnyRef): Boolean
- Definition Classes
- AnyRef → Any
- def fusedBatchNormalization[T](x: Output[T], scale: Output[Float], offset: Output[Float], mean: Option[Output[Float]] = None, variance: Option[Output[Float]] = None, epsilon: Float = 0.0001f, dataFormat: CNNDataFormat = NWCFormat, isTraining: Boolean = true, name: String = "FusedBatchNormalization")(implicit arg0: core.types.TF[T], arg1: core.types.IsDecimal[T]): (Output[T], Output[Float], Output[Float], Output[Float], Output[Float])
- Definition Classes
- NN
- Annotations
- @throws(classOf[java.lang.IllegalArgumentException])
- def fusedBatchNormalizationGradient[T](op: Op[(Output[T], Output[Float], Output[Float], Output[Float], Output[Float]), (Output[T], Output[Float], Output[Float], Output[Float], Output[Float])], outputGradient: (Output[T], Output[Float], Output[Float], Output[Float], Output[Float]))(implicit arg0: core.types.TF[T], arg1: core.types.IsDecimal[T]): (Output[T], Output[Float], Output[Float], Output[Float], Output[Float])
- Attributes
- protected
- Definition Classes
- NN
- final def getClass(): Class[_ <: AnyRef]
- Definition Classes
- AnyRef → Any
- Annotations
- @native() @HotSpotIntrinsicCandidate()
- def hashCode(): Int
- Definition Classes
- AnyRef → Any
- Annotations
- @native() @HotSpotIntrinsicCandidate()
- def inTopK[I](predictions: Output[Float], targets: Output[I], k: Output[I], name: String = "InTopK")(implicit arg0: core.types.TF[I], arg1: core.types.IsIntOrLong[I]): Output[Boolean]
- Definition Classes
- NN
- final def isInstanceOf[T0]: Boolean
- Definition Classes
- Any
- def l2Loss[T](input: Output[T], name: String = "L2Loss")(implicit arg0: core.types.TF[T], arg1: core.types.IsDecimal[T]): Output[T]
- Definition Classes
- NN
- def l2LossGradient[T](op: Op[Output[T], Output[T]], outputGradient: Output[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsDecimal[T]): Output[T]
- Attributes
- protected
- Definition Classes
- NN
- def l2Normalize[T, I](x: Output[T], axes: Output[I], epsilon: Float = 1e-12f, name: String = "L2Normalize")(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T], arg2: core.types.TF[I], arg3: core.types.IsIntOrLong[I]): Output[T]
- Definition Classes
- NN
- def linear[T](x: Output[T], weights: Output[T], bias: Output[T] = null, name: String = "Linear")(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T]): Output[T]
- Definition Classes
- NN
- def localResponseNormalization[T](input: Output[T], depthRadius: Int = 5, bias: Float = 1.0f, alpha: Float = 1.0f, beta: Float = 0.5f, name: String = "LocalResponseNormalization")(implicit arg0: core.types.TF[T], arg1: core.types.IsTruncatedHalfOrHalfOrFloat[T]): Output[T]
- Definition Classes
- NN
- def logPoissonLoss[T](logPredictions: Output[T], targets: Output[T], computeFullLoss: Boolean = false, name: String = "LogPoissonLoss")(implicit arg0: core.types.TF[T], arg1: core.types.IsDecimal[T]): Output[T]
- Definition Classes
- NN
- def logSoftmax[T](logits: Output[T], axis: Int = -1, name: String = "LogSoftmax")(implicit arg0: core.types.TF[T], arg1: core.types.IsDecimal[T]): Output[T]
- Definition Classes
- NN
- def logSoftmaxGradient[T](op: Op[Output[T], Output[T]], outputGradient: Output[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsDecimal[T]): Output[T]
- Attributes
- protected
- Definition Classes
- NN
- def lrn[T](input: Output[T], depthRadius: Int = 5, bias: Float = 1.0f, alpha: Float = 1.0f, beta: Float = 0.5f, name: String = "LRN")(implicit arg0: core.types.TF[T], arg1: core.types.IsTruncatedHalfOrHalfOrFloat[T]): Output[T]
- Definition Classes
- NN
- def lrnGradient[T](op: Op[Output[T], Output[T]], outputGradient: Output[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsTruncatedHalfOrHalfOrFloat[T]): Output[T]
- Attributes
- protected
- Definition Classes
- NN
- def maxPool[T](input: Output[T], windowSize: Output[Int], strides: Output[Int], padding: ConvPaddingMode, dataFormat: CNNDataFormat = CNNDataFormat.default, name: String = "MaxPool")(implicit arg0: core.types.TF[T], arg1: core.types.IsNumeric[T]): Output[T]
- Definition Classes
- NN
- def maxPoolGrad[T](originalInput: Output[T], originalOutput: Output[T], outputGradient: Output[T], windowSize: Output[Int], strides: Output[Int], padding: ConvPaddingMode, dataFormat: CNNDataFormat = CNNDataFormat.default, name: String = "MaxPoolGrad")(implicit arg0: core.types.TF[T], arg1: core.types.IsNumeric[T]): Output[T]
- Definition Classes
- NN
- def maxPoolGradGrad[T](originalInput: Output[T], originalOutput: Output[T], outputGradient: Output[T], windowSize: Output[Int], strides: Output[Int], padding: ConvPaddingMode, dataFormat: CNNDataFormat = CNNDataFormat.default, name: String = "MaxPoolGradGrad")(implicit arg0: core.types.TF[T], arg1: core.types.IsNumeric[T]): Output[T]
- Definition Classes
- NN
- def maxPoolGradient[T](op: Op[(Output[T], Output[Int], Output[Int]), Output[T]], outputGradient: Output[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsNumeric[T]): (Output[T], Output[Int], Output[Int])
- Attributes
- protected
- Definition Classes
- NN
- def maxPoolHessian[T](op: Op[(Output[T], Output[T], Output[T], Output[Int], Output[Int]), Output[T]], outputGradient: Output[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsNumeric[T]): (Output[T], Output[T], Output[T], Output[Int], Output[Int])
- Attributes
- protected
- Definition Classes
- NN
- def maxPoolHessianGradient[T](op: Op[(Output[T], Output[T], Output[T], Output[Int], Output[Int]), Output[T]], outputGradient: Output[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsNumeric[T]): (Output[T], Output[T], Output[T], Output[Int], Output[Int])
- Attributes
- protected
- Definition Classes
- NN
- final def ne(arg0: AnyRef): Boolean
- Definition Classes
- AnyRef
- final def notify(): Unit
- Definition Classes
- AnyRef
- Annotations
- @native() @HotSpotIntrinsicCandidate()
- final def notifyAll(): Unit
- Definition Classes
- AnyRef
- Annotations
- @native() @HotSpotIntrinsicCandidate()
- def relu[T](input: Output[T], alpha: Float = 0.0f, name: String = "ReLU")(implicit arg0: core.types.TF[T], arg1: core.types.IsReal[T]): Output[T]
- Definition Classes
- NN
- def relu6[T, OL[A] <: OutputLike[A]](input: OL[T], name: String = "ReLU6")(implicit arg0: core.types.TF[T], arg1: core.types.IsReal[T], ev: Aux[OL, T]): OL[T]
- Definition Classes
- NN
- def relu6Gradient[T](op: Op[Output[T], Output[T]], outputGradient: Output[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsReal[T]): Output[T]
- Attributes
- protected
- Definition Classes
- NN
- def relu6Hessian[T](op: Op[(Output[T], Output[T]), Output[T]], outputGradient: Output[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsReal[T]): (Output[T], Output[T])
- Attributes
- protected
- Definition Classes
- NN
- def reluGradient[T](op: Op[Output[T], Output[T]], outputGradient: Output[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsReal[T]): Output[T]
- Attributes
- protected
- Definition Classes
- NN
- def reluHessian[T](op: Op[(Output[T], Output[T]), Output[T]], outputGradient: Output[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsReal[T]): (Output[T], Output[T])
- Attributes
- protected
- Definition Classes
- NN
- def selu[T, OL[A] <: OutputLike[A]](input: OL[T], name: String = "SELU")(implicit arg0: core.types.TF[T], arg1: core.types.IsReal[T], ev: Aux[OL, T]): OL[T]
- Definition Classes
- NN
- def seluGradient[T](op: Op[Output[T], Output[T]], outputGradient: Output[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsReal[T]): Output[T]
- Attributes
- protected
- Definition Classes
- NN
- def seluHessian[T](op: Op[(Output[T], Output[T]), Output[T]], outputGradient: Output[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsReal[T]): (Output[T], Output[T])
- Attributes
- protected
- Definition Classes
- NN
- def sequenceLoss[T, L](logits: Output[T], labels: Output[L], lossFn: (Output[T], Output[L]) => Output[T], weights: Output[T] = null, averageAcrossTimeSteps: Boolean = true, averageAcrossBatch: Boolean = true, name: String = "SequenceLoss")(implicit arg0: core.types.TF[T], arg1: core.types.IsDecimal[T], arg2: core.types.TF[L]): Output[T]
- Definition Classes
- NN
- Annotations
- @throws(classOf[org.platanios.tensorflow.api.core.exception.InvalidShapeException])
- def sigmoidCrossEntropy[T](logits: Output[T], labels: Output[T], weights: Output[T] = null, name: String = "SigmoidCrossEntropy")(implicit arg0: core.types.TF[T], arg1: core.types.IsDecimal[T]): Output[T]
- Definition Classes
- NN
- def softmax[T](logits: Output[T], axis: Int = -1, name: String = "Softmax")(implicit arg0: core.types.TF[T], arg1: core.types.IsDecimal[T]): Output[T]
- Definition Classes
- NN
- def softmaxCrossEntropy[T](logits: Output[T], labels: Output[T], axis: Int = -1, name: String = "SoftmaxCrossEntropy")(implicit arg0: core.types.TF[T], arg1: core.types.IsDecimal[T]): Output[T]
- Definition Classes
- NN
- def softmaxCrossEntropyGradient[T](op: Op[(Output[T], Output[T]), (Output[T], Output[T])], outputGradient: (Output[T], Output[T]))(implicit arg0: core.types.TF[T], arg1: core.types.IsDecimal[T]): (Output[T], Output[T])
- Attributes
- protected
- Definition Classes
- NN
- def softmaxGradient[T](op: Op[Output[T], Output[T]], outputGradient: Output[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsDecimal[T]): Output[T]
- Attributes
- protected
- Definition Classes
- NN
- def softmaxHelper[T](logits: Output[T], opType: String, axis: Int = -1, name: String = "Softmax")(implicit arg0: core.types.TF[T], arg1: core.types.IsDecimal[T]): Output[T]
- Attributes
- protected
- Definition Classes
- NN
- def softplus[T, OL[A] <: OutputLike[A]](input: OL[T], name: String = "Softplus")(implicit arg0: core.types.TF[T], arg1: core.types.IsDecimal[T], ev: Aux[OL, T]): OL[T]
- Definition Classes
- NN
- def softplusGradient[T](op: Op[Output[T], Output[T]], outputGradient: Output[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsDecimal[T]): Output[T]
- Attributes
- protected
- Definition Classes
- NN
- def softplusHessian[T](op: Op[(Output[T], Output[T]), Output[T]], outputGradient: Output[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsDecimal[T]): (Output[T], Output[T])
- Attributes
- protected
- Definition Classes
- NN
- def softsign[T, OL[A] <: OutputLike[A]](input: OL[T], name: String = "Softsign")(implicit arg0: core.types.TF[T], arg1: core.types.IsDecimal[T], ev: Aux[OL, T]): OL[T]
- Definition Classes
- NN
- def softsignGradient[T](op: Op[Output[T], Output[T]], outputGradient: Output[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsDecimal[T]): Output[T]
- Attributes
- protected
- Definition Classes
- NN
- def sparseSoftmaxCrossEntropy[T, I](logits: Output[T], labels: Output[I], axis: Int = -1, name: String = "SparseSoftmaxCrossEntropy")(implicit arg0: core.types.TF[T], arg1: core.types.IsDecimal[T], arg2: core.types.TF[I], arg3: core.types.IsIntOrLong[I]): Output[T]
- Definition Classes
- NN
- def sparseSoftmaxCrossEntropyGradient[T, I](op: Op[(Output[T], Output[I]), (Output[T], Output[T])], outputGradient: (Output[T], Output[T]))(implicit arg0: core.types.TF[T], arg1: core.types.IsDecimal[T], arg2: core.types.TF[I], arg3: core.types.IsIntOrLong[I]): (Output[T], Output[I])
- Attributes
- protected
- Definition Classes
- NN
- final def synchronized[T0](arg0: => T0): T0
- Definition Classes
- AnyRef
- def toString(): String
- Definition Classes
- AnyRef → Any
- def topK[T](input: Output[T], k: Output[Int], sorted: Boolean = true, name: String = "TopK")(implicit arg0: core.types.TF[T], arg1: core.types.IsReal[T]): (Output[T], Output[Int])
- Definition Classes
- NN
- def topKGradient[T](op: Op[(Output[T], Output[Int]), (Output[T], Output[Int])], outputGradient: (Output[T], Output[Int]))(implicit arg0: core.types.TF[T], arg1: core.types.IsReal[T]): (Output[T], Output[Int])
- Attributes
- protected
- Definition Classes
- NN
- final def wait(arg0: Long, arg1: Int): Unit
- Definition Classes
- AnyRef
- Annotations
- @throws(classOf[java.lang.InterruptedException])
- final def wait(arg0: Long): Unit
- Definition Classes
- AnyRef
- Annotations
- @throws(classOf[java.lang.InterruptedException]) @native()
- final def wait(): Unit
- Definition Classes
- AnyRef
- Annotations
- @throws(classOf[java.lang.InterruptedException])
- object CNNDataFormat
- object ConvPaddingMode
- case object NCWFormat extends CNNDataFormat with Product with Serializable
- case object NWCFormat extends CNNDataFormat with Product with Serializable
- case object SameConvPadding extends ConvPaddingMode with Product with Serializable
- case object ValidConvPadding extends ConvPaddingMode with Product with Serializable