object NN extends NN
Linear Supertypes
Ordering
- Alphabetic
- By Inheritance
Inherited
- NN
- NN
- AnyRef
- Any
- Hide All
- Show All
Visibility
- Public
- Protected
Value Members
- final def !=(arg0: Any): Boolean
- Definition Classes
- AnyRef → Any
- final def ##: Int
- Definition Classes
- AnyRef → Any
- final def ==(arg0: Any): Boolean
- Definition Classes
- AnyRef → Any
- def addBias[T](value: Tensor[T], bias: Tensor[T], cNNDataFormat: CNNDataFormat = CNNDataFormat.default)(implicit arg0: core.types.TF[T], arg1: core.types.IsNumeric[T]): Tensor[T]
- Definition Classes
- NN
- final def asInstanceOf[T0]: T0
- Definition Classes
- Any
- def clone(): AnyRef
- Attributes
- protected[lang]
- Definition Classes
- AnyRef
- Annotations
- @throws(classOf[java.lang.CloneNotSupportedException]) @native() @HotSpotIntrinsicCandidate()
- def conv2D[T](input: Tensor[T], filter: Tensor[T], stride1: Long, stride2: Long, padding: ConvPaddingMode, dataFormat: CNNDataFormat = CNNDataFormat.default, dilations: (Int, Int, Int, Int) = (1, 1, 1, 1), useCuDNNOnGPU: Boolean = true)(implicit arg0: core.types.TF[T], arg1: core.types.IsDecimal[T]): Tensor[T]
- Definition Classes
- NN
- def conv2DBackpropFilter[T](input: Tensor[T], filterSizes: Tensor[Int], outputGradient: Tensor[T], stride1: Long, stride2: Long, padding: ConvPaddingMode, dataFormat: CNNDataFormat = CNNDataFormat.default, dilations: (Int, Int, Int, Int) = (1, 1, 1, 1), useCuDNNOnGPU: Boolean = true)(implicit arg0: core.types.TF[T], arg1: core.types.IsDecimal[T]): Tensor[T]
- Definition Classes
- NN
- def conv2DBackpropInput[T](inputSizes: Tensor[Int], filter: Tensor[T], outputGradient: Tensor[T], stride1: Long, stride2: Long, padding: ConvPaddingMode, dataFormat: CNNDataFormat = CNNDataFormat.default, dilations: (Int, Int, Int, Int) = (1, 1, 1, 1), useCuDNNOnGPU: Boolean = true)(implicit arg0: core.types.TF[T], arg1: core.types.IsDecimal[T]): Tensor[T]
- Definition Classes
- NN
- def crelu[T](x: Tensor[T], axis: Tensor[Int] = -1)(implicit arg0: core.types.TF[T], arg1: core.types.IsReal[T]): Tensor[T]
- Definition Classes
- NN
- def dropout[T, I](input: Tensor[T], keepProbability: Float, scaleOutput: Boolean = true, noiseShape: Tensor[I] = null, seed: Option[Int] = None)(implicit arg0: core.types.TF[T], arg1: core.types.IsHalfOrFloatOrDouble[T], arg2: IntDefault[I], arg3: core.types.TF[I], arg4: core.types.IsIntOrLong[I]): Tensor[T]
- Definition Classes
- NN
- def elu[T, TL[A] <: TensorLike[A]](x: TL[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsReal[T], ev: Aux[TL, T]): TL[T]
- Definition Classes
- NN
- final def eq(arg0: AnyRef): Boolean
- Definition Classes
- AnyRef
- def equals(arg0: AnyRef): Boolean
- Definition Classes
- AnyRef → Any
- final def getClass(): Class[_ <: AnyRef]
- Definition Classes
- AnyRef → Any
- Annotations
- @native() @HotSpotIntrinsicCandidate()
- def hashCode(): Int
- Definition Classes
- AnyRef → Any
- Annotations
- @native() @HotSpotIntrinsicCandidate()
- def inTopK[I](predictions: Tensor[Float], targets: Tensor[I], k: Tensor[I])(implicit arg0: core.types.TF[I], arg1: core.types.IsIntOrLong[I]): Tensor[Boolean]
- Definition Classes
- NN
- final def isInstanceOf[T0]: Boolean
- Definition Classes
- Any
- def l2Loss[T](input: Tensor[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsDecimal[T]): Tensor[T]
- Definition Classes
- NN
- def l2Normalize[T](x: Tensor[T], axes: Tensor[Int], epsilon: Float = 1e-12f)(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T]): Tensor[T]
- Definition Classes
- NN
- def linear[T](x: Tensor[T], weights: Tensor[T], bias: Tensor[T] = null)(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T]): Tensor[T]
- Definition Classes
- NN
- def localResponseNormalization[T](input: Tensor[T], depthRadius: Int = 5, bias: Float = 1.0f, alpha: Float = 1.0f, beta: Float = 0.5f)(implicit arg0: core.types.TF[T], arg1: core.types.IsTruncatedHalfOrHalfOrFloat[T]): Tensor[T]
- Definition Classes
- NN
- def logPoissonLoss[T](logPredictions: Tensor[T], targets: Tensor[T], computeFullLoss: Boolean = false)(implicit arg0: core.types.TF[T], arg1: core.types.IsDecimal[T]): Tensor[T]
- Definition Classes
- NN
- def logSoftmax[T](logits: Tensor[T], axis: Int = -1)(implicit arg0: core.types.TF[T], arg1: core.types.IsDecimal[T]): Tensor[T]
- Definition Classes
- NN
- def lrn[T](input: Tensor[T], depthRadius: Int = 5, bias: Float = 1.0f, alpha: Float = 1.0f, beta: Float = 0.5f)(implicit arg0: core.types.TF[T], arg1: core.types.IsTruncatedHalfOrHalfOrFloat[T]): Tensor[T]
- Definition Classes
- NN
- def maxPool[T](input: Tensor[T], windowSize: Seq[Int], stride1: Int, stride2: Int, padding: ConvPaddingMode, dataFormat: CNNDataFormat = CNNDataFormat.default)(implicit arg0: core.types.TF[T], arg1: core.types.IsNumeric[T]): Tensor[T]
- Definition Classes
- NN
- def maxPoolGrad[T](originalInput: Tensor[T], originalOutput: Tensor[T], outputGradient: Tensor[T], windowSize: Seq[Int], stride1: Int, stride2: Int, padding: ConvPaddingMode, dataFormat: CNNDataFormat = CNNDataFormat.default)(implicit arg0: core.types.TF[T], arg1: core.types.IsNumeric[T]): Tensor[T]
- Definition Classes
- NN
- def maxPoolGradGrad[T](originalInput: Tensor[T], originalOutput: Tensor[T], outputGradient: Tensor[T], windowSize: Seq[Int], stride1: Int, stride2: Int, padding: ConvPaddingMode, dataFormat: CNNDataFormat = CNNDataFormat.default)(implicit arg0: core.types.TF[T], arg1: core.types.IsNumeric[T]): Tensor[T]
- Definition Classes
- NN
- final def ne(arg0: AnyRef): Boolean
- Definition Classes
- AnyRef
- final def notify(): Unit
- Definition Classes
- AnyRef
- Annotations
- @native() @HotSpotIntrinsicCandidate()
- final def notifyAll(): Unit
- Definition Classes
- AnyRef
- Annotations
- @native() @HotSpotIntrinsicCandidate()
- def relu[T](x: Tensor[T], alpha: Float = 0.0f)(implicit arg0: core.types.TF[T], arg1: core.types.IsReal[T]): Tensor[T]
- Definition Classes
- NN
- def relu6[T, TL[A] <: TensorLike[A]](x: TL[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsReal[T], ev: Aux[TL, T]): TL[T]
- Definition Classes
- NN
- def selu[T, TL[A] <: TensorLike[A]](x: TL[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsReal[T], ev: Aux[TL, T]): TL[T]
- Definition Classes
- NN
- def sequenceLoss[T, I](logits: Tensor[T], labels: Tensor[I], weights: Tensor[T] = null, averageAcrossTimeSteps: Boolean = true, averageAcrossBatch: Boolean = true, lossFn: (Tensor[T], Tensor[I]) => Tensor[T] = null)(implicit arg0: core.types.TF[T], arg1: core.types.IsDecimal[T], arg2: core.types.TF[I], arg3: core.types.IsIntOrLong[I]): Tensor[T]
- Definition Classes
- NN
- Annotations
- @throws(classOf[org.platanios.tensorflow.api.core.exception.InvalidShapeException])
- def sigmoidCrossEntropy[T](logits: Tensor[T], labels: Tensor[T], weights: Tensor[T] = null)(implicit arg0: core.types.TF[T], arg1: core.types.IsDecimal[T]): Tensor[T]
- Definition Classes
- NN
- def softmax[T](logits: Tensor[T], axis: Int = -1)(implicit arg0: core.types.TF[T], arg1: core.types.IsDecimal[T]): Tensor[T]
- Definition Classes
- NN
- def softmaxCrossEntropy[T](logits: Tensor[T], labels: Tensor[T], axis: Int = -1)(implicit arg0: core.types.TF[T], arg1: core.types.IsDecimal[T]): Tensor[T]
- Definition Classes
- NN
- def softplus[T, TL[A] <: TensorLike[A]](x: TL[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsReal[T], ev: Aux[TL, T]): TL[T]
- Definition Classes
- NN
- def softsign[T](input: Tensor[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsReal[T]): Tensor[T]
- Definition Classes
- NN
- def sparseSoftmaxCrossEntropy[T, I](logits: Tensor[T], labels: Tensor[I], axis: Int = -1)(implicit arg0: core.types.TF[T], arg1: core.types.IsDecimal[T], arg2: core.types.TF[I], arg3: core.types.IsIntOrLong[I]): Tensor[T]
- Definition Classes
- NN
- final def synchronized[T0](arg0: => T0): T0
- Definition Classes
- AnyRef
- def toString(): String
- Definition Classes
- AnyRef → Any
- def topK[T](input: Tensor[T], k: Tensor[Int] = 1, sorted: Boolean = true)(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T]): (Tensor[T], Tensor[Int])
- Definition Classes
- NN
- final def wait(arg0: Long, arg1: Int): Unit
- Definition Classes
- AnyRef
- Annotations
- @throws(classOf[java.lang.InterruptedException])
- final def wait(arg0: Long): Unit
- Definition Classes
- AnyRef
- Annotations
- @throws(classOf[java.lang.InterruptedException]) @native()
- final def wait(): Unit
- Definition Classes
- AnyRef
- Annotations
- @throws(classOf[java.lang.InterruptedException])