object tfi extends API with API
Linear Supertypes
Ordering
- Alphabetic
- By Inheritance
Inherited
- tfi
- API
- API
- Random
- NN
- Math
- Cast
- Basic
- API
- AnyRef
- Any
- Hide All
- Show All
Visibility
- Public
- Protected
Type Members
- type AbortedException = jni.AbortedException
- Definition Classes
- API
- type AlreadyExistsException = jni.AlreadyExistsException
- Definition Classes
- API
- type CancelledException = jni.CancelledException
- Definition Classes
- API
- type CheckpointNotFoundException = core.exception.CheckpointNotFoundException
- Definition Classes
- API
- type DataLossException = jni.DataLossException
- Definition Classes
- API
- type DeadlineExceededException = jni.DeadlineExceededException
- Definition Classes
- API
- type DeviceSpecification = core.DeviceSpecification
- Definition Classes
- API
- type FailedPreconditionException = jni.FailedPreconditionException
- Definition Classes
- API
- type GraphMismatchException = core.exception.GraphMismatchException
- Definition Classes
- API
- type IllegalNameException = core.exception.IllegalNameException
- Definition Classes
- API
- type InternalException = jni.InternalException
- Definition Classes
- API
- type InvalidArgumentException = jni.InvalidArgumentException
- Definition Classes
- API
- type InvalidDataTypeException = core.exception.InvalidDataTypeException
- Definition Classes
- API
- type InvalidDeviceException = core.exception.InvalidDeviceException
- Definition Classes
- API
- type InvalidIndexerException = core.exception.InvalidIndexerException
- Definition Classes
- API
- type InvalidShapeException = core.exception.InvalidShapeException
- Definition Classes
- API
- type NotFoundException = jni.NotFoundException
- Definition Classes
- API
- type OpBuilderUsedException = core.exception.OpBuilderUsedException
- Definition Classes
- API
- type OutOfRangeException = jni.OutOfRangeException
- Definition Classes
- API
- type PermissionDeniedException = jni.PermissionDeniedException
- Definition Classes
- API
- type ResourceExhaustedException = jni.ResourceExhaustedException
- Definition Classes
- API
- type ShapeMismatchException = core.exception.ShapeMismatchException
- Definition Classes
- API
- type UnauthenticatedException = jni.UnauthenticatedException
- Definition Classes
- API
- type UnavailableException = jni.UnavailableException
- Definition Classes
- API
- type UnimplementedException = jni.UnimplementedException
- Definition Classes
- API
- type UnknownException = jni.UnknownException
- Definition Classes
- API
Value Members
- final def !=(arg0: Any): Boolean
- Definition Classes
- AnyRef → Any
- final def ##: Int
- Definition Classes
- AnyRef → Any
- final def ==(arg0: Any): Boolean
- Definition Classes
- AnyRef → Any
- val AbortedException: core.exception.AbortedException.type
- Definition Classes
- API
- val AlreadyExistsException: core.exception.AlreadyExistsException.type
- Definition Classes
- API
- val CancelledException: core.exception.CancelledException.type
- Definition Classes
- API
- val CheckpointNotFoundException: core.exception.CheckpointNotFoundException.type
- Definition Classes
- API
- val DataLossException: core.exception.DataLossException.type
- Definition Classes
- API
- val DeadlineExceededException: core.exception.DeadlineExceededException.type
- Definition Classes
- API
- val FailedPreconditionException: core.exception.FailedPreconditionException.type
- Definition Classes
- API
- val GraphMismatchException: core.exception.GraphMismatchException.type
- Definition Classes
- API
- val IllegalNameException: core.exception.IllegalNameException.type
- Definition Classes
- API
- val InternalException: core.exception.InternalException.type
- Definition Classes
- API
- val InvalidArgumentException: core.exception.InvalidArgumentException.type
- Definition Classes
- API
- val InvalidDataTypeException: core.exception.InvalidDataTypeException.type
- Definition Classes
- API
- val InvalidDeviceException: core.exception.InvalidDeviceException.type
- Definition Classes
- API
- val InvalidIndexerException: core.exception.InvalidIndexerException.type
- Definition Classes
- API
- val InvalidShapeException: core.exception.InvalidShapeException.type
- Definition Classes
- API
- val NotFoundException: core.exception.NotFoundException.type
- Definition Classes
- API
- val OpBuilderUsedException: core.exception.OpBuilderUsedException.type
- Definition Classes
- API
- val OutOfRangeException: core.exception.OutOfRangeException.type
- Definition Classes
- API
- val PermissionDeniedException: core.exception.PermissionDeniedException.type
- Definition Classes
- API
- val ResourceExhaustedException: core.exception.ResourceExhaustedException.type
- Definition Classes
- API
- val ShapeMismatchException: core.exception.ShapeMismatchException.type
- Definition Classes
- API
- val Timeline: core.client.Timeline.type
- Definition Classes
- API
- val UnauthenticatedException: core.exception.UnauthenticatedException.type
- Definition Classes
- API
- val UnavailableException: core.exception.UnavailableException.type
- Definition Classes
- API
- val UnimplementedException: core.exception.UnimplementedException.type
- Definition Classes
- API
- val UnknownException: core.exception.UnknownException.type
- Definition Classes
- API
- def abs[T, TL[A] <: tensors.TensorLike[A]](x: TL[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T], ev: Aux[TL, T]): TL[T]
- Definition Classes
- Math
- def acos[T, TL[A] <: tensors.TensorLike[A]](x: TL[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T], ev: Aux[TL, T]): TL[T]
- Definition Classes
- Math
- def acosh[T, TL[A] <: tensors.TensorLike[A]](x: TL[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T], ev: Aux[TL, T]): TL[T]
- Definition Classes
- Math
- def add[T](x: tensors.Tensor[T], y: tensors.Tensor[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T]): tensors.Tensor[T]
- Definition Classes
- Math
- def addBias[T](value: tensors.Tensor[T], bias: tensors.Tensor[T], cNNDataFormat: CNNDataFormat = CNNDataFormat.default)(implicit arg0: core.types.TF[T], arg1: core.types.IsNumeric[T]): tensors.Tensor[T]
- Definition Classes
- NN
- def addN[T](inputs: Seq[tensors.Tensor[T]])(implicit arg0: core.types.TF[T], arg1: core.types.IsNumeric[T]): tensors.Tensor[T]
- Definition Classes
- Math
- def all[I](input: tensors.Tensor[Boolean], axes: tensors.Tensor[I] = null, keepDims: Boolean = false)(implicit arg0: IntDefault[I], arg1: core.types.TF[I], arg2: core.types.IsIntOrLong[I]): tensors.Tensor[Boolean]
- Definition Classes
- Math
- def angleDouble[TL[A] <: tensors.TensorLike[A]](input: TL[core.types.ComplexDouble], name: String = "Angle")(implicit ev: Aux[TL, core.types.ComplexDouble]): TL[Double]
- Definition Classes
- Math
- def angleFloat[TL[A] <: tensors.TensorLike[A]](input: TL[core.types.ComplexFloat], name: String = "Angle")(implicit ev: Aux[TL, core.types.ComplexFloat]): TL[Float]
- Definition Classes
- Math
- def any[I](input: tensors.Tensor[Boolean], axes: tensors.Tensor[I] = null, keepDims: Boolean = false)(implicit arg0: IntDefault[I], arg1: core.types.TF[I], arg2: core.types.IsIntOrLong[I]): tensors.Tensor[Boolean]
- Definition Classes
- Math
- def approximatelyEqual[T](x: tensors.Tensor[T], y: tensors.Tensor[T], tolerance: Float = 0.00001f)(implicit arg0: core.types.TF[T], arg1: core.types.IsNumeric[T]): tensors.Tensor[Boolean]
- Definition Classes
- Math
- def argmax[T, I, IR](input: tensors.Tensor[T], axes: tensors.Tensor[I], outputDataType: core.types.DataType[IR])(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T], arg2: core.types.TF[I], arg3: core.types.IsIntOrLong[I], arg4: core.types.TF[IR], arg5: core.types.IsIntOrLong[IR]): tensors.Tensor[IR]
- Definition Classes
- Math
- def argmax[T, I](input: tensors.Tensor[T], axes: tensors.Tensor[I])(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T], arg2: core.types.TF[I], arg3: core.types.IsIntOrLong[I]): tensors.Tensor[Long]
- Definition Classes
- Math
- def argmin[T, I, IR](input: tensors.Tensor[T], axes: tensors.Tensor[I], outputDataType: core.types.DataType[IR])(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T], arg2: core.types.TF[I], arg3: core.types.IsIntOrLong[I], arg4: core.types.TF[IR], arg5: core.types.IsIntOrLong[IR]): tensors.Tensor[IR]
- Definition Classes
- Math
- def argmin[T, I](input: tensors.Tensor[T], axes: tensors.Tensor[I])(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T], arg2: core.types.TF[I], arg3: core.types.IsIntOrLong[I]): tensors.Tensor[Long]
- Definition Classes
- Math
- final def asInstanceOf[T0]: T0
- Definition Classes
- Any
- def asin[T, TL[A] <: tensors.TensorLike[A]](x: TL[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T], ev: Aux[TL, T]): TL[T]
- Definition Classes
- Math
- def asinh[T, TL[A] <: tensors.TensorLike[A]](x: TL[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T], ev: Aux[TL, T]): TL[T]
- Definition Classes
- Math
- def atan[T, TL[A] <: tensors.TensorLike[A]](x: TL[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T], ev: Aux[TL, T]): TL[T]
- Definition Classes
- Math
- def atan2[T](x: tensors.Tensor[T], y: tensors.Tensor[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsFloatOrDouble[T]): tensors.Tensor[T]
- Definition Classes
- Math
- def atanh[T, TL[A] <: tensors.TensorLike[A]](x: TL[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T], ev: Aux[TL, T]): TL[T]
- Definition Classes
- Math
- def batchToSpace[T, I](input: tensors.Tensor[T], blockSize: Int, crops: tensors.Tensor[I])(implicit arg0: core.types.TF[T], arg1: core.types.TF[I], arg2: core.types.IsIntOrLong[I]): tensors.Tensor[T]
- Definition Classes
- Basic
- def batchToSpaceND[T, I1, I2](input: tensors.Tensor[T], blockShape: tensors.Tensor[I1], crops: tensors.Tensor[I2])(implicit arg0: core.types.TF[T], arg1: core.types.TF[I1], arg2: core.types.IsIntOrLong[I1], arg3: core.types.TF[I2], arg4: core.types.IsIntOrLong[I2]): tensors.Tensor[T]
- Definition Classes
- Basic
- def binCount[T](input: tensors.Tensor[Int], dataType: core.types.DataType[T], weights: tensors.Tensor[T] = null, minLength: tensors.Tensor[Int] = null, maxLength: tensors.Tensor[Int] = null)(implicit arg0: core.types.TF[T], arg1: core.types.IsIntOrLongOrFloatOrDouble[T]): tensors.Tensor[T]
- Definition Classes
- Math
- def booleanMask[T](input: tensors.Tensor[T], mask: tensors.Tensor[Boolean])(implicit arg0: core.types.TF[T]): tensors.Tensor[T]
- Definition Classes
- Basic
- def bucketize[T](input: tensors.Tensor[T], boundaries: Seq[Float])(implicit arg0: core.types.TF[T], arg1: core.types.IsIntOrLongOrFloatOrDouble[T]): tensors.Tensor[T]
- Definition Classes
- Math
- def ceil[T, TL[A] <: tensors.TensorLike[A]](x: TL[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsHalfOrFloatOrDouble[T], ev: Aux[TL, T]): TL[T]
- Definition Classes
- Math
- def checkNumerics[T](input: tensors.Tensor[T], message: String = "")(implicit arg0: core.types.TF[T], arg1: core.types.IsDecimal[T]): tensors.Tensor[T]
- Definition Classes
- Basic
- def clone(): AnyRef
- Attributes
- protected[lang]
- Definition Classes
- AnyRef
- Annotations
- @throws(classOf[java.lang.CloneNotSupportedException]) @native() @HotSpotIntrinsicCandidate()
- def complexDouble(real: tensors.Tensor[Double], imag: tensors.Tensor[Double]): tensors.Tensor[core.types.ComplexDouble]
- Definition Classes
- Math
- def complexFloat(real: tensors.Tensor[Float], imag: tensors.Tensor[Float]): tensors.Tensor[core.types.ComplexFloat]
- Definition Classes
- Math
- def concatenate[T](inputs: Seq[tensors.Tensor[T]], axis: tensors.Tensor[Int] = 0)(implicit arg0: core.types.TF[T]): tensors.Tensor[T]
- Definition Classes
- Basic
- def conjugate[T, TL[A] <: tensors.TensorLike[A]](input: TL[T])(implicit arg0: core.types.TF[T], ev: Aux[TL, T]): TL[T]
- Definition Classes
- Math
- def conv2D[T](input: tensors.Tensor[T], filter: tensors.Tensor[T], stride1: Long, stride2: Long, padding: ConvPaddingMode, dataFormat: CNNDataFormat = CNNDataFormat.default, dilations: (Int, Int, Int, Int) = (1, 1, 1, 1), useCuDNNOnGPU: Boolean = true)(implicit arg0: core.types.TF[T], arg1: core.types.IsDecimal[T]): tensors.Tensor[T]
- Definition Classes
- NN
- def conv2DBackpropFilter[T](input: tensors.Tensor[T], filterSizes: tensors.Tensor[Int], outputGradient: tensors.Tensor[T], stride1: Long, stride2: Long, padding: ConvPaddingMode, dataFormat: CNNDataFormat = CNNDataFormat.default, dilations: (Int, Int, Int, Int) = (1, 1, 1, 1), useCuDNNOnGPU: Boolean = true)(implicit arg0: core.types.TF[T], arg1: core.types.IsDecimal[T]): tensors.Tensor[T]
- Definition Classes
- NN
- def conv2DBackpropInput[T](inputSizes: tensors.Tensor[Int], filter: tensors.Tensor[T], outputGradient: tensors.Tensor[T], stride1: Long, stride2: Long, padding: ConvPaddingMode, dataFormat: CNNDataFormat = CNNDataFormat.default, dilations: (Int, Int, Int, Int) = (1, 1, 1, 1), useCuDNNOnGPU: Boolean = true)(implicit arg0: core.types.TF[T], arg1: core.types.IsDecimal[T]): tensors.Tensor[T]
- Definition Classes
- NN
- def cos[T, TL[A] <: tensors.TensorLike[A]](x: TL[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T], ev: Aux[TL, T]): TL[T]
- Definition Classes
- Math
- def cosh[T, TL[A] <: tensors.TensorLike[A]](x: TL[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T], ev: Aux[TL, T]): TL[T]
- Definition Classes
- Math
- def countNonZero[T, I](input: tensors.Tensor[T], axes: tensors.Tensor[I] = null, keepDims: Boolean = false)(implicit arg0: core.types.TF[T], arg1: core.types.IsNumeric[T], arg2: IntDefault[I], arg3: core.types.TF[I], arg4: core.types.IsIntOrLong[I]): tensors.Tensor[Long]
- Definition Classes
- Math
- def crelu[T](x: tensors.Tensor[T], axis: tensors.Tensor[Int] = -1)(implicit arg0: core.types.TF[T], arg1: core.types.IsReal[T]): tensors.Tensor[T]
- Definition Classes
- NN
- def cross[T](a: tensors.Tensor[T], b: tensors.Tensor[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsReal[T]): tensors.Tensor[T]
- Definition Classes
- Math
- def cumprod[T, I](input: tensors.Tensor[T], axis: tensors.Tensor[I], exclusive: Boolean = false, reverse: Boolean = false)(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T], arg2: core.types.TF[I], arg3: core.types.IsIntOrLong[I]): tensors.Tensor[T]
- Definition Classes
- Math
- def cumsum[T, I](input: tensors.Tensor[T], axis: tensors.Tensor[I], exclusive: Boolean = false, reverse: Boolean = false)(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T], arg2: core.types.TF[I], arg3: core.types.IsIntOrLong[I]): tensors.Tensor[T]
- Definition Classes
- Math
- def depthToSpace[T](input: tensors.Tensor[T], blockSize: Int, dataFormat: CNNDataFormat = CNNDataFormat.default)(implicit arg0: core.types.TF[T]): tensors.Tensor[T]
- Definition Classes
- Basic
- def diag[T](diagonal: tensors.Tensor[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T]): tensors.Tensor[T]
- Definition Classes
- Math
- def diagPart[T](input: tensors.Tensor[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T]): tensors.Tensor[T]
- Definition Classes
- Math
- def digamma[T, TL[A] <: tensors.TensorLike[A]](x: TL[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsFloatOrDouble[T], ev: Aux[TL, T]): TL[T]
- Definition Classes
- Math
- def divide[T](x: tensors.Tensor[T], y: tensors.Tensor[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T]): tensors.Tensor[T]
- Definition Classes
- Math
- def dropout[T, I](input: tensors.Tensor[T], keepProbability: Float, scaleOutput: Boolean = true, noiseShape: tensors.Tensor[I] = null, seed: Option[Int] = None)(implicit arg0: core.types.TF[T], arg1: core.types.IsHalfOrFloatOrDouble[T], arg2: IntDefault[I], arg3: core.types.TF[I], arg4: core.types.IsIntOrLong[I]): tensors.Tensor[T]
- Definition Classes
- NN
- def editDistance[T](hypothesis: tensors.SparseTensor[T], truth: tensors.SparseTensor[T], normalize: Boolean = true)(implicit arg0: core.types.TF[T]): tensors.Tensor[Float]
- Definition Classes
- Basic
- def elu[T, TL[A] <: tensors.TensorLike[A]](x: TL[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsReal[T], ev: Aux[TL, T]): TL[T]
- Definition Classes
- NN
- final def eq(arg0: AnyRef): Boolean
- Definition Classes
- AnyRef
- def equal[T](x: tensors.Tensor[T], y: tensors.Tensor[T])(implicit arg0: core.types.TF[T]): tensors.Tensor[Boolean]
- Definition Classes
- Math
- def equals(arg0: AnyRef): Boolean
- Definition Classes
- AnyRef → Any
- def erf[T, TL[A] <: tensors.TensorLike[A]](x: TL[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T], ev: Aux[TL, T]): TL[T]
- Definition Classes
- Math
- def erfc[T, TL[A] <: tensors.TensorLike[A]](x: TL[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T], ev: Aux[TL, T]): TL[T]
- Definition Classes
- Math
- def exp[T, TL[A] <: tensors.TensorLike[A]](x: TL[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T], ev: Aux[TL, T]): TL[T]
- Definition Classes
- Math
- def expandDims[T, I](input: tensors.Tensor[T], axis: tensors.Tensor[I])(implicit arg0: core.types.TF[T], arg1: core.types.TF[I], arg2: core.types.IsIntOrLong[I]): tensors.Tensor[T]
- Definition Classes
- Basic
- def expm1[T, TL[A] <: tensors.TensorLike[A]](x: TL[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T], ev: Aux[TL, T]): TL[T]
- Definition Classes
- Math
- def floor[T, TL[A] <: tensors.TensorLike[A]](x: TL[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsHalfOrFloatOrDouble[T], ev: Aux[TL, T]): TL[T]
- Definition Classes
- Math
- def floorMod[T](x: tensors.Tensor[T], y: tensors.Tensor[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T]): tensors.Tensor[T]
- Definition Classes
- Math
- def gather[T, I1, I2](input: tensors.Tensor[T], indices: tensors.Tensor[I1], axis: tensors.Tensor[I2])(implicit arg0: core.types.TF[T], arg1: core.types.TF[I1], arg2: core.types.IsIntOrLong[I1], arg3: core.types.TF[I2], arg4: core.types.IsIntOrLong[I2]): tensors.Tensor[T]
- Definition Classes
- Basic
- def gather[T, I1](input: tensors.Tensor[T], indices: tensors.Tensor[I1])(implicit arg0: core.types.TF[T], arg1: core.types.TF[I1], arg2: core.types.IsIntOrLong[I1]): tensors.Tensor[T]
- Definition Classes
- Basic
- def gatherND[T, I](input: tensors.Tensor[T], indices: tensors.Tensor[I])(implicit arg0: core.types.TF[T], arg1: core.types.TF[I], arg2: core.types.IsIntOrLong[I]): tensors.Tensor[T]
- Definition Classes
- Basic
- final def getClass(): Class[_ <: AnyRef]
- Definition Classes
- AnyRef → Any
- Annotations
- @native() @HotSpotIntrinsicCandidate()
- def greater[T](x: tensors.Tensor[T], y: tensors.Tensor[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsNumeric[T]): tensors.Tensor[Boolean]
- Definition Classes
- Math
- def greaterEqual[T](x: tensors.Tensor[T], y: tensors.Tensor[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsNumeric[T]): tensors.Tensor[Boolean]
- Definition Classes
- Math
- def hashCode(): Int
- Definition Classes
- AnyRef → Any
- Annotations
- @native() @HotSpotIntrinsicCandidate()
- def igamma[T](a: tensors.Tensor[T], x: tensors.Tensor[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsFloatOrDouble[T]): tensors.Tensor[T]
- Definition Classes
- Math
- def igammac[T](a: tensors.Tensor[T], x: tensors.Tensor[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsFloatOrDouble[T]): tensors.Tensor[T]
- Definition Classes
- Math
- def imagDouble[TL[A] <: tensors.TensorLike[A]](input: TL[core.types.ComplexDouble], name: String = "Imag")(implicit ev: Aux[TL, core.types.ComplexDouble]): TL[Double]
- Definition Classes
- Math
- def imagFloat[TL[A] <: tensors.TensorLike[A]](input: TL[core.types.ComplexFloat], name: String = "Imag")(implicit ev: Aux[TL, core.types.ComplexFloat]): TL[Float]
- Definition Classes
- Math
- def inTopK[I](predictions: tensors.Tensor[Float], targets: tensors.Tensor[I], k: tensors.Tensor[I])(implicit arg0: core.types.TF[I], arg1: core.types.IsIntOrLong[I]): tensors.Tensor[Boolean]
- Definition Classes
- NN
- def incompleteBeta[T](a: tensors.Tensor[T], b: tensors.Tensor[T], x: tensors.Tensor[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsFloatOrDouble[T]): tensors.Tensor[T]
- Definition Classes
- Math
- def indexedSlicesMask[T](input: tensors.TensorIndexedSlices[T], maskIndices: tensors.Tensor[Int])(implicit arg0: core.types.TF[T]): tensors.TensorIndexedSlices[T]
- Definition Classes
- Basic
- Annotations
- @throws(scala.this.throws.<init>$default$1[IllegalArgumentException])
- def invertPermutation[I](input: tensors.Tensor[I])(implicit arg0: core.types.TF[I], arg1: core.types.IsIntOrLong[I]): tensors.Tensor[I]
- Definition Classes
- Basic
- def isFinite[T, TL[A] <: tensors.TensorLike[A]](x: TL[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsHalfOrFloatOrDouble[T], ev: Aux[TL, T]): TL[Boolean]
- Definition Classes
- Math
- def isInf[T, TL[A] <: tensors.TensorLike[A]](x: TL[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsHalfOrFloatOrDouble[T], ev: Aux[TL, T]): TL[Boolean]
- Definition Classes
- Math
- final def isInstanceOf[T0]: Boolean
- Definition Classes
- Any
- def isNaN[T, TL[A] <: tensors.TensorLike[A]](x: TL[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsHalfOrFloatOrDouble[T], ev: Aux[TL, T]): TL[Boolean]
- Definition Classes
- Math
- def l2Loss[T](input: tensors.Tensor[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsDecimal[T]): tensors.Tensor[T]
- Definition Classes
- NN
- def l2Normalize[T](x: tensors.Tensor[T], axes: tensors.Tensor[Int], epsilon: Float = 1e-12f)(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T]): tensors.Tensor[T]
- Definition Classes
- NN
- def less[T](x: tensors.Tensor[T], y: tensors.Tensor[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsNumeric[T]): tensors.Tensor[Boolean]
- Definition Classes
- Math
- def lessEqual[T](x: tensors.Tensor[T], y: tensors.Tensor[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsNumeric[T]): tensors.Tensor[Boolean]
- Definition Classes
- Math
- def linear[T](x: tensors.Tensor[T], weights: tensors.Tensor[T], bias: tensors.Tensor[T] = null)(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T]): tensors.Tensor[T]
- Definition Classes
- NN
- def linspace[T, I](start: tensors.Tensor[T], stop: tensors.Tensor[T], numberOfValues: tensors.Tensor[I])(implicit arg0: core.types.TF[T], arg1: core.types.IsTruncatedHalfOrFloatOrDouble[T], arg2: core.types.TF[I], arg3: core.types.IsIntOrLong[I]): tensors.Tensor[T]
- Definition Classes
- Math
- def listDiff[T, I](x: tensors.Tensor[T], y: tensors.Tensor[T], indicesDataType: core.types.DataType[I])(implicit arg0: core.types.TF[T], arg1: core.types.TF[I], arg2: core.types.IsIntOrLong[I]): (tensors.Tensor[T], tensors.Tensor[I])
- Definition Classes
- Basic
- def localResponseNormalization[T](input: tensors.Tensor[T], depthRadius: Int = 5, bias: Float = 1.0f, alpha: Float = 1.0f, beta: Float = 0.5f)(implicit arg0: core.types.TF[T], arg1: core.types.IsTruncatedHalfOrHalfOrFloat[T]): tensors.Tensor[T]
- Definition Classes
- NN
- def log[T, TL[A] <: tensors.TensorLike[A]](x: TL[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T], ev: Aux[TL, T]): TL[T]
- Definition Classes
- Math
- def log1p[T, TL[A] <: tensors.TensorLike[A]](x: TL[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T], ev: Aux[TL, T]): TL[T]
- Definition Classes
- Math
- def logGamma[T, TL[A] <: tensors.TensorLike[A]](x: TL[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsFloatOrDouble[T], ev: Aux[TL, T]): TL[T]
- Definition Classes
- Math
- def logPoissonLoss[T](logPredictions: tensors.Tensor[T], targets: tensors.Tensor[T], computeFullLoss: Boolean = false)(implicit arg0: core.types.TF[T], arg1: core.types.IsDecimal[T]): tensors.Tensor[T]
- Definition Classes
- NN
- def logSigmoid[T, TL[A] <: tensors.TensorLike[A]](x: TL[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsReal[T], ev: Aux[TL, T]): TL[T]
- Definition Classes
- Math
- def logSoftmax[T](logits: tensors.Tensor[T], axis: Int = -1)(implicit arg0: core.types.TF[T], arg1: core.types.IsDecimal[T]): tensors.Tensor[T]
- Definition Classes
- NN
- def logSumExp[T](input: tensors.Tensor[T], axes: Seq[Int] = null, keepDims: Boolean = false)(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T]): tensors.Tensor[T]
- Definition Classes
- Math
- def logicalAnd(x: tensors.Tensor[Boolean], y: tensors.Tensor[Boolean]): tensors.Tensor[Boolean]
- Definition Classes
- Math
- def logicalNot(x: tensors.Tensor[Boolean]): tensors.Tensor[Boolean]
- Definition Classes
- Math
- def logicalOr(x: tensors.Tensor[Boolean], y: tensors.Tensor[Boolean]): tensors.Tensor[Boolean]
- Definition Classes
- Math
- def logicalXOr(x: tensors.Tensor[Boolean], y: tensors.Tensor[Boolean]): tensors.Tensor[Boolean]
- Definition Classes
- Math
- def lrn[T](input: tensors.Tensor[T], depthRadius: Int = 5, bias: Float = 1.0f, alpha: Float = 1.0f, beta: Float = 0.5f)(implicit arg0: core.types.TF[T], arg1: core.types.IsTruncatedHalfOrHalfOrFloat[T]): tensors.Tensor[T]
- Definition Classes
- NN
- def magnitudeDouble[TL[A] <: tensors.TensorLike[A]](input: TL[core.types.ComplexDouble], name: String = "Magnitude")(implicit ev: Aux[TL, core.types.ComplexDouble]): TL[Double]
- Definition Classes
- Math
- def magnitudeFloat[TL[A] <: tensors.TensorLike[A]](input: TL[core.types.ComplexFloat], name: String = "Magnitude")(implicit ev: Aux[TL, core.types.ComplexFloat]): TL[Float]
- Definition Classes
- Math
- def matmul[T](a: tensors.Tensor[T], b: tensors.Tensor[T], transposeA: Boolean = false, transposeB: Boolean = false, conjugateA: Boolean = false, conjugateB: Boolean = false, aIsSparse: Boolean = false, bIsSparse: Boolean = false)(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T]): tensors.Tensor[T]
- Definition Classes
- Math
- def matrixBandPart[T, I](input: tensors.Tensor[T], numSubDiagonals: tensors.Tensor[I], numSuperDiagonals: tensors.Tensor[I])(implicit arg0: core.types.TF[T], arg1: core.types.TF[I], arg2: core.types.IsIntOrLong[I]): tensors.Tensor[T]
- Definition Classes
- Math
- def matrixDiag[T](diagonal: tensors.Tensor[T])(implicit arg0: core.types.TF[T]): tensors.Tensor[T]
- Definition Classes
- Math
- def matrixDiagPart[T](input: tensors.Tensor[T])(implicit arg0: core.types.TF[T]): tensors.Tensor[T]
- Definition Classes
- Math
- def matrixSetDiag[T](input: tensors.Tensor[T], diagonal: tensors.Tensor[T])(implicit arg0: core.types.TF[T]): tensors.Tensor[T]
- Definition Classes
- Math
- def matrixTranspose[T](input: tensors.Tensor[T], conjugate: Boolean = false)(implicit arg0: core.types.TF[T]): tensors.Tensor[T]
- Definition Classes
- Basic
- Annotations
- @throws(scala.this.throws.<init>$default$1[org.platanios.tensorflow.api.core.exception.InvalidShapeException])
- def max[T, I](input: tensors.Tensor[T], axes: tensors.Tensor[I] = null, keepDims: Boolean = false)(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T], arg2: IntDefault[I], arg3: core.types.TF[I], arg4: core.types.IsIntOrLong[I]): tensors.Tensor[T]
- Definition Classes
- Math
- def maxPool[T](input: tensors.Tensor[T], windowSize: Seq[Int], stride1: Int, stride2: Int, padding: ConvPaddingMode, dataFormat: CNNDataFormat = CNNDataFormat.default)(implicit arg0: core.types.TF[T], arg1: core.types.IsNumeric[T]): tensors.Tensor[T]
- Definition Classes
- NN
- def maxPoolGrad[T](originalInput: tensors.Tensor[T], originalOutput: tensors.Tensor[T], outputGradient: tensors.Tensor[T], windowSize: Seq[Int], stride1: Int, stride2: Int, padding: ConvPaddingMode, dataFormat: CNNDataFormat = CNNDataFormat.default)(implicit arg0: core.types.TF[T], arg1: core.types.IsNumeric[T]): tensors.Tensor[T]
- Definition Classes
- NN
- def maxPoolGradGrad[T](originalInput: tensors.Tensor[T], originalOutput: tensors.Tensor[T], outputGradient: tensors.Tensor[T], windowSize: Seq[Int], stride1: Int, stride2: Int, padding: ConvPaddingMode, dataFormat: CNNDataFormat = CNNDataFormat.default)(implicit arg0: core.types.TF[T], arg1: core.types.IsNumeric[T]): tensors.Tensor[T]
- Definition Classes
- NN
- def maximum[T](x: tensors.Tensor[T], y: tensors.Tensor[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T]): tensors.Tensor[T]
- Definition Classes
- Math
- def mean[T, I](input: tensors.Tensor[T], axes: tensors.Tensor[I] = null, keepDims: Boolean = false)(implicit arg0: core.types.TF[T], arg1: core.types.IsNumeric[T], arg2: IntDefault[I], arg3: core.types.TF[I], arg4: core.types.IsIntOrLong[I]): tensors.Tensor[T]
- Definition Classes
- Math
- def min[T, I](input: tensors.Tensor[T], axes: tensors.Tensor[I] = null, keepDims: Boolean = false)(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T], arg2: IntDefault[I], arg3: core.types.TF[I], arg4: core.types.IsIntOrLong[I]): tensors.Tensor[T]
- Definition Classes
- Math
- def minimum[T](x: tensors.Tensor[T], y: tensors.Tensor[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T]): tensors.Tensor[T]
- Definition Classes
- Math
- def mod[T](x: tensors.Tensor[T], y: tensors.Tensor[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T]): tensors.Tensor[T]
- Definition Classes
- Math
- def multiply[T](x: tensors.Tensor[T], y: tensors.Tensor[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T]): tensors.Tensor[T]
- Definition Classes
- Math
- final def ne(arg0: AnyRef): Boolean
- Definition Classes
- AnyRef
- def negate[T, TL[A] <: tensors.TensorLike[A]](x: TL[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T], ev: Aux[TL, T]): TL[T]
- Definition Classes
- Math
- def notEqual[T](x: tensors.Tensor[T], y: tensors.Tensor[T])(implicit arg0: core.types.TF[T]): tensors.Tensor[Boolean]
- Definition Classes
- Math
- final def notify(): Unit
- Definition Classes
- AnyRef
- Annotations
- @native() @HotSpotIntrinsicCandidate()
- final def notifyAll(): Unit
- Definition Classes
- AnyRef
- Annotations
- @native() @HotSpotIntrinsicCandidate()
- def pad[T, I](input: tensors.Tensor[T], paddings: tensors.Tensor[I], mode: ops.basic.Basic.PaddingMode = ConstantPadding(Some(Tensor(0))))(implicit arg0: core.types.TF[T], arg1: core.types.TF[I], arg2: core.types.IsIntOrLong[I]): tensors.Tensor[T]
- Definition Classes
- Basic
- def parallelStack[T](inputs: Seq[tensors.Tensor[T]])(implicit arg0: core.types.TF[T]): tensors.Tensor[T]
- Definition Classes
- Basic
- def polygamma[T](n: tensors.Tensor[T], x: tensors.Tensor[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsFloatOrDouble[T]): tensors.Tensor[T]
- Definition Classes
- Math
- def pow[T](x: tensors.Tensor[T], y: tensors.Tensor[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T]): tensors.Tensor[T]
- Definition Classes
- Math
- def preventGradient[T](input: tensors.Tensor[T], message: String = "")(implicit arg0: core.types.TF[T]): tensors.Tensor[T]
- Definition Classes
- Basic
- def prod[T, I](input: tensors.Tensor[T], axes: tensors.Tensor[I] = null, keepDims: Boolean = false)(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T], arg2: IntDefault[I], arg3: core.types.TF[I], arg4: core.types.IsIntOrLong[I]): tensors.Tensor[T]
- Definition Classes
- Math
- def randomShuffle[T](value: tensors.Tensor[T], seed: Option[Int] = None)(implicit arg0: core.types.TF[T]): tensors.Tensor[T]
- Definition Classes
- Random
- def range[T](start: tensors.Tensor[T], limit: tensors.Tensor[T], delta: tensors.Tensor[T] = null)(implicit arg0: core.types.TF[T], arg1: core.types.IsNumeric[T]): tensors.Tensor[T]
- Definition Classes
- Math
- def rank[T <: tensors.TensorLike[_]](input: T): tensors.Tensor[Int]
- Definition Classes
- Basic
- def realDivide[T](x: tensors.Tensor[T], y: tensors.Tensor[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T]): tensors.Tensor[T]
- Definition Classes
- Math
- def realDouble[TL[A] <: tensors.TensorLike[A]](input: TL[core.types.ComplexDouble], name: String = "Real")(implicit ev: Aux[TL, core.types.ComplexDouble]): TL[Double]
- Definition Classes
- Math
- def realFloat[TL[A] <: tensors.TensorLike[A]](input: TL[core.types.ComplexFloat], name: String = "Real")(implicit ev: Aux[TL, core.types.ComplexFloat]): TL[Float]
- Definition Classes
- Math
- def reciprocal[T, TL[A] <: tensors.TensorLike[A]](x: TL[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T], ev: Aux[TL, T]): TL[T]
- Definition Classes
- Math
- def relu[T](x: tensors.Tensor[T], alpha: Float = 0.0f)(implicit arg0: core.types.TF[T], arg1: core.types.IsReal[T]): tensors.Tensor[T]
- Definition Classes
- NN
- def relu6[T, TL[A] <: tensors.TensorLike[A]](x: TL[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsReal[T], ev: Aux[TL, T]): TL[T]
- Definition Classes
- NN
- def requiredSpaceToBatchPaddingsAndCrops(inputShape: tensors.Tensor[Int], blockShape: tensors.Tensor[Int], basePaddings: tensors.Tensor[Int] = null): (tensors.Tensor[Int], tensors.Tensor[Int])
- Definition Classes
- Basic
- Annotations
- @throws(scala.this.throws.<init>$default$1[org.platanios.tensorflow.api.core.exception.InvalidShapeException])
- def reshape[T, I](input: tensors.Tensor[T], shape: tensors.Tensor[I])(implicit arg0: core.types.TF[T], arg1: core.types.TF[I], arg2: core.types.IsIntOrLong[I]): tensors.Tensor[T]
- Definition Classes
- Basic
- def reverse[T, I](input: tensors.Tensor[T], axes: tensors.Tensor[I])(implicit arg0: core.types.TF[T], arg1: core.types.TF[I], arg2: core.types.IsIntOrLong[I]): tensors.Tensor[T]
- Definition Classes
- Basic
- def reverseSequence[T, I](input: tensors.Tensor[T], sequenceLengths: tensors.Tensor[I], sequenceAxis: Int, batchAxis: Int = 0)(implicit arg0: core.types.TF[T], arg1: core.types.TF[I], arg2: core.types.IsIntOrLong[I]): tensors.Tensor[T]
- Definition Classes
- Basic
- def round[T, TL[A] <: tensors.TensorLike[A]](x: TL[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T], ev: Aux[TL, T]): TL[T]
- Definition Classes
- Math
- def roundInt[T, TL[A] <: tensors.TensorLike[A]](x: TL[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsHalfOrFloatOrDouble[T], ev: Aux[TL, T]): TL[T]
- Definition Classes
- Math
- def rsqrt[T, TL[A] <: tensors.TensorLike[A]](x: TL[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T], ev: Aux[TL, T]): TL[T]
- Definition Classes
- Math
- def scalarMul[T, TL[A] <: tensors.TensorLike[A]](scalar: tensors.Tensor[T], tensor: TL[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T], ev: Aux[TL, T]): TL[T]
- Definition Classes
- Math
- def scatterND[T, I](indices: tensors.Tensor[I], updates: tensors.Tensor[T], shape: tensors.Tensor[I])(implicit arg0: core.types.TF[T], arg1: core.types.TF[I], arg2: core.types.IsIntOrLong[I]): tensors.Tensor[T]
- Definition Classes
- Basic
- def segmentMax[T, I](data: tensors.Tensor[T], segmentIndices: tensors.Tensor[I])(implicit arg0: core.types.TF[T], arg1: core.types.IsReal[T], arg2: core.types.TF[I], arg3: core.types.IsIntOrLong[I]): tensors.Tensor[T]
- Definition Classes
- Math
- def segmentMean[T, I](data: tensors.Tensor[T], segmentIndices: tensors.Tensor[I])(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T], arg2: core.types.TF[I], arg3: core.types.IsIntOrLong[I]): tensors.Tensor[T]
- Definition Classes
- Math
- def segmentMin[T, I](data: tensors.Tensor[T], segmentIndices: tensors.Tensor[I])(implicit arg0: core.types.TF[T], arg1: core.types.IsReal[T], arg2: core.types.TF[I], arg3: core.types.IsIntOrLong[I]): tensors.Tensor[T]
- Definition Classes
- Math
- def segmentProd[T, I](data: tensors.Tensor[T], segmentIndices: tensors.Tensor[I])(implicit arg0: core.types.TF[T], arg1: core.types.IsNumeric[T], arg2: core.types.TF[I], arg3: core.types.IsIntOrLong[I]): tensors.Tensor[T]
- Definition Classes
- Math
- def segmentSum[T, I](data: tensors.Tensor[T], segmentIndices: tensors.Tensor[I])(implicit arg0: core.types.TF[T], arg1: core.types.IsNumeric[T], arg2: core.types.TF[I], arg3: core.types.IsIntOrLong[I]): tensors.Tensor[T]
- Definition Classes
- Math
- def select[T](condition: tensors.Tensor[Boolean], x: tensors.Tensor[T], y: tensors.Tensor[T])(implicit arg0: core.types.TF[T]): tensors.Tensor[T]
- Definition Classes
- Math
- def selu[T, TL[A] <: tensors.TensorLike[A]](x: TL[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsReal[T], ev: Aux[TL, T]): TL[T]
- Definition Classes
- NN
- def sequenceLoss[T, I](logits: tensors.Tensor[T], labels: tensors.Tensor[I], weights: tensors.Tensor[T] = null, averageAcrossTimeSteps: Boolean = true, averageAcrossBatch: Boolean = true, lossFn: (tensors.Tensor[T], tensors.Tensor[I]) => tensors.Tensor[T] = null)(implicit arg0: core.types.TF[T], arg1: core.types.IsDecimal[T], arg2: core.types.TF[I], arg3: core.types.IsIntOrLong[I]): tensors.Tensor[T]
- Definition Classes
- NN
- Annotations
- @throws(scala.this.throws.<init>$default$1[org.platanios.tensorflow.api.core.exception.InvalidShapeException])
- def sequenceMask[T](lengths: tensors.Tensor[T], maxLength: tensors.Tensor[T] = null)(implicit arg0: core.types.TF[T], arg1: core.types.IsIntOrUInt[T]): tensors.Tensor[Boolean]
- Definition Classes
- Basic
- Annotations
- @throws(scala.this.throws.<init>$default$1[IllegalArgumentException])
- def shape[T <: tensors.TensorLike[_]](input: T): tensors.Tensor[Int]
- Definition Classes
- Basic
- def shapeN(inputs: Seq[tensors.Tensor[_]]): Seq[tensors.Tensor[Int]]
- Definition Classes
- Basic
- def sigmoid[T, TL[A] <: tensors.TensorLike[A]](x: TL[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T], ev: Aux[TL, T]): TL[T]
- Definition Classes
- Math
- def sigmoidCrossEntropy[T](logits: tensors.Tensor[T], labels: tensors.Tensor[T], weights: tensors.Tensor[T] = null)(implicit arg0: core.types.TF[T], arg1: core.types.IsDecimal[T]): tensors.Tensor[T]
- Definition Classes
- NN
- def sign[T, TL[A] <: tensors.TensorLike[A]](x: TL[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T], ev: Aux[TL, T]): TL[T]
- Definition Classes
- Math
- def sin[T, TL[A] <: tensors.TensorLike[A]](x: TL[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T], ev: Aux[TL, T]): TL[T]
- Definition Classes
- Math
- def sinh[T, TL[A] <: tensors.TensorLike[A]](x: TL[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T], ev: Aux[TL, T]): TL[T]
- Definition Classes
- Math
- def size[T <: tensors.TensorLike[_]](input: T): tensors.Tensor[Long]
- Definition Classes
- Basic
- def softmax[T](logits: tensors.Tensor[T], axis: Int = -1)(implicit arg0: core.types.TF[T], arg1: core.types.IsDecimal[T]): tensors.Tensor[T]
- Definition Classes
- NN
- def softmaxCrossEntropy[T](logits: tensors.Tensor[T], labels: tensors.Tensor[T], axis: Int = -1)(implicit arg0: core.types.TF[T], arg1: core.types.IsDecimal[T]): tensors.Tensor[T]
- Definition Classes
- NN
- def softplus[T, TL[A] <: tensors.TensorLike[A]](x: TL[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsReal[T], ev: Aux[TL, T]): TL[T]
- Definition Classes
- NN
- def softsign[T](input: tensors.Tensor[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsReal[T]): tensors.Tensor[T]
- Definition Classes
- NN
- def spaceToBatch[T, I](input: tensors.Tensor[T], blockSize: Int, paddings: tensors.Tensor[I])(implicit arg0: core.types.TF[T], arg1: core.types.TF[I], arg2: core.types.IsIntOrLong[I]): tensors.Tensor[T]
- Definition Classes
- Basic
- def spaceToBatchND[T, I1, I2](input: tensors.Tensor[T], blockShape: tensors.Tensor[I1], paddings: tensors.Tensor[I2])(implicit arg0: core.types.TF[T], arg1: core.types.TF[I1], arg2: core.types.IsIntOrLong[I1], arg3: core.types.TF[I2], arg4: core.types.IsIntOrLong[I2]): tensors.Tensor[T]
- Definition Classes
- Basic
- def spaceToDepth[T](input: tensors.Tensor[T], blockSize: Int, dataFormat: CNNDataFormat = CNNDataFormat.default)(implicit arg0: core.types.TF[T]): tensors.Tensor[T]
- Definition Classes
- Basic
- def sparseSegmentMean[T, I1, I2](data: tensors.Tensor[T], indices: tensors.Tensor[I1], segmentIndices: tensors.Tensor[Int], numSegments: tensors.Tensor[I2] = null)(implicit arg0: core.types.TF[T], arg1: core.types.IsReal[T], arg2: core.types.TF[I1], arg3: core.types.IsIntOrLong[I1], arg4: IntDefault[I2], arg5: core.types.TF[I2], arg6: core.types.IsIntOrLong[I2]): tensors.Tensor[T]
- Definition Classes
- Math
- def sparseSegmentSum[T, I1, I2](data: tensors.Tensor[T], indices: tensors.Tensor[I1], segmentIndices: tensors.Tensor[Int], numSegments: tensors.Tensor[I2] = null)(implicit arg0: core.types.TF[T], arg1: core.types.IsReal[T], arg2: core.types.TF[I1], arg3: core.types.IsIntOrLong[I1], arg4: IntDefault[I2], arg5: core.types.TF[I2], arg6: core.types.IsIntOrLong[I2]): tensors.Tensor[T]
- Definition Classes
- Math
- def sparseSegmentSumSqrtN[T, I1, I2](data: tensors.Tensor[T], indices: tensors.Tensor[I1], segmentIndices: tensors.Tensor[Int], numSegments: tensors.Tensor[I2] = null)(implicit arg0: core.types.TF[T], arg1: core.types.IsReal[T], arg2: core.types.TF[I1], arg3: core.types.IsIntOrLong[I1], arg4: IntDefault[I2], arg5: core.types.TF[I2], arg6: core.types.IsIntOrLong[I2]): tensors.Tensor[T]
- Definition Classes
- Math
- def sparseSoftmaxCrossEntropy[T, I](logits: tensors.Tensor[T], labels: tensors.Tensor[I], axis: Int = -1)(implicit arg0: core.types.TF[T], arg1: core.types.IsDecimal[T], arg2: core.types.TF[I], arg3: core.types.IsIntOrLong[I]): tensors.Tensor[T]
- Definition Classes
- NN
- def split[T, I](input: tensors.Tensor[T], splitSizes: tensors.Tensor[I], axis: tensors.Tensor[Int] = 0)(implicit arg0: core.types.TF[T], arg1: core.types.TF[I], arg2: core.types.IsIntOrLong[I]): Seq[tensors.Tensor[T]]
- Definition Classes
- Basic
- def splitEvenly[T](input: tensors.Tensor[T], numSplits: Int, axis: tensors.Tensor[Int] = 0)(implicit arg0: core.types.TF[T]): Seq[tensors.Tensor[T]]
- Definition Classes
- Basic
- def sqrt[T, TL[A] <: tensors.TensorLike[A]](x: TL[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T], ev: Aux[TL, T]): TL[T]
- Definition Classes
- Math
- def square[T, TL[A] <: tensors.TensorLike[A]](x: TL[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T], ev: Aux[TL, T]): TL[T]
- Definition Classes
- Math
- def squaredDifference[T](x: tensors.Tensor[T], y: tensors.Tensor[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T]): tensors.Tensor[T]
- Definition Classes
- Math
- def squeeze[T](input: tensors.Tensor[T], axes: Seq[Int] = null)(implicit arg0: core.types.TF[T]): tensors.Tensor[T]
- Definition Classes
- Basic
- def stack[T](inputs: Seq[tensors.Tensor[T]], axis: Int = 0)(implicit arg0: core.types.TF[T]): tensors.Tensor[T]
- Definition Classes
- Basic
- def stopGradient[T](input: tensors.Tensor[T])(implicit arg0: core.types.TF[T]): tensors.Tensor[T]
- Definition Classes
- Basic
- def subtract[T](x: tensors.Tensor[T], y: tensors.Tensor[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T]): tensors.Tensor[T]
- Definition Classes
- Math
- def sum[T, I](input: tensors.Tensor[T], axes: tensors.Tensor[I] = null, keepDims: Boolean = false)(implicit arg0: core.types.TF[T], arg1: core.types.IsNumeric[T], arg2: IntDefault[I], arg3: core.types.TF[I], arg4: core.types.IsIntOrLong[I]): tensors.Tensor[T]
- Definition Classes
- Math
- final def synchronized[T0](arg0: => T0): T0
- Definition Classes
- AnyRef
- def tan[T, TL[A] <: tensors.TensorLike[A]](x: TL[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T], ev: Aux[TL, T]): TL[T]
- Definition Classes
- Math
- def tanh[T, TL[A] <: tensors.TensorLike[A]](x: TL[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T], ev: Aux[TL, T]): TL[T]
- Definition Classes
- Math
- def tensorDot[T](a: tensors.Tensor[T], b: tensors.Tensor[T], axesA: tensors.Tensor[Int], axesB: tensors.Tensor[Int])(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T]): tensors.Tensor[T]
- Definition Classes
- Math
- Annotations
- @throws(scala.this.throws.<init>$default$1[org.platanios.tensorflow.api.core.exception.InvalidShapeException])
- def tensorDot[T](a: tensors.Tensor[T], b: tensors.Tensor[T], numAxes: tensors.Tensor[Int])(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T]): tensors.Tensor[T]
- Definition Classes
- Math
- Annotations
- @throws(scala.this.throws.<init>$default$1[org.platanios.tensorflow.api.core.exception.InvalidShapeException])
- def tile[T, I](input: tensors.Tensor[T], multiples: tensors.Tensor[I])(implicit arg0: core.types.TF[T], arg1: core.types.TF[I], arg2: core.types.IsIntOrLong[I]): tensors.Tensor[T]
- Definition Classes
- Basic
- def toString(): String
- Definition Classes
- AnyRef → Any
- def topK[T](input: tensors.Tensor[T], k: tensors.Tensor[Int] = 1, sorted: Boolean = true)(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T]): (tensors.Tensor[T], tensors.Tensor[Int])
- Definition Classes
- NN
- def trace[T](input: tensors.Tensor[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsNumeric[T]): tensors.Tensor[T]
- Definition Classes
- Math
- def transpose[T, I](input: tensors.Tensor[T], permutation: tensors.Tensor[I] = null, conjugate: Boolean = false)(implicit arg0: core.types.TF[T], arg1: IntDefault[I], arg2: core.types.TF[I], arg3: core.types.IsIntOrLong[I]): tensors.Tensor[T]
- Definition Classes
- Basic
- def truncateDivide[T](x: tensors.Tensor[T], y: tensors.Tensor[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T]): tensors.Tensor[T]
- Definition Classes
- Math
- def truncateMod[T](x: tensors.Tensor[T], y: tensors.Tensor[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T]): tensors.Tensor[T]
- Definition Classes
- Math
- def unique[T, I](input: tensors.Tensor[T], indicesDataType: core.types.DataType[I])(implicit arg0: core.types.TF[T], arg1: core.types.TF[I], arg2: core.types.IsIntOrLong[I]): (tensors.Tensor[T], tensors.Tensor[I])
- Definition Classes
- Basic
- def uniqueWithCounts[T, I](input: tensors.Tensor[T], indicesDataType: core.types.DataType[I])(implicit arg0: core.types.TF[T], arg1: core.types.TF[I], arg2: core.types.IsIntOrLong[I]): (tensors.Tensor[T], tensors.Tensor[I], tensors.Tensor[I])
- Definition Classes
- Basic
- def unsortedSegmentMax[T, I1, I2](data: tensors.Tensor[T], segmentIndices: tensors.Tensor[I1], segmentsNumber: tensors.Tensor[I2])(implicit arg0: core.types.TF[T], arg1: core.types.IsReal[T], arg2: core.types.TF[I1], arg3: core.types.IsIntOrLong[I1], arg4: core.types.TF[I2], arg5: core.types.IsIntOrLong[I2]): tensors.Tensor[T]
- Definition Classes
- Math
- def unsortedSegmentSum[T, I1, I2](data: tensors.Tensor[T], segmentIndices: tensors.Tensor[I1], segmentsNumber: tensors.Tensor[I2])(implicit arg0: core.types.TF[T], arg1: core.types.IsNumeric[T], arg2: core.types.TF[I1], arg3: core.types.IsIntOrLong[I1], arg4: core.types.TF[I2], arg5: core.types.IsIntOrLong[I2]): tensors.Tensor[T]
- Definition Classes
- Math
- def unstack[T](input: tensors.Tensor[T], number: Int = -1, axis: Int = 0)(implicit arg0: core.types.TF[T]): Seq[tensors.Tensor[T]]
- Definition Classes
- Basic
- final def wait(arg0: Long, arg1: Int): Unit
- Definition Classes
- AnyRef
- Annotations
- @throws(classOf[java.lang.InterruptedException])
- final def wait(arg0: Long): Unit
- Definition Classes
- AnyRef
- Annotations
- @throws(classOf[java.lang.InterruptedException]) @native()
- final def wait(): Unit
- Definition Classes
- AnyRef
- Annotations
- @throws(classOf[java.lang.InterruptedException])
- def where[T](input: tensors.Tensor[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsBooleanOrNumeric[T]): tensors.Tensor[Long]
- Definition Classes
- Basic
- def zerosFraction[T](input: tensors.Tensor[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsNumeric[T]): tensors.Tensor[Float]
- Definition Classes
- Math
- def zeta[T](x: tensors.Tensor[T], q: tensors.Tensor[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsFloatOrDouble[T]): tensors.Tensor[T]
- Definition Classes
- Math
Deprecated Value Members
- def finalize(): Unit
- Attributes
- protected[lang]
- Definition Classes
- AnyRef
- Annotations
- @throws(classOf[java.lang.Throwable]) @Deprecated @deprecated
- Deprecated
(Since version ) See the corresponding Javadoc for more information.
- def floorDivide[T](x: tensors.Tensor[T], y: tensors.Tensor[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T]): tensors.Tensor[T]
- Definition Classes
- Math
- Annotations
- @deprecated
- Deprecated
(Since version 0.1) Use `truncateDivide` instead.