trait Math extends AnyRef
Linear Supertypes
Known Subclasses
Ordering
- Alphabetic
- By Inheritance
Inherited
- Math
- AnyRef
- Any
- Hide All
- Show All
Visibility
- Public
- Protected
Value Members
- final def !=(arg0: Any): Boolean
- Definition Classes
- AnyRef → Any
- final def ##: Int
- Definition Classes
- AnyRef → Any
- final def ==(arg0: Any): Boolean
- Definition Classes
- AnyRef → Any
- def abs[T, OL[A] <: OutputLike[A]](x: OL[T], name: String = "Abs")(implicit arg0: core.types.TF[T], arg1: core.types.IsReal[T], ev: Aux[OL, T]): OL[T]
- def absGradient[T](op: Op[Output[T], Output[T]], outputGradient: Output[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsReal[T]): Output[T]
- Attributes
- protected
- def accumulateN[T](inputs: Seq[Output[T]], shape: core.Shape = null, name: String = "AccumulateN")(implicit arg0: core.types.TF[T], arg1: core.types.IsNumeric[T]): Output[T]
- Annotations
- @throws(scala.this.throws.<init>$default$1[org.platanios.tensorflow.api.core.exception.InvalidArgumentException])
- def accumulateNGradient[T](op: Op[Seq[Output[T]], Output[T]], outputGradient: Output[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsNumeric[T]): Seq[Output[T]]
- Attributes
- protected
- def acos[T, OL[A] <: OutputLike[A]](x: OL[T], name: String = "Acos")(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T], ev: Aux[OL, T]): OL[T]
- def acosGradient[T](op: Op[Output[T], Output[T]], outputGradient: Output[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T]): Output[T]
- Attributes
- protected
- def acosh[T, OL[A] <: OutputLike[A]](x: OL[T], name: String = "ACosh")(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T], ev: Aux[OL, T]): OL[T]
- def acoshGradient[T](op: Op[Output[T], Output[T]], outputGradient: Output[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T]): Output[T]
- Attributes
- protected
- def add[T](x: Output[T], y: Output[T], name: String = "Add")(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T]): Output[T]
- def addGradient[T](op: Op[(Output[T], Output[T]), Output[T]], outputGradient: Output[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T]): (Output[T], Output[T])
- Attributes
- protected
- def addN[T](inputs: Seq[Output[T]], name: String = "AddN")(implicit arg0: core.types.TF[T], arg1: core.types.IsNumeric[T]): Output[T]
- def addNGradient[T](op: Op[Seq[Output[T]], Output[T]], outputGradient: Output[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsNumeric[T]): Seq[Output[T]]
- Attributes
- protected
- def all[I](input: Output[Boolean], axes: Output[I] = null, keepDims: Boolean = false, name: String = "All")(implicit arg0: IntDefault[I], arg1: core.types.TF[I], arg2: core.types.IsIntOrLong[I]): Output[Boolean]
- def angleDouble[OL[A] <: OutputLike[A]](input: OL[core.types.ComplexDouble], name: String = "Angle")(implicit ev: Aux[OL, core.types.ComplexDouble]): OL[Double]
- def angleFloat[OL[A] <: OutputLike[A]](input: OL[core.types.ComplexFloat], name: String = "Angle")(implicit ev: Aux[OL, core.types.ComplexFloat]): OL[Float]
- def any[I](input: Output[Boolean], axes: Output[I] = null, keepDims: Boolean = false, name: String = "Any")(implicit arg0: IntDefault[I], arg1: core.types.TF[I], arg2: core.types.IsIntOrLong[I]): Output[Boolean]
- def approximatelyEqual[T](x: Output[T], y: Output[T], tolerance: Float = 0.00001f, name: String = "ApproximatelyEqual")(implicit arg0: core.types.TF[T], arg1: core.types.IsNumeric[T]): Output[Boolean]
- def argmax[T, I, R](input: Output[T], axes: Output[I], outputDataType: core.types.DataType[R], name: String = "ArgMax")(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T], arg2: core.types.TF[I], arg3: core.types.IsIntOrLong[I], arg4: core.types.TF[R]): Output[R]
- def argmin[T, I, R](input: Output[T], axes: Output[I], outputDataType: core.types.DataType[R], name: String = "ArgMin")(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T], arg2: core.types.TF[I], arg3: core.types.IsIntOrLong[I], arg4: core.types.TF[R]): Output[R]
- final def asInstanceOf[T0]: T0
- Definition Classes
- Any
- def asin[T, OL[A] <: OutputLike[A]](x: OL[T], name: String = "Asin")(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T], ev: Aux[OL, T]): OL[T]
- def asinGradient[T](op: Op[Output[T], Output[T]], outputGradient: Output[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T]): Output[T]
- Attributes
- protected
- def asinh[T, OL[A] <: OutputLike[A]](x: OL[T], name: String = "ASinh")(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T], ev: Aux[OL, T]): OL[T]
- def asinhGradient[T](op: Op[Output[T], Output[T]], outputGradient: Output[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T]): Output[T]
- Attributes
- protected
- def atan[T, OL[A] <: OutputLike[A]](x: OL[T], name: String = "Atan")(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T], ev: Aux[OL, T]): OL[T]
- def atan2[T](x: Output[T], y: Output[T], name: String = "ATan2")(implicit arg0: core.types.TF[T], arg1: core.types.IsFloatOrDouble[T]): Output[T]
- def atan2Gradient[T](op: Op[(Output[T], Output[T]), Output[T]], outputGradient: Output[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsFloatOrDouble[T]): (Output[T], Output[T])
- Attributes
- protected
- def atanGradient[T](op: Op[Output[T], Output[T]], outputGradient: Output[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T]): Output[T]
- Attributes
- protected
- def atanh[T, OL[A] <: OutputLike[A]](x: OL[T], name: String = "ATanh")(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T], ev: Aux[OL, T]): OL[T]
- def atanhGradient[T](op: Op[Output[T], Output[T]], outputGradient: Output[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T]): Output[T]
- Attributes
- protected
- def batchMatmulGradient[T](op: Op[(Output[T], Output[T]), Output[T]], outputGradient: Output[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T]): (Output[T], Output[T])
- Attributes
- protected
- def binCount[T](input: Output[Int], dataType: core.types.DataType[T], weights: Output[T] = null, minLength: Output[Int] = null, maxLength: Output[Int] = null, name: String = "BinCount")(implicit arg0: core.types.TF[T], arg1: core.types.IsIntOrLongOrFloatOrDouble[T]): Output[T]
- def bucketize[T](input: Output[T], boundaries: Seq[Float], name: String = "Bucketize")(implicit arg0: core.types.TF[T], arg1: core.types.IsIntOrLongOrFloatOrDouble[T]): Output[T]
- def ceil[T, OL[A] <: OutputLike[A]](x: OL[T], name: String = "Ceil")(implicit arg0: core.types.TF[T], arg1: core.types.IsHalfOrFloatOrDouble[T], ev: Aux[OL, T]): OL[T]
- def clone(): AnyRef
- Attributes
- protected[lang]
- Definition Classes
- AnyRef
- Annotations
- @throws(classOf[java.lang.CloneNotSupportedException]) @native() @HotSpotIntrinsicCandidate()
- def complexDouble(real: Output[Double], imag: Output[Double], name: String = "Complex"): Output[core.types.ComplexDouble]
- def complexDoubleGradient(op: Op[(Output[Double], Output[Double]), Output[core.types.ComplexDouble]], outputGradient: Output[core.types.ComplexDouble]): (Output[Double], Output[Double])
- def complexFloat(real: Output[Float], imag: Output[Float], name: String = "Complex"): Output[core.types.ComplexFloat]
- def complexFloatGradient(op: Op[(Output[Float], Output[Float]), Output[core.types.ComplexFloat]], outputGradient: Output[core.types.ComplexFloat]): (Output[Float], Output[Float])
- def conjugate[T, OL[A] <: OutputLike[A]](input: OL[T], name: String = "Conjugate")(implicit arg0: core.types.TF[T], ev: Aux[OL, T]): OL[T]
- def conjugateGradient[T](op: Op[Output[T], Output[T]], outputGradient: Output[T])(implicit arg0: core.types.TF[T]): Output[T]
- Attributes
- protected
- def cos[T, OL[A] <: OutputLike[A]](x: OL[T], name: String = "Cos")(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T], ev: Aux[OL, T]): OL[T]
- def cosGradient[T](op: Op[Output[T], Output[T]], outputGradient: Output[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T]): Output[T]
- Attributes
- protected
- def cosh[T, OL[A] <: OutputLike[A]](x: OL[T], name: String = "Cosh")(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T], ev: Aux[OL, T]): OL[T]
- def coshGradient[T](op: Op[Output[T], Output[T]], outputGradient: Output[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T]): Output[T]
- Attributes
- protected
- def countNonZero[T, I](input: Output[T], axes: Output[I] = null, keepDims: Boolean = false, name: String = "CountNonZero")(implicit arg0: core.types.TF[T], arg1: core.types.IsNumeric[T], arg2: IntDefault[I], arg3: core.types.TF[I], arg4: core.types.IsIntOrLong[I]): Output[Long]
- def countNonZeroSparse[T, OL[A] <: OutputLike[A]](input: OL[T], name: String = "CountNonZero")(implicit arg0: core.types.TF[T], arg1: core.types.IsNumeric[T]): Output[Long]
- def cross[T](a: Output[T], b: Output[T], name: String = "Cross")(implicit arg0: core.types.TF[T], arg1: core.types.IsReal[T]): Output[T]
- def crossGradient[T](op: Op[(Output[T], Output[T]), Output[T]], outputGradient: Output[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsReal[T]): (Output[T], Output[T])
- Attributes
- protected
- def cumprod[T, I](input: Output[T], axis: Output[I], exclusive: Boolean = false, reverse: Boolean = false, name: String = "Cumprod")(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T], arg2: core.types.TF[I], arg3: core.types.IsIntOrLong[I]): Output[T]
- def cumprodGradient[T, I](op: Op[(Output[T], Output[I]), Output[T]], outputGradient: Output[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T], arg2: core.types.TF[I], arg3: core.types.IsIntOrLong[I]): (Output[T], Output[I])
- Attributes
- protected
- def cumsum[T, I](input: Output[T], axis: Output[I], exclusive: Boolean = false, reverse: Boolean = false, name: String = "Cumsum")(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T], arg2: core.types.TF[I], arg3: core.types.IsIntOrLong[I]): Output[T]
- def cumsumGradient[T, I](op: Op[(Output[T], Output[I]), Output[T]], outputGradient: Output[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T], arg2: core.types.TF[I], arg3: core.types.IsIntOrLong[I]): (Output[T], Output[I])
- Attributes
- protected
- def diag[T](diagonal: Output[T], name: String = "Diag")(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T]): Output[T]
- def diagGradient[T](op: Op[Output[T], Output[T]], outputGradient: Output[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T]): Output[T]
- Attributes
- protected
- def diagPart[T](input: Output[T], name: String = "DiagPart")(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T]): Output[T]
- def diagPartGradient[T](op: Op[Output[T], Output[T]], outputGradient: Output[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T]): Output[T]
- Attributes
- protected
- def digamma[T, OL[A] <: OutputLike[A]](x: OL[T], name: String = "Digamma")(implicit arg0: core.types.TF[T], arg1: core.types.IsFloatOrDouble[T], ev: Aux[OL, T]): OL[T]
- def digammaGradient[T](op: Op[Output[T], Output[T]], outputGradient: Output[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsFloatOrDouble[T]): Output[T]
- def divide[T](x: Output[T], y: Output[T], name: String = "Divide")(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T]): Output[T]
- def divideGradient[T](op: Op[(Output[T], Output[T]), Output[T]], outputGradient: Output[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T]): (Output[T], Output[T])
- Attributes
- protected
- final def eq(arg0: AnyRef): Boolean
- Definition Classes
- AnyRef
- def equal[T](x: Output[T], y: Output[T], name: String = "Equal")(implicit arg0: core.types.TF[T]): Output[Boolean]
- def equals(arg0: AnyRef): Boolean
- Definition Classes
- AnyRef → Any
- def erf[T, OL[A] <: OutputLike[A]](x: OL[T], name: String = "Erf")(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T], ev: Aux[OL, T]): OL[T]
- def erfGradient[T](op: Op[Output[T], Output[T]], outputGradient: Output[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T]): Output[T]
- Attributes
- protected
- def erfc[T, OL[A] <: OutputLike[A]](x: OL[T], name: String = "Erfc")(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T], ev: Aux[OL, T]): OL[T]
- def erfcGradient[T](op: Op[Output[T], Output[T]], outputGradient: Output[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T]): Output[T]
- Attributes
- protected
- def exp[T, OL[A] <: OutputLike[A]](x: OL[T], name: String = "Exp")(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T], ev: Aux[OL, T]): OL[T]
- def expGradient[T](op: Op[Output[T], Output[T]], outputGradient: Output[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T]): Output[T]
- Attributes
- protected
- def expm1[T, OL[A] <: OutputLike[A]](x: OL[T], name: String = "Expm1")(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T], ev: Aux[OL, T]): OL[T]
- def expm1Gradient[T](op: Op[Output[T], Output[T]], outputGradient: Output[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T]): Output[T]
- Attributes
- protected
- def floor[T, OL[A] <: OutputLike[A]](x: OL[T], name: String = "Floor")(implicit arg0: core.types.TF[T], arg1: core.types.IsHalfOrFloatOrDouble[T], ev: Aux[OL, T]): OL[T]
- def floorMod[T](x: Output[T], y: Output[T], name: String = "FloorMod")(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T]): Output[T]
- def gatherDropNegatives[T, I](parameters: Output[T], indices: Output[I], zeroClippedIndices: Output[I] = null, isPositive: Output[Boolean] = null)(implicit arg0: core.types.TF[T], arg1: core.types.IsNumeric[T], arg2: core.types.TF[I], arg3: core.types.IsIntOrLong[I]): (Output[T], Output[I], Output[Boolean])
- Attributes
- protected
- final def getClass(): Class[_ <: AnyRef]
- Definition Classes
- AnyRef → Any
- Annotations
- @native() @HotSpotIntrinsicCandidate()
- def greater[T](x: Output[T], y: Output[T], name: String = "Greater")(implicit arg0: core.types.TF[T], arg1: core.types.IsNumeric[T]): Output[Boolean]
- def greaterEqual[T](x: Output[T], y: Output[T], name: String = "GreaterEqual")(implicit arg0: core.types.TF[T], arg1: core.types.IsNumeric[T]): Output[Boolean]
- def hashCode(): Int
- Definition Classes
- AnyRef → Any
- Annotations
- @native() @HotSpotIntrinsicCandidate()
- def igamma[T](a: Output[T], x: Output[T], name: String = "Igamma")(implicit arg0: core.types.TF[T], arg1: core.types.IsFloatOrDouble[T]): Output[T]
- def igammaGradient[T](op: Op[(Output[T], Output[T]), Output[T]], outputGradient: Output[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsFloatOrDouble[T]): (Output[T], Output[T])
- Attributes
- protected
- def igammac[T](a: Output[T], x: Output[T], name: String = "Igammac")(implicit arg0: core.types.TF[T], arg1: core.types.IsFloatOrDouble[T]): Output[T]
- def igammacGradient[T](op: Op[(Output[T], Output[T]), Output[T]], outputGradient: Output[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsFloatOrDouble[T]): (Output[T], Output[T])
- Attributes
- protected
- def imagDouble[OL[A] <: OutputLike[A]](input: OL[core.types.ComplexDouble], name: String = "Imag")(implicit ev: Aux[OL, core.types.ComplexDouble]): OL[Double]
- def imagDoubleGradient(op: Op[Output[core.types.ComplexDouble], Output[Double]], outputGradient: Output[Double]): Output[core.types.ComplexDouble]
- Attributes
- protected
- def imagFloat[OL[A] <: OutputLike[A]](input: OL[core.types.ComplexFloat], name: String = "Imag")(implicit ev: Aux[OL, core.types.ComplexFloat]): OL[Float]
- def imagFloatGradient(op: Op[Output[core.types.ComplexFloat], Output[Float]], outputGradient: Output[Float]): Output[core.types.ComplexFloat]
- Attributes
- protected
- def incompleteBeta[T](a: Output[T], b: Output[T], x: Output[T], name: String = "IncompleteBeta")(implicit arg0: core.types.TF[T], arg1: core.types.IsFloatOrDouble[T]): Output[T]
- def incompleteBetaGradient[T](op: Op[(Output[T], Output[T], Output[T]), Output[T]], outputGradient: Output[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsFloatOrDouble[T]): (Output[T], Output[T], Output[T])
- Attributes
- protected
- def isFinite[T, OL[A] <: OutputLike[A]](x: OL[T], name: String = "IsFinite")(implicit arg0: core.types.TF[T], arg1: core.types.IsHalfOrFloatOrDouble[T], ev: Aux[OL, T]): OL[Boolean]
- def isInf[T, OL[A] <: OutputLike[A]](x: OL[T], name: String = "IsInf")(implicit arg0: core.types.TF[T], arg1: core.types.IsHalfOrFloatOrDouble[T], ev: Aux[OL, T]): OL[Boolean]
- final def isInstanceOf[T0]: Boolean
- Definition Classes
- Any
- def isNaN[T, OL[A] <: OutputLike[A]](x: OL[T], name: String = "IsNaN")(implicit arg0: core.types.TF[T], arg1: core.types.IsHalfOrFloatOrDouble[T], ev: Aux[OL, T]): OL[Boolean]
- def less[T](x: Output[T], y: Output[T], name: String = "Less")(implicit arg0: core.types.TF[T], arg1: core.types.IsNumeric[T]): Output[Boolean]
- def lessEqual[T](x: Output[T], y: Output[T], name: String = "LessEqual")(implicit arg0: core.types.TF[T], arg1: core.types.IsNumeric[T]): Output[Boolean]
- def linspace[T, I](start: Output[T], stop: Output[T], numberOfValues: Output[I], name: String = "LinSpace")(implicit arg0: core.types.TF[T], arg1: core.types.IsTruncatedHalfOrFloatOrDouble[T], arg2: core.types.TF[I], arg3: core.types.IsIntOrLong[I]): Output[T]
- def log[T, OL[A] <: OutputLike[A]](x: OL[T], name: String = "Log")(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T], ev: Aux[OL, T]): OL[T]
- def log1p[T, OL[A] <: OutputLike[A]](x: OL[T], name: String = "Log1p")(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T], ev: Aux[OL, T]): OL[T]
- def log1pGradient[T](op: Op[Output[T], Output[T]], outputGradient: Output[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T]): Output[T]
- Attributes
- protected
- def logGamma[T, OL[A] <: OutputLike[A]](x: OL[T], name: String = "LogGamma")(implicit arg0: core.types.TF[T], arg1: core.types.IsFloatOrDouble[T], ev: Aux[OL, T]): OL[T]
- def logGammaGradient[T](op: Op[Output[T], Output[T]], outputGradient: Output[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsFloatOrDouble[T]): Output[T]
- Attributes
- protected
- def logGradient[T](op: Op[Output[T], Output[T]], outputGradient: Output[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T]): Output[T]
- Attributes
- protected
- def logSigmoid[T, OL[A] <: OutputLike[A]](x: OL[T], name: String = "LogSigmoid")(implicit arg0: core.types.TF[T], arg1: core.types.IsDecimal[T], ev: Aux[OL, T]): OL[T]
- def logSumExp[T, I](input: Output[T], axes: Output[I] = null, keepDims: Boolean = false, name: String = "LogSumExp")(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T], arg2: IntDefault[I], arg3: core.types.TF[I], arg4: core.types.IsIntOrLong[I]): Output[T]
- def logicalAnd(x: Output[Boolean], y: Output[Boolean], name: String = "LogicalAnd"): Output[Boolean]
- def logicalNot(x: Output[Boolean], name: String = "LogicalNot"): Output[Boolean]
- def logicalOr(x: Output[Boolean], y: Output[Boolean], name: String = "LogicalOr"): Output[Boolean]
- def logicalXOr(x: Output[Boolean], y: Output[Boolean], name: String = "LogicalXOr"): Output[Boolean]
- def magnitudeDouble[OL[A] <: OutputLike[A]](x: OL[core.types.ComplexDouble], name: String = "Magnitude")(implicit ev: Aux[OL, core.types.ComplexDouble]): OL[Double]
- def magnitudeDoubleGradient(op: Op[Output[core.types.ComplexDouble], Output[Double]], outputGradient: Output[Double]): Output[core.types.ComplexDouble]
- Attributes
- protected
- def magnitudeFloat[OL[A] <: OutputLike[A]](x: OL[core.types.ComplexFloat], name: String = "Magnitude")(implicit ev: Aux[OL, core.types.ComplexFloat]): OL[Float]
- def magnitudeFloatGradient(op: Op[Output[core.types.ComplexFloat], Output[Float]], outputGradient: Output[Float]): Output[core.types.ComplexFloat]
- Attributes
- protected
- def matmul[T](a: Output[T], b: Output[T], transposeA: Boolean = false, transposeB: Boolean = false, conjugateA: Boolean = false, conjugateB: Boolean = false, aIsSparse: Boolean = false, bIsSparse: Boolean = false, name: String = "MatMul")(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T]): Output[T]
- def matmulGradient[T](op: Op[(Output[T], Output[T]), Output[T]], outputGradient: Output[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T]): (Output[T], Output[T])
- Attributes
- protected
- def matrixBandPart[T, I](input: Output[T], numSubDiagonals: Output[I], numSuperDiagonals: Output[I], name: String = "MatrixBandPart")(implicit arg0: core.types.TF[T], arg1: core.types.TF[I], arg2: core.types.IsIntOrLong[I]): Output[T]
- def matrixBandPartGradient[T, I](op: Op[(Output[T], Output[I], Output[I]), Output[T]], outputGradient: Output[T])(implicit arg0: core.types.TF[T], arg1: core.types.TF[I], arg2: core.types.IsIntOrLong[I]): (Output[T], Output[I], Output[I])
- Attributes
- protected
- def matrixDiag[T](diagonal: Output[T], name: String = "MatrixDiag")(implicit arg0: core.types.TF[T]): Output[T]
- def matrixDiagGradient[T](op: Op[Output[T], Output[T]], outputGradient: Output[T])(implicit arg0: core.types.TF[T]): Output[T]
- Attributes
- protected
- def matrixDiagPart[T](input: Output[T], name: String = "MatrixDiagPart")(implicit arg0: core.types.TF[T]): Output[T]
- def matrixDiagPartGradient[T](op: Op[Output[T], Output[T]], outputGradient: Output[T])(implicit arg0: core.types.TF[T]): Output[T]
- Attributes
- protected
- def matrixSetDiag[T](input: Output[T], diagonal: Output[T], name: String = "MatrixSetDiag")(implicit arg0: core.types.TF[T]): Output[T]
- def matrixSetDiagGradient[T](op: Op[(Output[T], Output[T]), Output[T]], outputGradient: Output[T])(implicit arg0: core.types.TF[T]): (Output[T], Output[T])
- Attributes
- protected
- def max[T, I](input: Output[T], axes: Output[I] = null, keepDims: Boolean = false, name: String = "Max")(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T], arg2: IntDefault[I], arg3: core.types.TF[I], arg4: core.types.IsIntOrLong[I]): Output[T]
- def maximum[T](x: Output[T], y: Output[T], name: String = "Maximum")(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T]): Output[T]
- def maximumGradient[T](op: Op[(Output[T], Output[T]), Output[T]], outputGradient: Output[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T]): (Output[T], Output[T])
- Attributes
- protected
- def mean[T, I](input: Output[T], axes: Output[I] = null, keepDims: Boolean = false, name: String = "Mean")(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T], arg2: IntDefault[I], arg3: core.types.TF[I], arg4: core.types.IsIntOrLong[I]): Output[T]
- def meanGradient[T, I](op: Op[(Output[T], Output[I]), Output[T]], outputGradient: Output[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T], arg2: core.types.TF[I], arg3: core.types.IsIntOrLong[I]): (Output[T], Output[I])
- Attributes
- protected
- def min[T, I](input: Output[T], axes: Output[I] = null, keepDims: Boolean = false, name: String = "Min")(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T], arg2: IntDefault[I], arg3: core.types.TF[I], arg4: core.types.IsIntOrLong[I]): Output[T]
- def minOrMaxGradient[T, I](op: Op[(Output[T], Output[I]), Output[T]], outputGradient: Output[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T], arg2: core.types.TF[I], arg3: core.types.IsIntOrLong[I]): (Output[T], Output[I])
- Attributes
- protected
- def minimum[T](x: Output[T], y: Output[T], name: String = "Minimum")(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T]): Output[T]
- def minimumGradient[T](op: Op[(Output[T], Output[T]), Output[T]], outputGradient: Output[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T]): (Output[T], Output[T])
- Attributes
- protected
- def mod[T](x: Output[T], y: Output[T], name: String = "Mod")(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T]): Output[T]
- def multiply[T](x: Output[T], y: Output[T], name: String = "Multiply")(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T]): Output[T]
- def multiplyGradient[T](op: Op[(Output[T], Output[T]), Output[T]], outputGradient: Output[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T]): (Output[T], Output[T])
- Attributes
- protected
- final def ne(arg0: AnyRef): Boolean
- Definition Classes
- AnyRef
- def negate[T, OL[A] <: OutputLike[A]](x: OL[T], name: String = "Negate")(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T], ev: Aux[OL, T]): OL[T]
- def negateGradient[T](op: Op[Output[T], Output[T]], outputGradient: Output[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T]): Output[T]
- Attributes
- protected
- def notEqual[T](x: Output[T], y: Output[T], name: String = "NotEqual")(implicit arg0: core.types.TF[T]): Output[Boolean]
- final def notify(): Unit
- Definition Classes
- AnyRef
- Annotations
- @native() @HotSpotIntrinsicCandidate()
- final def notifyAll(): Unit
- Definition Classes
- AnyRef
- Annotations
- @native() @HotSpotIntrinsicCandidate()
- def polygamma[T](n: Output[T], x: Output[T], name: String = "Polygamma")(implicit arg0: core.types.TF[T], arg1: core.types.IsFloatOrDouble[T]): Output[T]
- def polygammaGradient[T](op: Op[(Output[T], Output[T]), Output[T]], outputGradient: Output[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsFloatOrDouble[T]): (Output[T], Output[T])
- Attributes
- protected
- def pow[T](x: Output[T], y: Output[T], name: String = "Pow")(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T]): Output[T]
- def powGradient[T](op: Op[(Output[T], Output[T]), Output[T]], outputGradient: Output[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T]): (Output[T], Output[T])
- Attributes
- protected
- def prod[T, I](input: Output[T], axes: Output[I] = null, keepDims: Boolean = false, name: String = "Prod")(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T], arg2: IntDefault[I], arg3: core.types.TF[I], arg4: core.types.IsIntOrLong[I]): Output[T]
- def prodGradient[T, I](op: Op[(Output[T], Output[I]), Output[T]], outputGradient: Output[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T], arg2: core.types.TF[I], arg3: core.types.IsIntOrLong[I]): (Output[T], Output[I])
- def range[T](start: Output[T], limit: Output[T], delta: Output[T] = null, name: String = "Range")(implicit arg0: core.types.TF[T], arg1: core.types.IsNumeric[T]): Output[T]
- def realDivide[T](x: Output[T], y: Output[T], name: String = "RealDivide")(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T]): Output[T]
- def realDivideGradient[T](op: Op[(Output[T], Output[T]), Output[T]], outputGradient: Output[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T]): (Output[T], Output[T])
- Attributes
- protected
- def realDouble[OL[A] <: OutputLike[A]](input: OL[core.types.ComplexDouble], name: String = "Real")(implicit ev: Aux[OL, core.types.ComplexDouble]): OL[Double]
- def realDoubleGradient(op: Op[Output[core.types.ComplexDouble], Output[Double]], outputGradient: Output[Double]): Output[core.types.ComplexDouble]
- Attributes
- protected
- def realFloat[OL[A] <: OutputLike[A]](input: OL[core.types.ComplexFloat], name: String = "Real")(implicit ev: Aux[OL, core.types.ComplexFloat]): OL[Float]
- def realFloatGradient(op: Op[Output[core.types.ComplexFloat], Output[Float]], outputGradient: Output[Float]): Output[core.types.ComplexFloat]
- Attributes
- protected
- def reciprocal[T, OL[A] <: OutputLike[A]](x: OL[T], name: String = "Reciprocal")(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T], ev: Aux[OL, T]): OL[T]
- def reciprocalGradient[T](op: Op[OutputLike[T], Output[T]], outputGradient: Output[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T]): OutputLike[T]
- Attributes
- protected
- def reciprocalHessian[T](op: Op[(Output[T], OutputLike[T]), Output[T]], outputGradient: Output[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T]): (Output[T], OutputLike[T])
- Attributes
- protected
- def reductionAxes[T, I, OL[A] <: OutputLike[A]](tensor: OL[T], axes: Output[I])(implicit arg0: core.types.TF[T], arg1: core.types.TF[I], arg2: core.types.IsIntOrLong[I]): Output[I]
- Attributes
- protected
- def round[T, OL[A] <: OutputLike[A]](x: OL[T], name: String = "Round")(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T], ev: Aux[OL, T]): OL[T]
- def roundInt[T, OL[A] <: OutputLike[A]](x: OL[T], name: String = "RoundInt")(implicit arg0: core.types.TF[T], arg1: core.types.IsHalfOrFloatOrDouble[T], ev: Aux[OL, T]): OL[T]
- def rsqrt[T, OL[A] <: OutputLike[A]](x: OL[T], name: String = "Rsqrt")(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T], ev: Aux[OL, T]): OL[T]
- def rsqrtGradient[T](op: Op[OutputLike[T], Output[T]], outputGradient: Output[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T]): OutputLike[T]
- Attributes
- protected
- def rsqrtHessian[T](op: Op[(Output[T], OutputLike[T]), Output[T]], outputGradient: Output[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T]): (Output[T], OutputLike[T])
- Attributes
- protected
- def safeShapeDiv(x: Output[Int], y: Output[Int]): Output[Int]
- Attributes
- protected
- def scalarMul[T, OL[A] <: OutputLike[A]](scalar: Output[T], tensor: OL[T], name: String = "ScalarMul")(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T], ev: Aux[OL, T]): OL[T]
- def segmentMax[T, I](data: Output[T], segmentIndices: Output[I], name: String = "SegmentMax")(implicit arg0: core.types.TF[T], arg1: core.types.IsReal[T], arg2: core.types.TF[I], arg3: core.types.IsIntOrLong[I]): Output[T]
- def segmentMean[T, I](data: Output[T], segmentIndices: Output[I], name: String = "SegmentMean")(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T], arg2: core.types.TF[I], arg3: core.types.IsIntOrLong[I]): Output[T]
- def segmentMeanGradient[T, I](op: Op[(Output[T], Output[I]), Output[T]], outputGradient: Output[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T], arg2: core.types.TF[I], arg3: core.types.IsIntOrLong[I]): (Output[T], Output[I])
- Attributes
- protected
- def segmentMin[T, I](data: Output[T], segmentIndices: Output[I], name: String = "SegmentMin")(implicit arg0: core.types.TF[T], arg1: core.types.IsReal[T], arg2: core.types.TF[I], arg3: core.types.IsIntOrLong[I]): Output[T]
- def segmentMinOrMaxGradient[T, I](op: Op[(Output[T], Output[I]), Output[T]], outputGradient: Output[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsReal[T], arg2: core.types.TF[I], arg3: core.types.IsIntOrLong[I]): (Output[T], Output[I])
- Attributes
- protected
- def segmentProd[T, I](data: Output[T], segmentIndices: Output[I], name: String = "SegmentProd")(implicit arg0: core.types.TF[T], arg1: core.types.IsNumeric[T], arg2: core.types.TF[I], arg3: core.types.IsIntOrLong[I]): Output[T]
- def segmentSum[T, I](data: Output[T], segmentIndices: Output[I], name: String = "SegmentSum")(implicit arg0: core.types.TF[T], arg1: core.types.IsNumeric[T], arg2: core.types.TF[I], arg3: core.types.IsIntOrLong[I]): Output[T]
- def segmentSumGradient[T, I](op: Op[(Output[T], Output[I]), Output[T]], outputGradient: Output[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsNumeric[T], arg2: core.types.TF[I], arg3: core.types.IsIntOrLong[I]): (Output[T], Output[I])
- Attributes
- protected
- def select[T](condition: Output[Boolean], x: Output[T], y: Output[T], name: String = "Select")(implicit arg0: core.types.TF[T]): Output[T]
- def selectGradient[T](op: Op[(Output[Boolean], Output[T], Output[T]), Output[T]], outputGradient: Output[T])(implicit arg0: core.types.TF[T]): (Output[Boolean], Output[T], Output[T])
- Attributes
- protected
- def shapeFullySpecifiedAndEqual[T](x: Output[T], y: Output[T], gradient: Output[T])(implicit arg0: core.types.TF[T]): Boolean
- Attributes
- protected
- def sigmoid[T, OL[A] <: OutputLike[A]](x: OL[T], name: String = "Sigmoid")(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T], ev: Aux[OL, T]): OL[T]
- def sigmoidGradient[T](op: Op[OutputLike[T], Output[T]], outputGradient: Output[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T]): OutputLike[T]
- Attributes
- protected
- def sigmoidHessian[T](op: Op[(Output[T], OutputLike[T]), Output[T]], outputGradient: Output[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T]): (Output[T], OutputLike[T])
- Attributes
- protected
- def sign[T, OL[A] <: OutputLike[A]](x: OL[T], name: String = "Sign")(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T], ev: Aux[OL, T]): OL[T]
- def signGradient[T](op: Op[Output[T], Output[T]], outputGradient: Output[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T]): Output[T]
- Attributes
- protected
- def sin[T, OL[A] <: OutputLike[A]](x: OL[T], name: String = "Sin")(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T], ev: Aux[OL, T]): OL[T]
- def sinGradient[T](op: Op[Output[T], Output[T]], outputGradient: Output[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T]): Output[T]
- Attributes
- protected
- def sinh[T, OL[A] <: OutputLike[A]](x: OL[T], name: String = "Sinh")(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T], ev: Aux[OL, T]): OL[T]
- def sinhGradient[T](op: Op[Output[T], Output[T]], outputGradient: Output[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T]): Output[T]
- Attributes
- protected
- def sparseMatmulGradient[T](op: Op[(Output[T], Output[T]), Output[T]], outputGradient: Output[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T]): (Output[T], Output[T])
- Attributes
- protected
- def sparseSegmentMean[T, I1, I2](data: Output[T], indices: Output[I1], segmentIndices: Output[Int], numSegments: Output[I2] = null, name: String = "SparseSegmentMean")(implicit arg0: core.types.TF[T], arg1: core.types.IsReal[T], arg2: core.types.TF[I1], arg3: core.types.IsIntOrLong[I1], arg4: IntDefault[I2], arg5: core.types.TF[I2], arg6: core.types.IsIntOrLong[I2]): Output[T]
- def sparseSegmentMeanGradient[T, I1](op: Op[(Output[T], Output[I1], Output[Int]), Output[T]], outputGradient: Output[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsReal[T], arg2: core.types.TF[I1], arg3: core.types.IsIntOrLong[I1]): (Output[T], Output[I1], Output[Int])
- Attributes
- protected
- def sparseSegmentMeanWithNumSegmentsGradient[T, I1, I2](op: Op[(Output[T], Output[I1], Output[Int], Output[I2]), Output[T]], outputGradient: Output[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsReal[T], arg2: core.types.TF[I1], arg3: core.types.IsIntOrLong[I1], arg4: core.types.TF[I2], arg5: core.types.IsIntOrLong[I2]): (Output[T], Output[I1], Output[Int], Output[I2])
- Attributes
- protected
- def sparseSegmentSum[T, I1, I2](data: Output[T], indices: Output[I1], segmentIndices: Output[Int], numSegments: Output[I2] = null, name: String = "SparseSegmentSum")(implicit arg0: core.types.TF[T], arg1: core.types.IsReal[T], arg2: core.types.TF[I1], arg3: core.types.IsIntOrLong[I1], arg4: IntDefault[I2], arg5: core.types.TF[I2], arg6: core.types.IsIntOrLong[I2]): Output[T]
- def sparseSegmentSumGradient[T, I1](op: Op[(Output[T], Output[I1], Output[Int]), Output[T]], outputGradient: Output[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsReal[T], arg2: core.types.TF[I1], arg3: core.types.IsIntOrLong[I1]): (Output[T], Output[I1], Output[Int])
- Attributes
- protected
- def sparseSegmentSumSqrtN[T, I1, I2](data: Output[T], indices: Output[I1], segmentIndices: Output[Int], numSegments: Output[I2] = null, name: String = "SparseSegmentSumSqrtN")(implicit arg0: core.types.TF[T], arg1: core.types.IsReal[T], arg2: core.types.TF[I1], arg3: core.types.IsIntOrLong[I1], arg4: IntDefault[I2], arg5: core.types.TF[I2], arg6: core.types.IsIntOrLong[I2]): Output[T]
- def sparseSegmentSumSqrtNGradient[T, I1](op: Op[(Output[T], Output[I1], Output[Int]), Output[T]], outputGradient: Output[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsReal[T], arg2: core.types.TF[I1], arg3: core.types.IsIntOrLong[I1]): (Output[T], Output[I1], Output[Int])
- Attributes
- protected
- def sparseSegmentSumSqrtNWithNumSegmentsGradient[T, I1, I2](op: Op[(Output[T], Output[I1], Output[Int], Output[I2]), Output[T]], outputGradient: Output[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsReal[T], arg2: core.types.TF[I1], arg3: core.types.IsIntOrLong[I1], arg4: core.types.TF[I2], arg5: core.types.IsIntOrLong[I2]): (Output[T], Output[I1], Output[Int], Output[I2])
- Attributes
- protected
- def sparseSegmentSumWithNumSegmentsGradient[T, I1, I2](op: Op[(Output[T], Output[I1], Output[Int], Output[I2]), Output[T]], outputGradient: Output[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsReal[T], arg2: core.types.TF[I1], arg3: core.types.IsIntOrLong[I1], arg4: core.types.TF[I2], arg5: core.types.IsIntOrLong[I2]): (Output[T], Output[I1], Output[Int], Output[I2])
- Attributes
- protected
- def sqrt[T, OL[A] <: OutputLike[A]](x: OL[T], name: String = "Sqrt")(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T], ev: Aux[OL, T]): OL[T]
- def sqrtGradient[T](op: Op[OutputLike[T], Output[T]], outputGradient: Output[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T]): OutputLike[T]
- Attributes
- protected
- def sqrtHessian[T](op: Op[(Output[T], OutputLike[T]), Output[T]], outputGradient: Output[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T]): (Output[T], OutputLike[T])
- Attributes
- protected
- def square[T, OL[A] <: OutputLike[A]](x: OL[T], name: String = "Square")(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T], ev: Aux[OL, T]): OL[T]
- def squareGradient[T](op: Op[Output[T], Output[T]], outputGradient: Output[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T]): Output[T]
- Attributes
- protected
- def squaredDifference[T](x: Output[T], y: Output[T], name: String = "SquaredDifference")(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T]): Output[T]
- def squaredDifferenceGradient[T](op: Op[(Output[T], Output[T]), Output[T]], outputGradient: Output[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T]): (Output[T], Output[T])
- Attributes
- protected
- def subtract[T](x: Output[T], y: Output[T], name: String = "Subtract")(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T]): Output[T]
- def subtractGradient[T](op: Op[(Output[T], Output[T]), Output[T]], outputGradient: Output[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T]): (Output[T], Output[T])
- Attributes
- protected
- def sum[T, I](input: Output[T], axes: Output[I] = null, keepDims: Boolean = false, name: String = "Sum")(implicit arg0: core.types.TF[T], arg1: core.types.IsNumeric[T], arg2: IntDefault[I], arg3: core.types.TF[I], arg4: core.types.IsIntOrLong[I]): Output[T]
- def sumGradient[T, I](op: Op[(Output[T], Output[I]), Output[T]], outputGradient: Output[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsNumeric[T], arg2: core.types.TF[I], arg3: core.types.IsIntOrLong[I]): (Output[T], Output[I])
- Attributes
- protected
- final def synchronized[T0](arg0: => T0): T0
- Definition Classes
- AnyRef
- def tan[T, OL[A] <: OutputLike[A]](x: OL[T], name: String = "Tan")(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T], ev: Aux[OL, T]): OL[T]
- def tanGradient[T](op: Op[Output[T], Output[T]], outputGradient: Output[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T]): Output[T]
- Attributes
- protected
- def tanh[T, OL[A] <: OutputLike[A]](x: OL[T], name: String = "Tanh")(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T], ev: Aux[OL, T]): OL[T]
- def tanhGradient[T](op: Op[OutputLike[T], Output[T]], outputGradient: Output[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T]): OutputLike[T]
- Attributes
- protected
- def tanhHessian[T](op: Op[(Output[T], OutputLike[T]), Output[T]], outputGradient: Output[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T]): (Output[T], OutputLike[T])
- Attributes
- protected
- def tensorDot[T](a: Output[T], b: Output[T], axesA: Seq[Int], axesB: Seq[Int], name: String)(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T]): Output[T]
- Annotations
- @throws(scala.this.throws.<init>$default$1[org.platanios.tensorflow.api.core.exception.InvalidArgumentException])
- def tensorDot[T](a: Output[T], b: Output[T], axesA: Seq[Int], axesB: Seq[Int])(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T]): Output[T]
- Annotations
- @throws(scala.this.throws.<init>$default$1[org.platanios.tensorflow.api.core.exception.InvalidArgumentException])
- def tensorDot[T](a: Output[T], b: Output[T], numAxes: Int, name: String)(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T]): Output[T]
- Annotations
- @throws(scala.this.throws.<init>$default$1[org.platanios.tensorflow.api.core.exception.InvalidArgumentException])
- def tensorDot[T](a: Output[T], b: Output[T], numAxes: Int)(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T]): Output[T]
- Annotations
- @throws(scala.this.throws.<init>$default$1[org.platanios.tensorflow.api.core.exception.InvalidArgumentException])
- def tensorDotDynamic[T](a: Output[T], b: Output[T], axesA: Output[Int], axesB: Output[Int], name: String = "TensorDot")(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T]): Output[T]
- Annotations
- @throws(scala.this.throws.<init>$default$1[org.platanios.tensorflow.api.core.exception.InvalidArgumentException])
- def tensorDotDynamic[T](a: Output[T], b: Output[T], axesA: Output[Int], axesB: Output[Int])(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T]): Output[T]
- Annotations
- @throws(scala.this.throws.<init>$default$1[org.platanios.tensorflow.api.core.exception.InvalidArgumentException])
- def tensorDotDynamic[T](a: Output[T], b: Output[T], numAxes: Output[Int], name: String)(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T]): Output[T]
- Annotations
- @throws(scala.this.throws.<init>$default$1[org.platanios.tensorflow.api.core.exception.InvalidArgumentException])
- def tensorDotDynamic[T](a: Output[T], b: Output[T], numAxes: Output[Int])(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T]): Output[T]
- Annotations
- @throws(scala.this.throws.<init>$default$1[org.platanios.tensorflow.api.core.exception.InvalidArgumentException])
- def toString(): String
- Definition Classes
- AnyRef → Any
- def trace[T](input: Output[T], name: String = "Trace")(implicit arg0: core.types.TF[T], arg1: core.types.IsNumeric[T]): Output[T]
- def transposeConjugateToAdjoint[T](tensor: Output[T], transpose: Boolean, conj: Boolean)(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T]): (Output[T], Boolean)
- Attributes
- protected
- def transposeConjugateToTranspose[T](tensor: Output[T], transpose: Boolean, conj: Boolean)(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T]): (Output[T], Boolean)
- Attributes
- protected
- def truncateDivide[T](x: Output[T], y: Output[T], name: String = "TruncateDivide")(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T]): Output[T]
- def truncateMod[T](x: Output[T], y: Output[T], name: String = "TruncateMod")(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T]): Output[T]
- def unsortedSegmentMax[T, I1, I2](data: Output[T], segmentIndices: Output[I1], segmentsNumber: Output[I2], name: String = "UnsortedSegmentMax")(implicit arg0: core.types.TF[T], arg1: core.types.IsReal[T], arg2: core.types.TF[I1], arg3: core.types.IsIntOrLong[I1], arg4: core.types.TF[I2], arg5: core.types.IsIntOrLong[I2]): Output[T]
- def unsortedSegmentMean[T, I1, I2](data: Output[T], segmentIndices: Output[I1], segmentsNumber: Output[I2], name: String = "UnsortedSegmentMean")(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T], arg2: core.types.TF[I1], arg3: core.types.IsIntOrLong[I1], arg4: core.types.TF[I2], arg5: core.types.IsIntOrLong[I2]): Output[T]
- def unsortedSegmentMin[T, I1, I2](data: Output[T], segmentIndices: Output[I1], segmentsNumber: Output[I2], name: String = "UnsortedSegmentMin")(implicit arg0: core.types.TF[T], arg1: core.types.IsReal[T], arg2: core.types.TF[I1], arg3: core.types.IsIntOrLong[I1], arg4: core.types.TF[I2], arg5: core.types.IsIntOrLong[I2]): Output[T]
- def unsortedSegmentMinOrMaxGradient[T, I1, I2](op: Op[(Output[T], Output[I1], Output[I2]), Output[T]], outputGradient: Output[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsReal[T], arg2: core.types.TF[I1], arg3: core.types.IsIntOrLong[I1], arg4: core.types.TF[I2], arg5: core.types.IsIntOrLong[I2]): (Output[T], Output[I1], Output[I2])
- Attributes
- protected
- def unsortedSegmentN[T, I1, I2](data: Output[T], segmentIndices: Output[I1], segmentsNumber: Output[I2], name: String = "UnsortedSegmentN")(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T], arg2: core.types.TF[I1], arg3: core.types.IsIntOrLong[I1], arg4: core.types.TF[I2], arg5: core.types.IsIntOrLong[I2]): Output[T]
- Attributes
- protected
- def unsortedSegmentProd[T, I1, I2](data: Output[T], segmentIndices: Output[I1], segmentsNumber: Output[I2], name: String = "UnsortedSegmentProd")(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T], arg2: core.types.TF[I1], arg3: core.types.IsIntOrLong[I1], arg4: core.types.TF[I2], arg5: core.types.IsIntOrLong[I2]): Output[T]
- def unsortedSegmentProdGradient[T, I1, I2](op: Op[(Output[T], Output[I1], Output[I2]), Output[T]], outputGradient: Output[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T], arg2: core.types.TF[I1], arg3: core.types.IsIntOrLong[I1], arg4: core.types.TF[I2], arg5: core.types.IsIntOrLong[I2]): (Output[T], Output[I1], Output[I2])
- Attributes
- protected
- def unsortedSegmentSqrtN[T, I1, I2](data: Output[T], segmentIndices: Output[I1], segmentsNumber: Output[I2], name: String = "UnsortedSegmentSqrtN")(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T], arg2: core.types.TF[I1], arg3: core.types.IsIntOrLong[I1], arg4: core.types.TF[I2], arg5: core.types.IsIntOrLong[I2]): Output[T]
- def unsortedSegmentSum[T, I1, I2](data: Output[T], segmentIndices: Output[I1], segmentsNumber: Output[I2], name: String = "UnsortedSegmentSum")(implicit arg0: core.types.TF[T], arg1: core.types.IsNumeric[T], arg2: core.types.TF[I1], arg3: core.types.IsIntOrLong[I1], arg4: core.types.TF[I2], arg5: core.types.IsIntOrLong[I2]): Output[T]
- def unsortedSegmentSumGradient[T, I1, I2](op: Op[(Output[T], Output[I1], Output[I2]), Output[T]], outputGradient: Output[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsNumeric[T], arg2: core.types.TF[I1], arg3: core.types.IsIntOrLong[I1], arg4: core.types.TF[I2], arg5: core.types.IsIntOrLong[I2]): (Output[T], Output[I1], Output[I2])
- Attributes
- protected
- final def wait(arg0: Long, arg1: Int): Unit
- Definition Classes
- AnyRef
- Annotations
- @throws(classOf[java.lang.InterruptedException])
- final def wait(arg0: Long): Unit
- Definition Classes
- AnyRef
- Annotations
- @throws(classOf[java.lang.InterruptedException]) @native()
- final def wait(): Unit
- Definition Classes
- AnyRef
- Annotations
- @throws(classOf[java.lang.InterruptedException])
- def zerosFraction[T](input: Output[T], name: String = "ZerosFraction")(implicit arg0: core.types.TF[T], arg1: core.types.IsNumeric[T]): Output[Float]
- def zeta[T](x: Output[T], q: Output[T], name: String = "Zeta")(implicit arg0: core.types.TF[T], arg1: core.types.IsFloatOrDouble[T]): Output[T]
- def zetaGradient[T](op: Op[(Output[T], Output[T]), Output[T]], outputGradient: Output[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsFloatOrDouble[T]): (Output[T], Output[T])
- Attributes
- protected
- object bitwise extends Bitwise
Deprecated Value Members
- def finalize(): Unit
- Attributes
- protected[lang]
- Definition Classes
- AnyRef
- Annotations
- @throws(classOf[java.lang.Throwable]) @Deprecated @deprecated
- Deprecated
(Since version ) see corresponding Javadoc for more information.
- def floorDivide[T](x: Output[T], y: Output[T], name: String = "FloorDivide")(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T]): Output[T]
- Annotations
- @deprecated
- Deprecated
(Since version 0.1) Use
truncateDivide
instead.