In the following examples, we assume:

- `manager` is an instance of `ai.djl.ndarray.NDManager`
- `$tensor` is a placeholder for an instance of `torch.Tensor`
- `$ndarray` is a placeholder for an instance of `ai.djl.ndarray.NDArray`
- you import the following packages:

```java
import ai.djl.ndarray.*;
import ai.djl.ndarray.types.*;
```
| torch | djl |
|---|---|
| torch.bool | DataType.BOOLEAN |
| torch.uint8 | DataType.UINT8 |
| torch.int8 | DataType.INT8 |
| torch.int16, torch.short | DataType.INT16 |
| torch.int32, torch.int | DataType.INT32 |
| torch.int64, torch.long | DataType.INT64 |
| torch.float16, torch.half | DataType.FLOAT16 |
| torch.float32, torch.float | DataType.FLOAT32 |
| torch.float64, torch.double | DataType.FLOAT64 |
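For example, you can inspect an NDArray's type with getDataType() and convert it with toType (the second argument controls whether a copy is forced):

```java
NDArray nd = manager.create(new float[] {1f, 2f, 3f}); // DataType.FLOAT32
nd.getDataType();                   // => float32
nd.toType(DataType.FLOAT16, false); // like $tensor.to(torch.float16)
```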
| torch | djl | note |
|---|---|---|
| torch.tensor(array) | manager.create(array) | array may be [v0,v1,...] or [[v0,v1,...],...] |
| $tensor.to('cpu') | $ndarray.toDevice(Device.cpu(),false) | |
| torch.zeros(p,q,r) | manager.zeros(new Shape(p,q,r)) | |
| torch.ones(p,q,r) | manager.ones(new Shape(p,q,r)) | |
| torch.zeros_like($tensor) | $ndarray.zerosLike(), manager.zeros($ndarray.getShape()) | |
| torch.ones_like($tensor) | $ndarray.onesLike(), manager.ones($ndarray.getShape()) | |
| torch.full((p,q),fill_value=s) | manager.full(new Shape(p,q),s) | |
| torch.rand(p,q,r) | manager.randomUniform(from,to,new Shape(p,q,r)) | randomUniform requires a range |
| torch.randn(p,q,r) | manager.randomNormal(new Shape(p,q,r)) | |
| torch.arange(p,q,r) | manager.arange(p,q,r) | |
| torch.linspace(p,q,r) | manager.linspace(p,q,r) | |
| torch.eye(n) | manager.eye(n) | |
**torch.tensor / manager.create**

Python

```python
a = [
    [1, 2, 3],
    [4, 5, 6]
]
torch.tensor(a)
```

Java

```java
float[][] array = { {1, 2, 3}, {4, 5, 6} };
NDArray nd = manager.create(array);
```
**to('cpu') / toDevice(Device.cpu())**

```java
NDArray nd = manager.create(new float[] {1, 2, 3, 4, 5});
nd = nd.toDevice(Device.cpu(), false);
// OR
nd = nd.toDevice(Device.gpu(), false);
```
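The second argument of toDevice is a copy flag: with false, the original array may be returned unchanged when it already lives on the target device, while true always forces a copy.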
**torch.ones / torch.zeros**

Python

```python
torch.ones(1, 2, 3)
torch.zeros(1, 2, 3)

a = torch.tensor([
    [1, 2, 3],
    [4, 5, 6]
])
torch.ones_like(a)
torch.zeros_like(a)
```

Java

```java
NDArray ones = manager.ones(new Shape(1, 2, 3));
NDArray zeros = manager.zeros(new Shape(1, 2, 3));

NDArray a = manager.create(new float[][] { {1, 2, 3}, {4, 5, 6} });
ones = a.onesLike();
zeros = a.zerosLike();
```
NDArray doesn't hold gradients by default; you have to explicitly require them.

Python

```python
a = [
    [1, 2, 3],
    [4, 5, 6]
]
torch.tensor(a, requires_grad=True, dtype=float)
```

Java

```java
NDArray a = manager.create(new float[][] { {1, 2, 3}, {4, 5, 6} });
a.setRequiresGradient(true);
a
```
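If you then want gradients to actually be computed, DJL runs the backward pass inside a GradientCollector. A minimal sketch, assuming ai.djl.engine.Engine and ai.djl.training.GradientCollector from the standard DJL API and an engine that supports autograd:

```java
import ai.djl.engine.Engine;
import ai.djl.training.GradientCollector;

try (GradientCollector gc = Engine.getInstance().newGradientCollector()) {
    NDArray a = manager.create(new float[][] { {1, 2, 3}, {4, 5, 6} });
    a.setRequiresGradient(true);
    NDArray loss = a.mul(2).sum(); // forward computation
    gc.backward(loss);             // accumulate gradients
    NDArray grad = a.getGradient(); // d(sum(2a))/da = 2 everywhere
}
```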
**Filling values**

Python

```python
# fill by value
torch.full((2, 3), fill_value=42)

# fill by random values
torch.rand(1, 2, 3)   # [0, 1) uniform distribution
torch.randn(1, 2, 3)  # standard normal distribution (mean 0, std 1)
torch.randint(0, 5, (1, 2, 3))

# fill by sequential values
torch.arange(1, 5, 0.5)
torch.linspace(1, 4, 5)

# diag
torch.eye(3)
```
Java

```java
manager.full(new Shape(2, 3), 42);
```

Unlike PyTorch, you need to specify the range of the uniform distribution:

```java
var from = 0;
var to = 1;
manager.randomUniform(from, to, new Shape(1, 2, 3));
manager.randomNormal(new Shape(1, 2, 3));

manager.arange(1f, 5f, 0.5f);
manager.linspace(1f, 4f, 5);
manager.eye(3);
```
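There is no DJL example above for torch.randint; if your DJL version provides NDManager.randomInteger (low inclusive, high exclusive, integer data type required), the equivalent would be:

```java
manager.randomInteger(0, 5, new Shape(1, 2, 3), DataType.INT32);
```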
| torch | djl | note |
|---|---|---|
| $tensor.size() | $ndarray.getShape() | a 2x3x4 tensor/ndarray returns (2, 3, 4) |
| $tensor.dim() | $ndarray.getShape().dimension() | a 2x3x4 tensor returns 3 |
| $tensor.numel() | $ndarray.size() | a 2x3x4 ndarray returns 24 |
| $tensor.reshape(p,q) | $ndarray.reshape(p,q) | |
| torch.flatten($tensor) | $ndarray.flatten() | |
| torch.squeeze($tensor) | $ndarray.squeeze() | |
| torch.unsqueeze($tensor,dim) | $ndarray.expandDims(dim) | |
| $tensor.T, torch.t($tensor) | $ndarray.transpose() | |
| torch.transpose($tensor,d0,d1) | $ndarray.transpose(d0,d1) | |
```java
var a = manager.zeros(new Shape(2, 3, 4));
a.getShape().dimension(); // => 3
a.getShape();             // => (2, 3, 4)
a.size();                 // => 24
```

```java
var a = manager.zeros(new Shape(2, 3, 4));
a.reshape(new Shape(6, 4));
// is equal to a.reshape(new Shape(6, -1))

a.flatten();
// is equal to a.reshape(-1)
```

You can also flatten only a range of dimensions:

```java
a.flatten(1, 2); // => shape (2, 12)
```
**squeeze**

Removes all singleton dimensions from the NDArray's shape.

```java
var a = manager.zeros(new Shape(2, 1, 2));
```

```
ND: (2, 1, 2) cpu() float32
[
    [[0., 0.]],
    [[0., 0.]],
]
```

Each [[0., 0.]] has a redundant dimension; the squeeze method drops it:

```java
a.squeeze(); // => shape (2, 2)
```

You can also drop only specific singleton dimensions:

```java
var a = manager.zeros(new Shape(2, 1, 2, 1));
```

```
ND: (2, 1, 2, 1) cpu() float32
[
    [[[0.],[0.]]],
    [[[0.],[0.]]],
]
```

```java
a.squeeze(1); // => shape (2, 2, 1)
```
**unsqueeze / expandDims**

Python

```python
a = torch.zeros(2, 2)
torch.unsqueeze(a, 0)
```

Java

```java
var a = manager.zeros(new Shape(2, 2));
```

```
ND: (2, 2) cpu() float32
[
    [0., 0.],
    [0., 0.],
]
```

```java
a.expandDims(0); // => shape (1, 2, 2)
```

A negative axis counts from the end:

```java
var a = manager.create(new int[] {10, 20, 30, 40});
```

```
ND: (4) cpu() int32
[10, 20, 30, 40]
```

```java
a.expandDims(-1); // => shape (4, 1)
```
**transpose**

Python

```python
m = [
    [1, 2],
    [3, 4],
]
a = torch.tensor(m)
a.T
```

Java

```java
var a = manager.create(new float[][] { {1, 2}, {3, 4} });
a.transpose();
```
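For torch.transpose(tensor, d0, d1) on arrays with more than two dimensions, note that DJL's transpose(int... axes) takes a full axis permutation rather than just the two axes to swap (worth verifying against your DJL version):

```java
var a = manager.zeros(new Shape(2, 3, 4));
a.transpose(1, 0, 2); // swap the first two axes => shape (3, 2, 4)
```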
In general, you can replace a PyTorch fancy-indexing expression with DJL's string-based indexing: pass the index expression as a format string to $ndarray.get, with {} placeholders filled by the extra arguments.
| torch | djl | note |
|---|---|---|
| torch.flip($tensor,(n*)) | $ndarray.flip(n*) | |
| torch.roll($tensor) | ??? | |
| $tensor[idx] | $ndarray.get(idx) | |
| $tensor[n:] | $ndarray.get("n:"), $ndarray.get("{}:",n) | |
| $tensor[[p,q,r]] | $ndarray.get(manager.create(new int[] {p,q,r})) | |
| $tensor[n,m] | $ndarray.get(n,m) | |
| $tensor[$indices] | $ndarray.get($indices) | $indices is an int tensor/ndarray, e.g. of shape 2x2 |
| $tensor[:,n] | $ndarray.get(":,{}",n) | |
| $tensor[:,n:m] | $ndarray.get(":,{}:{}",n,m) | |
| $tensor[:,[n,m]] | $ndarray.get(":,{}",$colIndices) | $colIndices is the NDArray {n, m} |
| $tensor[[p,q,r],[s,t,u]] | $ndarray.get("{},{}",$rowIndices,$colIndices) | $tensor and $ndarray are 2-dimensional; $rowIndices and $colIndices are 1-dimensional int ndarrays |
| $tensor[[[p],[q],[r]],[s,t]] | $ndarray.get("{},{}",$rowIndices.expandDims(-1),$colIndices) | $rowIndices is the NDArray {p, q, r} |
```java
var a = manager.arange(0, 100, 10);
a.get("5:");
a.get("{}:", 5);
```

```java
var indices = manager.create(new int[] {1, 3, 2});
a.get(indices); // => [10, 30, 20]
```

```java
var indices = manager.create(new int[][] { {2, 4}, {6, 8} });
a.get(indices); // => [[20, 40], [60, 80]]
```
```java
var a = manager.create(new int[][] { {5, 10, 20}, {30, 40, 50}, {60, 70, 80} });
```

$tensor[n,m] / $ndarray.get(n,m)

```java
a.get(1, 2); // => 50
```

$tensor[:,n] / $ndarray.get(":,{}",n)

```java
var n = 1;
a.get(":,{}", n); // => [10, 40, 70]
```

$tensor[:,n:m] / $ndarray.get(":,{}:{}",n,m)

```java
var n = 1;
var m = 2;
a.get(":,{}:{}", n, m);
```

$tensor[:,[n,m]] / $ndarray.get(":,{}",$colIndices)

```java
var colIndices = manager.create(new int[] {2, 0});
a.get(":,{}", colIndices);
```

$tensor[[p,q,r],[s,t,u]] / $ndarray.get("{},{}",$rowIndices,$colIndices)

```java
var rowIndices = manager.create(new int[] {0, 1, 2});
var colIndices = manager.create(new int[] {2, 0, 1});
// select values at (0,2), (1,0) and (2,1) in the 2-D ndarray
a.get("{},{}", rowIndices, colIndices); // => [20, 30, 70]
```

$tensor[[[p],[q],[r]],[s,t]] / $ndarray.get("{},{}",$rowIndices.expandDims(-1),$colIndices)

```java
var rowIndices = manager.create(new int[] {0, 1, 2}).expandDims(-1);
var colIndices = manager.create(new int[] {2, 0});
a.get("{},{}", rowIndices, colIndices);
```
```java
var a = manager.create(new int[] {1, 2, 3, 4, 5, 6, 7, 8}, new Shape(2, 2, 2));
```

```
ND: (2, 2, 2) cpu() int32
[
    [
        [ 1, 2],
        [ 3, 4],
    ],
    [
        [ 5, 6],
        [ 7, 8],
    ],
]
```

```java
a.flip(0);
a.flip(1);
a.flip(2);
a.flip(0, 1, 2);
```
| torch | djl | note |
|---|---|---|
| torch.cat((tensor0,tensor1),n) | ndarray0.concat(ndarray1,n) | concatenates vertically when n is 0 (like np.vstack) and horizontally when n is 1 (like np.hstack) |
| torch.stack(($tensor0,$tensor1)) | $ndarray0.stack($ndarray1) | |
```java
var zeros = manager.zeros(new Shape(2, 3));
var ones = manager.ones(new Shape(2, 3));
zeros.concat(ones, 0); // => shape (4, 3)
zeros.concat(ones, 1); // => shape (2, 6)
```

```java
var images0 = manager.create(new int[][] { {128, 0, 0}, {0, 128, 0} });
var images1 = manager.create(new int[][] { {0, 0, 128}, {127, 127, 127} });
images0.stack(images1);
images0.stack(images1, 1);
images0.stack(images1, 2);
```
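Unlike concat, stack joins the arrays along a new axis: stacking two (2, 3) arrays gives shape (2, 2, 3) for axes 0 and 1, and (2, 3, 2) for axis 2.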
| torch | djl |
|---|---|
| $tensor + 1 | $ndarray.add(1) |
| $tensor - 1 | $ndarray.sub(1) |
| $tensor * 2 | $ndarray.mul(2) |
| $tensor / 3 | $ndarray.div(3) |
| torch.mean($tensor) | $ndarray.mean() |
| torch.median($tensor) | $ndarray.median() |
| torch.sum($tensor) | $ndarray.sum() |
| torch.prod($tensor) | $ndarray.prod() |
| torch.cumsum($tensor) | $ndarray.cumsum() |
| torch.topk($tensor,k,dim) | $ndarray.topK(k,dim) |
| torch.kthvalue($tensor,k,dim) | ??? |
| torch.mode($tensor) | ??? |
| torch.std($tensor) | ??? |
| torch.var($tensor) | ??? |
| torch.std_mean($tensor) | ??? |
| torch.abs($tensor) | $ndarray.abs() |
| torch.ceil($tensor) | $ndarray.ceil() |
| torch.round($tensor) | $ndarray.round() |
| torch.trunc($tensor) | $ndarray.trunc() |
| torch.frac($tensor) | ??? |
| torch.clamp($tensor,min,max) | $ndarray.clip(min,max) |
| torch.log($tensor) | $ndarray.log() |
| torch.log2($tensor) | $ndarray.log2() |
| torch.log10($tensor) | $ndarray.log10() |
| torch.pow($tensor,n) | $ndarray.power(n) |
| torch.pow(n,$tensor) | ??? |
| torch.sigmoid($tensor) | ai.djl.nn.Activation::sigmoid($ndarray) |
| torch.sign($tensor) | $ndarray.sign() |
| torch.norm($tensor) | $ndarray.norm() |
| torch.dist($tensor0,$tensor1,p) | ??? |
| torch.cdist($tensor0,$tensor1,p) | ??? |
**torch.mean(tensor)**

```java
var a = manager.create(new float[] {0f, 1f, 2f, 3f, 4f, 5f, 6f, 7f, 8f, 9f});
a.mean(); // => 4.5
```

You can also take the mean along specific axes:

```java
var a = manager.create(new float[] {0f, 1f, 2f, 3f, 4f, 5f, 6f, 7f, 8f, 9f}).reshape(5, 2);
a.mean(new int[] {0}); // => [4., 5.]
a.mean(new int[] {1}); // => [0.5, 2.5, 4.5, 6.5, 8.5]
```
**torch.abs(tensor)**

```java
var ndarray = manager.create(new int[][] { {1, 2}, {-1, -2} });
ndarray.abs();
```

**torch.ceil(tensor)**

```java
var ndarray = manager.create(new double[][] { {1.0, 1.1, 1.2, 1.3, 1.4}, {1.5, 1.6, 1.7, 1.8, 1.9} });
ndarray.ceil();
```

**torch.floor(tensor)**

```java
var ndarray = manager.create(new double[][] { {1.0, 1.1, 1.2, 1.3, 1.4}, {1.5, 1.6, 1.7, 1.8, 1.9} });
ndarray.floor();
```

**torch.round(tensor)**

```java
var ndarray = manager.create(new double[][] { {1.0, 1.1, 1.2, 1.3, 1.4}, {1.5, 1.6, 1.7, 1.8, 1.9} });
ndarray.round();
```

**torch.sign(tensor)**

```java
var ndarray = manager.create(new double[][] { {-0.1, -0.0}, {0.0, 0.1} });
ndarray.sign();
```
**torch.norm(tensor)**

```java
var a = manager.create(new float[] {1f, 1f, 1f});
a.norm(); // L2 norm => sqrt(3) ≈ 1.7321
```
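The other reductions from the table behave analogously; for instance sum, prod, and cumsum:

```java
var a = manager.create(new float[] {1f, 2f, 3f, 4f});
a.sum();    // => 10
a.prod();   // => 24
a.cumsum(); // => [1, 3, 6, 10]
```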
| torch | djl |
|---|---|
| torch.isinf($tensor) | $ndarray.isInfinite() |
| torch.isfinite($tensor) | ??? |
| torch.isnan($tensor) | $ndarray.isNaN() |
| torch.nonzero($tensor) | $ndarray.nonzero() |
| torch.masked_select($tensor,$maskTensor) | $ndarray.booleanMask($maskNdarray) |
| torch.where(cond,$tensor0,$tensor1) | ??? |
**torch.isinf(tensor)**

```java
var ndarray = manager.create(
    new float[][] {
        {Float.NEGATIVE_INFINITY, -Float.MAX_VALUE, 0.0f},
        {Float.MAX_VALUE, Float.POSITIVE_INFINITY, Float.NaN}
    }
);
ndarray.isInfinite();
```
**torch.isnan(tensor)**

```java
var ndarray = manager.create(
    new float[][] {
        {Float.NEGATIVE_INFINITY, -Float.MAX_VALUE, 0.0f},
        {Float.MAX_VALUE, Float.POSITIVE_INFINITY, Float.NaN}
    }
);
ndarray.isNaN();
```
**torch.nonzero(tensor)**

Python

```python
a = torch.tensor(
    [
        [0.0, 0.1],
        [0.2, 0.3],
    ])
torch.nonzero(a)
```

Java

```java
var a = manager.create(new double[][] { {0.0, 0.1}, {0.2, 0.3} });
a.nonzero();
```
**torch.masked_select(tensor, mask)**

Python

```python
t = torch.tensor(
    [
        [0.1, 0.2],
        [0.3, 0.4]
    ]
)
mask = torch.tensor(
    [
        [False, True],
        [True, False]
    ]
)
torch.masked_select(t, mask)
```

Java

```java
var ndarray = manager.create(new double[][] { {0.1, 0.2}, {0.3, 0.4} });
var mask = manager.create(new boolean[][] { {false, true}, {true, false} });
ndarray.booleanMask(mask);
```
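Like torch.masked_select, booleanMask returns a 1-D NDArray of the selected elements, here [0.2, 0.3].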