16 changes: 16 additions & 0 deletions include/infinicore/ops/bitwise_left_shift.hpp
@@ -0,0 +1,16 @@
#pragma once

#include "../device.hpp"
#include "common/op.hpp"

namespace infinicore::op {
class BitwiseLeftShift {
public:
    using schema = void (*)(Tensor, Tensor, Tensor);
    static void execute(Tensor c, Tensor a, Tensor b);
    static common::OpDispatcher<schema> &dispatcher();
};

Tensor bitwise_left_shift(Tensor a, Tensor b);
void bitwise_left_shift_(Tensor c, Tensor a, Tensor b);
} // namespace infinicore::op
18 changes: 18 additions & 0 deletions include/infinicore/ops/fold.hpp
@@ -0,0 +1,18 @@
#pragma once

#include "../device.hpp"
#include "common/op.hpp"
#include <tuple>

namespace infinicore::op {
class Fold {
public:
    using schema = void (*)(Tensor, Tensor, std::tuple<size_t, size_t>, std::tuple<size_t, size_t>, std::tuple<size_t, size_t>, std::tuple<size_t, size_t>, std::tuple<size_t, size_t>);
    // The PyTorch docs currently state that only (N, C, H, W) and (C, H, W) input/output formats are supported.
    static void execute(Tensor output, Tensor input, std::tuple<size_t, size_t> output_size, std::tuple<size_t, size_t> kernel_size, std::tuple<size_t, size_t> dilation, std::tuple<size_t, size_t> padding, std::tuple<size_t, size_t> stride);
    static common::OpDispatcher<schema> &dispatcher();
};

Tensor fold(Tensor input, std::tuple<size_t, size_t> output_size, std::tuple<size_t, size_t> kernel_size, std::tuple<size_t, size_t> dilation, std::tuple<size_t, size_t> padding, std::tuple<size_t, size_t> stride);
void fold_(Tensor output, Tensor input, std::tuple<size_t, size_t> output_size, std::tuple<size_t, size_t> kernel_size, std::tuple<size_t, size_t> dilation, std::tuple<size_t, size_t> padding, std::tuple<size_t, size_t> stride);
} // namespace infinicore::op
16 changes: 16 additions & 0 deletions include/infinicore/ops/index_select.hpp
@@ -0,0 +1,16 @@
#pragma once

#include "../device.hpp"
#include "common/op.hpp"

namespace infinicore::op {
class IndexSelect {
public:
    using schema = void (*)(Tensor, Tensor, int, Tensor);
    static void execute(Tensor output, Tensor input, int dim, Tensor index);
    static common::OpDispatcher<schema> &dispatcher();
};

Tensor index_select(Tensor input, int dim, Tensor index);
void index_select_(Tensor output, Tensor input, int dim, Tensor index);
} // namespace infinicore::op
16 changes: 16 additions & 0 deletions include/infinicore/ops/log2.hpp
@@ -0,0 +1,16 @@
#pragma once

#include "../device.hpp"
#include "common/op.hpp"

namespace infinicore::op {
class Log2 {
public:
    using schema = void (*)(Tensor, Tensor);
    static void execute(Tensor output, Tensor input);
    static common::OpDispatcher<schema> &dispatcher();
};

Tensor log2(Tensor input);
void log2_(Tensor output, Tensor input);
} // namespace infinicore::op
15 changes: 15 additions & 0 deletions include/infinicore/ops/mish.hpp
@@ -0,0 +1,15 @@
#pragma once

#include "../device.hpp"
#include "common/op.hpp"

namespace infinicore::op {
class Mish {
public:
    using schema = void (*)(Tensor, Tensor, bool);
    static void execute(Tensor output, Tensor input, bool inplace);
    static common::OpDispatcher<schema> &dispatcher();
};

Tensor mish(Tensor input, bool inplace);
} // namespace infinicore::op
15 changes: 15 additions & 0 deletions include/infinicore/ops/zeros.hpp
@@ -0,0 +1,15 @@
#pragma once

#include "common/op.hpp"

namespace infinicore::op {
class Zeros {
public:
    using schema = void (*)(Tensor);
    static void execute(Tensor output);
    static common::OpDispatcher<schema> &dispatcher();
};

void zeros_(Tensor output);
} // namespace infinicore::op
5 changes: 5 additions & 0 deletions python/infinicore/__init__.py
@@ -45,6 +45,9 @@
from infinicore.ops.mul import mul
from infinicore.ops.narrow import narrow
from infinicore.ops.rearrange import rearrange
from infinicore.ops.bitwise_left_shift import bitwise_left_shift
from infinicore.ops.index_select import index_select
from infinicore.ops.log2 import log2
from infinicore.tensor import (
    Tensor,
    empty,
@@ -115,6 +118,8 @@
"strided_empty",
"strided_from_blob",
"zeros",
"bitwise_left_shift",
"index_select",
]

use_ntops = False
3 changes: 2 additions & 1 deletion python/infinicore/nn/__init__.py
@@ -1,5 +1,6 @@
from infinicore.nn import functional
from infinicore.nn.modules import * # noqa: F403
from infinicore.nn.parameter import InfiniCoreParameter as Parameter
from infinicore.nn import init

__all__ = ["functional", "Parameter"]
__all__ = ["functional", "Parameter", "init"]
4 changes: 4 additions & 0 deletions python/infinicore/nn/functional/__init__.py
Expand Up @@ -6,6 +6,8 @@
from .rope import RopeAlgo, rope
from .silu import silu
from .swiglu import swiglu
from .fold import fold
from .mish import mish

__all__ = [
"causal_softmax",
@@ -17,4 +19,6 @@
"embedding",
"rope",
"RopeAlgo",
"fold",
"mish",
]
47 changes: 47 additions & 0 deletions python/infinicore/nn/functional/fold.py
@@ -0,0 +1,47 @@
import infinicore
from infinicore.lib import _infinicore
from infinicore.tensor import Tensor


def fold(
    input: Tensor,
    output_size: int | tuple[int, int],
    kernel_size: int | tuple[int, int],
    dilation: int | tuple[int, int] = 1,
    padding: int | tuple[int, int] = 0,
    stride: int | tuple[int, int] = 1,
) -> Tensor:
    r"""Combines an array of sliding local blocks into a large containing tensor. Currently, only unbatched (3D) or batched (4D) image-like output tensors are supported."""

    if isinstance(output_size, int):
        output_size = (output_size, output_size)

    if isinstance(kernel_size, int):
        kernel_size = (kernel_size, kernel_size)

    if isinstance(dilation, int):
        dilation = (dilation, dilation)

    if isinstance(padding, int):
        padding = (padding, padding)

    if isinstance(stride, int):
        stride = (stride, stride)

    assert input.ndim in (2, 3), "only 2D (unbatched) or 3D (batched) input tensors are supported"
    assert len(output_size) == 2, "output_size must be a tuple of two integers (H, W)"
    assert len(kernel_size) == 2, "kernel_size must be a tuple of two integers (kH, kW)"
    assert len(dilation) == 2, "dilation must be a tuple of two integers (dH, dW)"
    assert len(padding) == 2, "padding must be a tuple of two integers (pH, pW)"
    assert len(stride) == 2, "stride must be a tuple of two integers (sH, sW)"

    if infinicore.use_ntops and input.device.type in ("cuda", "musa"):
        return infinicore.ntops.torch.fold(
            input, output_size, kernel_size, dilation, padding, stride
        )

    return Tensor(
        _infinicore.fold(
            input._underlying, output_size, kernel_size, dilation, padding, stride
        )
    )
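
For orientation, the L dimension of the input must equal the number of sliding block positions implied by the other arguments; per the PyTorch fold semantics this PR mirrors, L = ∏_d ⌊(output_size[d] + 2·padding[d] − dilation[d]·(kernel_size[d] − 1) − 1) / stride[d] + 1⌋. A minimal sketch of a call, assuming `infinicore.zeros` accepts a torch-style shape tuple (the factory's exact signature is not shown in this diff):

```python
import infinicore
import infinicore.nn.functional as F

# 1 batch, 3 channels, 2x2 kernel, 4x4 output, stride 2 -> L = 2 * 2 = 4 blocks,
# so the block tensor has shape (N, C * kH * kW, L) = (1, 12, 4).
blocks = infinicore.zeros((1, 3 * 2 * 2, 4))  # assumed torch-like factory
out = F.fold(blocks, output_size=4, kernel_size=2, stride=2)
# out has shape (1, 3, 4, 4); the blocks here do not overlap, so no positions are summed.
```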
20 changes: 20 additions & 0 deletions python/infinicore/nn/functional/mish.py
@@ -0,0 +1,20 @@
import infinicore
from infinicore.lib import _infinicore
from infinicore.tensor import Tensor


def mish(input: Tensor, inplace: bool = False) -> Tensor:
    r"""Applies the Mish activation function element-wise: mish(x) = x * tanh(softplus(x))."""

    if infinicore.use_ntops and input.device.type in ("cuda", "musa"):
        return infinicore.ntops.torch.mish(input, inplace)

    if inplace:
        _infinicore.mish(input._underlying, inplace)
        return input

    return Tensor(_infinicore.mish(input._underlying, inplace))
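
A short sketch of both call modes, again assuming a torch-like `infinicore.zeros` factory (not part of this diff):

```python
import infinicore
import infinicore.nn.functional as F

x = infinicore.zeros((2, 3))    # assumed torch-like factory call
y = F.mish(x)                   # out-of-place: allocates and returns a new tensor
x = F.mish(x, inplace=True)     # in-place: overwrites x and returns it
# mish(v) = v * tanh(softplus(v)), so mish(0) == 0 and both results stay all-zero.
```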
5 changes: 5 additions & 0 deletions python/infinicore/nn/init/__init__.py
@@ -0,0 +1,5 @@
from .zeros_ import zeros_

__all__ = [
"zeros_",
]
9 changes: 9 additions & 0 deletions python/infinicore/nn/init/zeros_.py
@@ -0,0 +1,9 @@
from infinicore.lib import _infinicore
from infinicore.tensor import Tensor


def zeros_(input: Tensor) -> Tensor:
r"""Fill the input tensor with the scalar value 0."""
_infinicore.zeros_(input._underlying)
return input
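
Typical use is re-initializing an existing buffer in place; `empty` is exported from `infinicore` (see `python/infinicore/__init__.py` above), though its exact signature is assumed torch-like here:

```python
import infinicore
from infinicore.nn import init

w = infinicore.empty((4, 4))  # assumed torch-like shape argument
init.zeros_(w)                # fills w with 0 in place and returns w
```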
21 changes: 21 additions & 0 deletions python/infinicore/ops/bitwise_left_shift.py
@@ -0,0 +1,21 @@
import infinicore
from infinicore.lib import _infinicore
from infinicore.tensor import Tensor


def bitwise_left_shift(input: Tensor, other: Tensor, *, out=None) -> Tensor:
r"""Computes the left arithmetic shift of input by other bits. The input tensor must be of integral type."""

if infinicore.use_ntops and input.device.type in ("cuda", "musa"):
return infinicore.ntops.torch.bitwise_left_shift(input, other, out=out)

if out is None:
return Tensor(
_infinicore.bitwise_left_shift(input._underlying, other._underlying)
)

_infinicore.bitwise_left_shift_(
out._underlying, input._underlying, other._underlying
)

return out
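
Usage sketch; the integer-dtype factory call below is hypothetical (this diff does not show how integer tensors are created), but the two calling conventions follow directly from the wrapper:

```python
import infinicore

# Hypothetical setup: integer tensors, dtype/device handling assumed torch-like.
a = infinicore.zeros((4,), dtype=infinicore.int32)
b = infinicore.zeros((4,), dtype=infinicore.int32)

shifted = infinicore.bitwise_left_shift(a, b)     # allocating form: returns a << b
infinicore.bitwise_left_shift(a, b, out=shifted)  # out= form: writes into `shifted`
```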
20 changes: 20 additions & 0 deletions python/infinicore/ops/index_select.py
@@ -0,0 +1,20 @@
import infinicore
from infinicore.lib import _infinicore
from infinicore.tensor import Tensor


def index_select(input: Tensor, dim: int, index: Tensor, *, out=None) -> Tensor:
r"""Selects elements from input along a specific dimension."""

    if infinicore.use_ntops and input.device.type in ("cuda", "musa"):
        return infinicore.ntops.torch.index_select(input, dim, index, out=out)

    if out is None:
        return Tensor(
            _infinicore.index_select(input._underlying, dim, index._underlying)
        )

    _infinicore.index_select_(
        out._underlying, input._underlying, dim, index._underlying
    )

    return out
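
A sketch under the same assumptions (torch-like factories and an `int64` index dtype, neither shown in this PR):

```python
import infinicore

x = infinicore.zeros((5, 4))                          # assumed torch-like factory
idx = infinicore.zeros((2,), dtype=infinicore.int64)  # hypothetical dtype name

rows = infinicore.index_select(x, 0, idx)     # shape (2, 4): rows of x picked by idx
infinicore.index_select(x, 0, idx, out=rows)  # out= variant reuses the buffer
```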
21 changes: 21 additions & 0 deletions python/infinicore/ops/log2.py
@@ -0,0 +1,21 @@
import infinicore
from infinicore.lib import _infinicore
from infinicore.tensor import Tensor


def log2(input: Tensor, *, out=None) -> Tensor:
r"""Computes the base-2 logarithm of the input tensor element-wise."""

if infinicore.use_ntops and input.device.type in ("cuda", "musa"):
return infinicore.ntops.torch.log2(input, out=out)

if out is None:
return Tensor(
_infinicore.log2(input._underlying)
)

_infinicore.log2_(
out._underlying, input._underlying
)

return out
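
And the matching sketch for log2 (same torch-like factory assumption):

```python
import infinicore

x = infinicore.zeros((3,))  # assumed torch-like factory; log2(0) is -inf in IEEE math
y = infinicore.log2(x)      # allocating form
infinicore.log2(x, out=y)   # out= form writes into y and returns it
```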
28 changes: 28 additions & 0 deletions src/infinicore/ops/bitwise_left_shift/bitwise_left_shift.cc
@@ -0,0 +1,28 @@
#include "infinicore/ops/bitwise_left_shift.hpp"

#include "../../utils.hpp"

namespace infinicore::op {

common::OpDispatcher<BitwiseLeftShift::schema> &BitwiseLeftShift::dispatcher() {
    static common::OpDispatcher<BitwiseLeftShift::schema> dispatcher_;
    return dispatcher_;
}

void BitwiseLeftShift::execute(Tensor c, Tensor a, Tensor b) {
    INFINICORE_ASSERT_TENSORS_SAME_DEVICE(c, a, b);
    infinicore::context::setDevice(c->device(), true);
    dispatcher().lookup(c->device().getType())(c, a, b);
}

Tensor bitwise_left_shift(Tensor a, Tensor b) {
    auto c = Tensor::empty(a->shape(), a->dtype(), a->device());
    bitwise_left_shift_(c, a, b);
    return c;
}

void bitwise_left_shift_(Tensor c, Tensor a, Tensor b) {
    BitwiseLeftShift::execute(c, a, b);
}

} // namespace infinicore::op