Skip to content

Commit 6b1d1b1

Browse files
committed
Finish T1-1-12: minimum, atan2, addcdiv, bucketize, binary_cross_entropy
1 parent 3c8fb3c commit 6b1d1b1

33 files changed

Lines changed: 1294 additions & 18 deletions

include/infinicore/ops/addcdiv.hpp

Lines changed: 18 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,18 @@
1+
#pragma once

#include "../device.hpp"
#include "common/op.hpp"

namespace infinicore::op {

// addcdiv operator. NOTE(review): presumably computes
// output = input + value * (t1 / t2) element-wise, mirroring torch.addcdiv —
// confirm against the backend kernel implementations.
class Addcdiv {
public:
    // Signature of a backend kernel: (input, t1, t2, output, value).
    using schema = void (*)(Tensor, Tensor, Tensor, Tensor, float);
    // Runs a registered backend implementation, writing the result into `output`.
    static void execute(Tensor input, Tensor t1, Tensor t2, Tensor output, float value);
    // Registry of backend implementations for this operator.
    static common::OpDispatcher<schema> &dispatcher();
};

// Allocating variant: returns the result as a new tensor.
Tensor addcdiv(Tensor input, Tensor t1, Tensor t2, float value);
// Out variant: writes the result into caller-provided `output`.
void addcdiv_(Tensor input, Tensor t1, Tensor t2, Tensor output, float value);

} // namespace infinicore::op

include/infinicore/ops/atan2.hpp

Lines changed: 18 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,18 @@
1+
#pragma once

#include "../device.hpp"
#include "common/op.hpp"

namespace infinicore::op {

// atan2 operator. NOTE(review): presumably the element-wise two-argument
// arctangent atan2(input, other), mirroring torch.atan2 — confirm against the
// backend kernel implementations.
class Atan2 {
public:
    // Signature of a backend kernel: (input, other, output).
    using schema = void (*)(Tensor, Tensor, Tensor);
    // Runs a registered backend implementation, writing the result into `output`.
    static void execute(Tensor input, Tensor other, Tensor output);
    // Registry of backend implementations for this operator.
    static common::OpDispatcher<schema> &dispatcher();
};

// Allocating variant: returns the result as a new tensor.
Tensor atan2(Tensor input, Tensor other);
// Out variant: writes the result into caller-provided `output`.
void atan2_(Tensor input, Tensor other, Tensor output);

} // namespace infinicore::op
Lines changed: 20 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,20 @@
1+
#pragma once

#include "../device.hpp"
#include "common/op.hpp"
#include <optional>
#include <string>

namespace infinicore::op {

// binary_cross_entropy operator. NOTE(review): presumably the BCE loss between
// `input` probabilities and `target`, with optional per-element `weight` and a
// `reduction` mode string — confirm valid reduction values ("none"/"mean"/"sum"?)
// against the backend kernel implementations.
class BinaryCrossEntropy {
public:
    // Signature of a backend kernel: (input, target, weight, output, reduction).
    using schema = void (*)(Tensor, Tensor, std::optional<Tensor>, Tensor, std::string);
    // Runs a registered backend implementation, writing the result into `output`.
    static void execute(Tensor input, Tensor target, std::optional<Tensor> weight, Tensor output, std::string reduction);
    // Registry of backend implementations for this operator.
    static common::OpDispatcher<schema> &dispatcher();
};

// Allocating variant: returns the loss as a new tensor.
Tensor binary_cross_entropy(Tensor input, Tensor target, std::optional<Tensor> weight, std::string reduction);
// Out variant: writes the loss into caller-provided `output`.
void binary_cross_entropy_(Tensor input, Tensor target, std::optional<Tensor> weight, Tensor output, std::string reduction);

} // namespace infinicore::op
Lines changed: 18 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,18 @@
1+
#pragma once

#include "../device.hpp"
#include "common/op.hpp"

namespace infinicore::op {

// bucketize operator. NOTE(review): presumably returns, for each element of
// `input`, the index of the bucket it falls into given sorted `boundaries`,
// with `right` selecting which side of a boundary is inclusive (mirroring
// torch.bucketize) — confirm against the backend kernel implementations.
class Bucketize {
public:
    // Signature of a backend kernel: (input, boundaries, output, right).
    using schema = void (*)(Tensor, Tensor, Tensor, bool);
    // Runs a registered backend implementation, writing the result into `output`.
    static void execute(Tensor input, Tensor boundaries, Tensor output, bool right);
    // Registry of backend implementations for this operator.
    static common::OpDispatcher<schema> &dispatcher();
};

// Allocating variant: returns bucket indices as a new tensor.
Tensor bucketize(Tensor input, Tensor boundaries, bool right = false);
// Out variant: writes bucket indices into caller-provided `output`.
void bucketize_(Tensor input, Tensor boundaries, Tensor output, bool right = false);

} // namespace infinicore::op

include/infinicore/ops/minimum.hpp

Lines changed: 18 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,18 @@
1+
#pragma once

#include "../device.hpp"
#include "common/op.hpp"

namespace infinicore::op {

// minimum operator. NOTE(review): presumably the element-wise minimum of
// `input` and `other`, mirroring torch.minimum — confirm against the backend
// kernel implementations.
class Minimum {
public:
    // Signature of a backend kernel: (input, other, output).
    using schema = void (*)(Tensor, Tensor, Tensor);
    // Runs a registered backend implementation, writing the result into `output`.
    static void execute(Tensor input, Tensor other, Tensor output);
    // Registry of backend implementations for this operator.
    static common::OpDispatcher<schema> &dispatcher();
};

// Allocating variant: returns the result as a new tensor.
Tensor minimum(Tensor input, Tensor other);
// Out variant: writes the result into caller-provided `output`.
void minimum_(Tensor input, Tensor other, Tensor output);

} // namespace infinicore::op

python/infinicore/__init__.py

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -44,8 +44,12 @@
4444
)
4545
from infinicore.ops.add import add
4646
from infinicore.ops.add_rms_norm import add_rms_norm, add_rms_norm_
47+
from infinicore.ops.addcdiv import addcdiv
48+
from infinicore.ops.atan2 import atan2
4749
from infinicore.ops.attention import attention
50+
from infinicore.ops.bucketize import bucketize
4851
from infinicore.ops.matmul import matmul
52+
from infinicore.ops.minimum import minimum
4953
from infinicore.ops.mul import mul
5054
from infinicore.ops.narrow import narrow
5155
from infinicore.ops.paged_attention import paged_attention

python/infinicore/nn/functional/__init__.py

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,3 +1,4 @@
1+
from .binary_cross_entropy import binary_cross_entropy
12
from .causal_softmax import causal_softmax
23
from .embedding import embedding
34
from .linear import linear
@@ -17,4 +18,5 @@
1718
"embedding",
1819
"rope",
1920
"RopeAlgo",
21+
"binary_cross_entropy",
2022
]
Lines changed: 47 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,47 @@
1+
import infinicore
2+
from infinicore.lib import _infinicore
3+
from infinicore.tensor import Tensor
4+
5+
6+
def binary_cross_entropy(
    input: Tensor,
    target: Tensor,
    weight: Tensor | None = None,
    size_average=None,
    reduce=None,
    reduction: str = "mean",
    *,
    out=None,
) -> Tensor:
    r"""Binary cross entropy between ``input`` probabilities and ``target``.

    ``size_average``/``reduce`` are deprecated torch-style arguments; when
    either is supplied they override ``reduction``. Writes into ``out`` when
    given, otherwise returns a new tensor.
    """

    # Deprecated-argument handling: map (size_average, reduce) onto `reduction`.
    if size_average is not None or reduce is not None:
        if reduce is False:
            reduction = "none"
        else:
            reduction = (
                "mean"
                if (size_average is True or size_average is None)
                else "sum"
            )

    # Prefer the ntops backend on devices that provide it.
    if infinicore.use_ntops and input.device.type in ("cuda", "musa"):
        return infinicore.ntops.torch.binary_cross_entropy(
            input, target, weight=weight, reduction=reduction, out=out
        )

    raw_weight = None if weight is None else weight._underlying

    if out is not None:
        _infinicore.binary_cross_entropy_(
            input._underlying,
            target._underlying,
            raw_weight,
            out._underlying,
            reduction,
        )
        return out

    return Tensor(
        _infinicore.binary_cross_entropy(
            input._underlying, target._underlying, raw_weight, reduction
        )
    )

python/infinicore/ops/addcdiv.py

Lines changed: 38 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,38 @@
1+
import infinicore
2+
from infinicore.lib import _infinicore
3+
from infinicore.tensor import Tensor
4+
5+
6+
def addcdiv(
    input: Tensor,
    tensor1: Tensor,
    tensor2: Tensor,
    *,
    value=1.0,
    out=None,
) -> Tensor:
    r"""Element-wise ``input + value * tensor1 / tensor2``.

    Writes into ``out`` when given, otherwise returns a new tensor.
    """

    # Prefer the ntops backend on devices that provide it.
    if infinicore.use_ntops and input.device.type in ("cuda", "musa"):
        return infinicore.ntops.torch.addcdiv(
            input, tensor1, tensor2, value=value, out=out
        )

    scale = float(value)
    operands = (input._underlying, tensor1._underlying, tensor2._underlying)

    if out is not None:
        _infinicore.addcdiv_(*operands, out._underlying, scale)
        return out

    return Tensor(_infinicore.addcdiv(*operands, scale))

python/infinicore/ops/atan2.py

Lines changed: 21 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,21 @@
1+
import infinicore
2+
from infinicore.lib import _infinicore
3+
from infinicore.tensor import Tensor
4+
5+
6+
def atan2(
    input: Tensor,
    other: Tensor,
    *,
    out=None,
) -> Tensor:
    r"""Element-wise two-argument arctangent of ``input`` and ``other``.

    Writes into ``out`` when given, otherwise returns a new tensor.
    """

    # Prefer the ntops backend on devices that provide it.
    if infinicore.use_ntops and input.device.type in ("cuda", "musa"):
        return infinicore.ntops.torch.atan2(input, other, out=out)

    if out is not None:
        _infinicore.atan2_(input._underlying, other._underlying, out._underlying)
        return out

    return Tensor(_infinicore.atan2(input._underlying, other._underlying))

0 commit comments

Comments
 (0)