Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
14 changes: 14 additions & 0 deletions include/infinicore/ops/diff.hpp
Original file line number Diff line number Diff line change
@@ -0,0 +1,14 @@
#pragma once

#include "../device.hpp"
#include "../graph/graph.hpp"
#include "common/op.hpp"

namespace infinicore::op {

/// Graph-op class registration for Diff.
/// Call signature: Tensor(const Tensor &, int, int).
INFINICORE_GRAPH_OP_CLASS(Diff, Tensor, const Tensor &, int, int);

/// Computes the n-th discrete difference of `x` along `dim` and returns a
/// new tensor (presumably mirrors torch.diff semantics — TODO confirm).
/// NOTE(review): the C API (infiniopCreateDiffDescriptor) declares (dim, n)
/// in the opposite order — verify the binding layer passes them correctly.
Tensor diff(const Tensor &x, int n = 1, int dim = -1);
/// Out-parameter variant: writes the result into pre-allocated `y`.
void diff_(Tensor y, const Tensor &x, int n = 1, int dim = -1);

} // namespace infinicore::op
14 changes: 14 additions & 0 deletions include/infinicore/ops/digamma.hpp
Original file line number Diff line number Diff line change
@@ -0,0 +1,14 @@
#pragma once

#include "../device.hpp"
#include "../graph/graph.hpp"
#include "common/op.hpp"

namespace infinicore::op {

/// Graph-op class registration for Digamma.
/// Call signature: Tensor(const Tensor &).
INFINICORE_GRAPH_OP_CLASS(Digamma, Tensor, const Tensor &);

/// Element-wise digamma (log-gamma derivative) of `x`, returned as a new
/// tensor (presumably mirrors torch.digamma — TODO confirm).
Tensor digamma(const Tensor &x);
/// Out-parameter variant: writes the result into pre-allocated `y`.
void digamma_(Tensor y, const Tensor &x);

} // namespace infinicore::op
14 changes: 14 additions & 0 deletions include/infinicore/ops/dist.hpp
Original file line number Diff line number Diff line change
@@ -0,0 +1,14 @@
#pragma once

#include "../device.hpp"
#include "../graph/graph.hpp"
#include "common/op.hpp"

namespace infinicore::op {

/// Graph-op class registration for Dist.
/// Call signature: Tensor(const Tensor &, const Tensor &, double).
INFINICORE_GRAPH_OP_CLASS(Dist, Tensor, const Tensor &, const Tensor &, double);

/// p-norm distance between `x1` and `x2` (default p = 2.0, i.e. Euclidean;
/// presumably mirrors torch.dist — TODO confirm broadcasting rules).
Tensor dist(const Tensor &x1, const Tensor &x2, double p = 2.0);
/// Out-parameter variant: writes the result into pre-allocated `y`.
void dist_(Tensor y, const Tensor &x1, const Tensor &x2, double p = 2.0);

} // namespace infinicore::op
14 changes: 14 additions & 0 deletions include/infinicore/ops/logdet.hpp
Original file line number Diff line number Diff line change
@@ -0,0 +1,14 @@
#pragma once

#include "../device.hpp"
#include "../graph/graph.hpp"
#include "common/op.hpp"

namespace infinicore::op {

/// Graph-op class registration for Logdet.
/// Call signature: Tensor(const Tensor &).
INFINICORE_GRAPH_OP_CLASS(Logdet, Tensor, const Tensor &);

/// Log-determinant of `x` (presumably square matrices, possibly batched, as
/// in torch.logdet — TODO confirm input/output shape contract).
Tensor logdet(const Tensor &x);
/// Out-parameter variant: writes the result into pre-allocated `y`.
void logdet_(Tensor y, const Tensor &x);

} // namespace infinicore::op
25 changes: 25 additions & 0 deletions include/infinicore/ops/pad.hpp
Original file line number Diff line number Diff line change
@@ -0,0 +1,25 @@
#pragma once

#include "../device.hpp"
#include "../graph/graph.hpp"
#include "common/op.hpp"

#include <string>
#include <vector>

namespace infinicore::op {

/// Graph-op class registration for Pad.
/// Call signature: Tensor(const Tensor &, const std::vector<int> &, const std::string &, double).
INFINICORE_GRAPH_OP_CLASS(Pad, Tensor, const Tensor &, const std::vector<int> &, const std::string &, double);

/// Pads `x` and returns a new tensor.
/// @param pad   per-dimension padding amounts (presumably last-dim-first
///              pairs as in torch.nn.functional.pad — TODO confirm)
/// @param mode  padding mode, default "constant"
/// @param value fill value (used by constant mode)
Tensor pad(const Tensor &x,
           const std::vector<int> &pad,
           const std::string &mode = "constant",
           double value = 0.0);

/// Out-parameter variant: writes the padded result into pre-allocated `y`.
void pad_(Tensor y,
          const Tensor &x,
          const std::vector<int> &pad,
          const std::string &mode = "constant",
          double value = 0.0);

} // namespace infinicore::op
5 changes: 5 additions & 0 deletions include/infiniop.h
Original file line number Diff line number Diff line change
Expand Up @@ -29,6 +29,9 @@
#include "infiniop/ops/dequant/per_tensor_dequant_int8.h"
#include "infiniop/ops/dequantize_awq.h"
#include "infiniop/ops/dequantize_gptq.h"
#include "infiniop/ops/diff.h"
#include "infiniop/ops/digamma.h"
#include "infiniop/ops/dist.h"
#include "infiniop/ops/embedding.h"
#include "infiniop/ops/equal.h"
#include "infiniop/ops/flash_attention.h"
Expand Down Expand Up @@ -59,11 +62,13 @@
#include "infiniop/ops/logaddexp.h"
#include "infiniop/ops/logaddexp2.h"
#include "infiniop/ops/logcumsumexp.h"
#include "infiniop/ops/logdet.h"
#include "infiniop/ops/lp_norm.h"
#include "infiniop/ops/masked_select.h"
#include "infiniop/ops/mul.h"
#include "infiniop/ops/multi_margin_loss.h"
#include "infiniop/ops/ones.h"
#include "infiniop/ops/pad.h"
#include "infiniop/ops/paged_attention.h"
#include "infiniop/ops/paged_attention_prefill.h"
#include "infiniop/ops/paged_caching.h"
Expand Down
26 changes: 26 additions & 0 deletions include/infiniop/ops/diff.h
Original file line number Diff line number Diff line change
@@ -0,0 +1,26 @@
#ifndef __INFINIOP_DIFF_API_H__
#define __INFINIOP_DIFF_API_H__

#include "../operator_descriptor.h"

/* Opaque handle describing a configured Diff operation. */
typedef struct InfiniopDescriptor *infiniopDiffDescriptor_t;

/* Creates a Diff descriptor binding output y to input x.
 * dim: dimension along which the difference is taken.
 * n:   number of times the difference is applied.
 * NOTE(review): this C API declares (dim, n) while the C++ wrapper
 * infinicore::op::diff takes (n, dim) — confirm the binding layer
 * forwards the arguments in the right order. */
__INFINI_C __export infiniStatus_t infiniopCreateDiffDescriptor(infiniopHandle_t handle,
                                                                infiniopDiffDescriptor_t *desc_ptr,
                                                                infiniopTensorDescriptor_t y,
                                                                infiniopTensorDescriptor_t x,
                                                                int dim,
                                                                int n);

/* Queries the workspace size (bytes) required by infiniopDiff. */
__INFINI_C __export infiniStatus_t infiniopGetDiffWorkspaceSize(infiniopDiffDescriptor_t desc, size_t *size);

/* Executes Diff: reads x, writes y; workspace must be at least
 * workspace_size bytes; stream is the backend execution stream/queue. */
__INFINI_C __export infiniStatus_t infiniopDiff(infiniopDiffDescriptor_t desc,
                                                void *workspace,
                                                size_t workspace_size,
                                                void *y,
                                                const void *x,
                                                void *stream);

/* Releases all resources owned by the descriptor. */
__INFINI_C __export infiniStatus_t infiniopDestroyDiffDescriptor(infiniopDiffDescriptor_t desc);

#endif // __INFINIOP_DIFF_API_H__
24 changes: 24 additions & 0 deletions include/infiniop/ops/digamma.h
Original file line number Diff line number Diff line change
@@ -0,0 +1,24 @@
#ifndef __INFINIOP_DIGAMMA_API_H__
#define __INFINIOP_DIGAMMA_API_H__

#include "../operator_descriptor.h"

/* Opaque handle describing a configured Digamma operation. */
typedef struct InfiniopDescriptor *infiniopDigammaDescriptor_t;

/* Creates a Digamma descriptor binding output y to input x
 * (element-wise op; y is presumably same-shaped as x — TODO confirm). */
__INFINI_C __export infiniStatus_t infiniopCreateDigammaDescriptor(infiniopHandle_t handle,
                                                                   infiniopDigammaDescriptor_t *desc_ptr,
                                                                   infiniopTensorDescriptor_t y,
                                                                   infiniopTensorDescriptor_t x);

/* Queries the workspace size (bytes) required by infiniopDigamma. */
__INFINI_C __export infiniStatus_t infiniopGetDigammaWorkspaceSize(infiniopDigammaDescriptor_t desc, size_t *size);

/* Executes Digamma: reads x, writes y; workspace must be at least
 * workspace_size bytes; stream is the backend execution stream/queue. */
__INFINI_C __export infiniStatus_t infiniopDigamma(infiniopDigammaDescriptor_t desc,
                                                   void *workspace,
                                                   size_t workspace_size,
                                                   void *y,
                                                   const void *x,
                                                   void *stream);

/* Releases all resources owned by the descriptor. */
__INFINI_C __export infiniStatus_t infiniopDestroyDigammaDescriptor(infiniopDigammaDescriptor_t desc);

#endif // __INFINIOP_DIGAMMA_API_H__
27 changes: 27 additions & 0 deletions include/infiniop/ops/dist.h
Original file line number Diff line number Diff line change
@@ -0,0 +1,27 @@
#ifndef __INFINIOP_DIST_API_H__
#define __INFINIOP_DIST_API_H__

#include "../operator_descriptor.h"

/* Opaque handle describing a configured Dist operation. */
typedef struct InfiniopDescriptor *infiniopDistDescriptor_t;

/* Creates a Dist descriptor for output y and inputs x1, x2.
 * p: order of the norm (p-norm distance); baked into the descriptor
 *    at creation time rather than passed per launch. */
__INFINI_C __export infiniStatus_t infiniopCreateDistDescriptor(infiniopHandle_t handle,
                                                                infiniopDistDescriptor_t *desc_ptr,
                                                                infiniopTensorDescriptor_t y,
                                                                infiniopTensorDescriptor_t x1,
                                                                infiniopTensorDescriptor_t x2,
                                                                double p);

/* Queries the workspace size (bytes) required by infiniopDist. */
__INFINI_C __export infiniStatus_t infiniopGetDistWorkspaceSize(infiniopDistDescriptor_t desc, size_t *size);

/* Executes Dist: reads x1 and x2, writes y; workspace must be at least
 * workspace_size bytes; stream is the backend execution stream/queue. */
__INFINI_C __export infiniStatus_t infiniopDist(infiniopDistDescriptor_t desc,
                                                void *workspace,
                                                size_t workspace_size,
                                                void *y,
                                                const void *x1,
                                                const void *x2,
                                                void *stream);

/* Releases all resources owned by the descriptor. */
__INFINI_C __export infiniStatus_t infiniopDestroyDistDescriptor(infiniopDistDescriptor_t desc);

#endif // __INFINIOP_DIST_API_H__
24 changes: 24 additions & 0 deletions include/infiniop/ops/logdet.h
Original file line number Diff line number Diff line change
@@ -0,0 +1,24 @@
#ifndef __INFINIOP_LOGDET_API_H__
#define __INFINIOP_LOGDET_API_H__

#include "../operator_descriptor.h"

/* Opaque handle describing a configured Logdet operation. */
typedef struct InfiniopDescriptor *infiniopLogdetDescriptor_t;

/* Creates a Logdet descriptor binding output y to input x
 * (x is presumably a square matrix or batch thereof, with y holding one
 * scalar per matrix — TODO confirm shape contract against implementation). */
__INFINI_C __export infiniStatus_t infiniopCreateLogdetDescriptor(infiniopHandle_t handle,
                                                                  infiniopLogdetDescriptor_t *desc_ptr,
                                                                  infiniopTensorDescriptor_t y,
                                                                  infiniopTensorDescriptor_t x);

/* Queries the workspace size (bytes) required by infiniopLogdet. */
__INFINI_C __export infiniStatus_t infiniopGetLogdetWorkspaceSize(infiniopLogdetDescriptor_t desc, size_t *size);

/* Executes Logdet: reads x, writes y; workspace must be at least
 * workspace_size bytes; stream is the backend execution stream/queue. */
__INFINI_C __export infiniStatus_t infiniopLogdet(infiniopLogdetDescriptor_t desc,
                                                  void *workspace,
                                                  size_t workspace_size,
                                                  void *y,
                                                  const void *x,
                                                  void *stream);

/* Releases all resources owned by the descriptor. */
__INFINI_C __export infiniStatus_t infiniopDestroyLogdetDescriptor(infiniopLogdetDescriptor_t desc);

#endif // __INFINIOP_LOGDET_API_H__
28 changes: 28 additions & 0 deletions include/infiniop/ops/pad.h
Original file line number Diff line number Diff line change
@@ -0,0 +1,28 @@
#ifndef __INFINIOP_PAD_API_H__
#define __INFINIOP_PAD_API_H__

#include "../operator_descriptor.h"

/* Opaque handle describing a configured Pad operation. */
typedef struct InfiniopDescriptor *infiniopPadDescriptor_t;

/* Creates a Pad descriptor for output y and input x.
 * pad:      pointer to the padding amounts; pad_size is its element count.
 *           NOTE(review): the element type is not expressed in the signature
 *           (non-const void *) — the C++ layer holds std::vector<int>, so this
 *           is presumably int data; consider `const int *` and confirm what
 *           the implementation actually reads.
 * mode:     padding mode name (e.g. "constant"), NUL-terminated.
 * value:    fill value used by constant mode. */
__INFINI_C __export infiniStatus_t infiniopCreatePadDescriptor(infiniopHandle_t handle,
                                                               infiniopPadDescriptor_t *desc_ptr,
                                                               infiniopTensorDescriptor_t y,
                                                               infiniopTensorDescriptor_t x,
                                                               void *pad,
                                                               size_t pad_size,
                                                               const char *mode,
                                                               double value);

/* Queries the workspace size (bytes) required by infiniopPad. */
__INFINI_C __export infiniStatus_t infiniopGetPadWorkspaceSize(infiniopPadDescriptor_t desc, size_t *size);

/* Executes Pad: reads x, writes y; workspace must be at least
 * workspace_size bytes; stream is the backend execution stream/queue. */
__INFINI_C __export infiniStatus_t infiniopPad(infiniopPadDescriptor_t desc,
                                               void *workspace,
                                               size_t workspace_size,
                                               void *y,
                                               const void *x,
                                               void *stream);

/* Releases all resources owned by the descriptor. */
__INFINI_C __export infiniStatus_t infiniopDestroyPadDescriptor(infiniopPadDescriptor_t desc);

#endif // __INFINIOP_PAD_API_H__
9 changes: 8 additions & 1 deletion python/infinicore/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -69,6 +69,9 @@
from infinicore.ops.cat import cat
from infinicore.ops.cdist import cdist
from infinicore.ops.cross_entropy import cross_entropy
from infinicore.ops.diff import diff
from infinicore.ops.digamma import digamma
from infinicore.ops.dist import dist
from infinicore.ops.equal import equal
from infinicore.ops.flipud import flipud
from infinicore.ops.float_power import float_power
Expand All @@ -88,6 +91,7 @@
from infinicore.ops.logaddexp import logaddexp
from infinicore.ops.logaddexp2 import logaddexp2
from infinicore.ops.logcumsumexp import logcumsumexp
from infinicore.ops.logdet import logdet
from infinicore.ops.logical_and import logical_and
from infinicore.ops.logical_not import logical_not
from infinicore.ops.masked_select import masked_select
Expand Down Expand Up @@ -177,7 +181,6 @@
"add",
"addr",
"add_rms_norm",
"add_rms_norm_",
"argwhere",
"asin",
"acos",
Expand All @@ -199,6 +202,10 @@
"matmul",
"equal",
"mul",
"diff",
"digamma",
"dist",
"logdet",
"narrow",
"ldexp",
"lerp",
Expand Down
2 changes: 2 additions & 0 deletions python/infinicore/nn/functional/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -16,6 +16,7 @@
from .linear_w8a8i8 import linear_w8a8i8
from .log_softmax import log_softmax
from .multi_margin_loss import multi_margin_loss
from .pad import pad
from .random_sample import random_sample
from .rms_norm import rms_norm
from .rope import RopeAlgo, rope
Expand Down Expand Up @@ -61,6 +62,7 @@
"rope",
"selu",
"hinge_embedding_loss",
"pad",
"silu",
"hardswish",
"hardtanh",
Expand Down
23 changes: 23 additions & 0 deletions python/infinicore/nn/functional/pad.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,23 @@
from __future__ import annotations

from collections.abc import Sequence
from typing import Optional

from infinicore.lib import _infinicore
from infinicore.tensor import Tensor


def pad(
    input: Tensor,
    pad: Sequence[int],
    mode: str = "constant",
    value: float = 0.0,
    *,
    out: Optional[Tensor] = None,
) -> Tensor:
    """Pad ``input`` and return the result.

    Args:
        input: tensor to pad.
        pad: per-dimension padding amounts (presumably last-dim-first pairs,
            as in ``torch.nn.functional.pad`` — TODO confirm); materialized
            to a list before crossing into the native layer.
        mode: padding mode forwarded to the backend (default ``"constant"``).
        value: fill value forwarded to the backend (default ``0.0``).
        out: optional pre-allocated destination tensor.

    Returns:
        ``out`` when provided (filled in place), otherwise a new ``Tensor``.
    """
    amounts = list(pad)
    if out is not None:
        # In-place path: the backend writes directly into the caller's buffer.
        _infinicore.pad_(out._underlying, input._underlying, amounts, mode, value)
        return out
    return Tensor(_infinicore.pad(input._underlying, amounts, mode, value))
14 changes: 14 additions & 0 deletions python/infinicore/ops/diff.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,14 @@
from __future__ import annotations

from typing import Optional

from infinicore.lib import _infinicore
from infinicore.tensor import Tensor


def diff(input: Tensor, n: int = 1, dim: int = -1, *, out: Optional[Tensor] = None):
    """Compute the ``n``-th discrete difference of ``input`` along ``dim``
    (presumably mirrors ``torch.diff`` — TODO confirm backend semantics).

    Args:
        input: source tensor.
        n: number of times the difference is applied (default 1).
        dim: dimension to difference along (default -1, the last).
        out: optional pre-allocated destination tensor.

    Returns:
        ``out`` when provided (filled in place), otherwise a new ``Tensor``.
    """
    if out is not None:
        _infinicore.diff_(out._underlying, input._underlying, n, dim)
        return out
    return Tensor(_infinicore.diff(input._underlying, n, dim))
14 changes: 14 additions & 0 deletions python/infinicore/ops/digamma.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,14 @@
from __future__ import annotations

from typing import Optional

from infinicore.lib import _infinicore
from infinicore.tensor import Tensor


def digamma(input: Tensor, *, out: Optional[Tensor] = None):
    """Element-wise digamma of ``input`` (presumably mirrors
    ``torch.digamma`` — TODO confirm backend semantics).

    Args:
        input: source tensor.
        out: optional pre-allocated destination tensor.

    Returns:
        ``out`` when provided (filled in place), otherwise a new ``Tensor``.
    """
    if out is not None:
        _infinicore.digamma_(out._underlying, input._underlying)
        return out
    return Tensor(_infinicore.digamma(input._underlying))
14 changes: 14 additions & 0 deletions python/infinicore/ops/dist.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,14 @@
from __future__ import annotations

from typing import Optional

from infinicore.lib import _infinicore
from infinicore.tensor import Tensor


def dist(input: Tensor, other: Tensor, p: float = 2.0, *, out: Optional[Tensor] = None):
    """p-norm distance between ``input`` and ``other`` (default Euclidean,
    ``p=2.0``; presumably mirrors ``torch.dist`` — TODO confirm).

    Args:
        input: first operand.
        other: second operand.
        p: order of the norm (default 2.0).
        out: optional pre-allocated destination tensor.

    Returns:
        ``out`` when provided (filled in place), otherwise a new ``Tensor``.
    """
    if out is not None:
        _infinicore.dist_(out._underlying, input._underlying, other._underlying, p)
        return out
    return Tensor(_infinicore.dist(input._underlying, other._underlying, p))
14 changes: 14 additions & 0 deletions python/infinicore/ops/logdet.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,14 @@
from __future__ import annotations

from typing import Optional

from infinicore.lib import _infinicore
from infinicore.tensor import Tensor


def logdet(input: Tensor, *, out: Optional[Tensor] = None):
    """Log-determinant of ``input`` (presumably square, possibly batched
    matrices as in ``torch.logdet`` — TODO confirm shape contract).

    Args:
        input: source tensor.
        out: optional pre-allocated destination tensor.

    Returns:
        ``out`` when provided (filled in place), otherwise a new ``Tensor``.
    """
    if out is not None:
        _infinicore.logdet_(out._underlying, input._underlying)
        return out
    return Tensor(_infinicore.logdet(input._underlying))
Loading
Loading