/* Standard library headers for the assert, I/O, allocation, string and time
 * routines used below (harmless if tensor.h already pulls them in). */
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>

#include "tensor.h"

tensor tensor_new(void)
{
    return calloc(1, sizeof(struct _tensor));
}

void tensor_destroy(tensor t)
{
    if (!tensor_is_empty(t)) {
        free(t->size);
        free(t->elements);
        free(t->index_offsets);
    }
    free(t);
}

int tensor_is_empty(const tensor t)
{
    return t->elements == NULL || t->size == NULL;
}

int tensor_is_equal(const tensor t1, const tensor t2)
{
    assert(!tensor_is_empty(t1));
    assert(!tensor_is_empty(t2));

    int i;

    if (t1->dimension != t2->dimension) return 0;
    for (i = 0; i < t1->dimension; i++) {
        if (t1->size[i] != t2->size[i]) return 0;
    }
    for (i = 0; i < t1->num_elem; i++) {
        if (t1->elements[i] != t2->elements[i]) return 0;
    }
    return 1;
}

int _tensor_check_size(const int *size, int dim)
{
    int i;

    if (dim < 0) return 0;
    for (i = 0; i < dim; i++) {
        if (size[i] < 1) return 0;
    }
    return 1;
}

int _tensor_set_size(tensor t, const int *size, int dim)
{
    /* Sets the size of a tensor. During this process all data in the tensor t is lost. */
    int *temp1;
    int *temp2;
    dtype *temp3;
    int i, j, num_elem = 1;

    if (!_tensor_check_size(size, dim)) return 0;

    /* Try allocating memory for the size/index_offsets arrays and the elements. */
    for (i = 0; i < dim; i++) {
        num_elem *= size[i];
    }
    temp1 = malloc(dim * sizeof(int));
    temp2 = malloc(dim * sizeof(int));
    temp3 = malloc(num_elem * sizeof(dtype));
    if ((temp1 == NULL && dim != 0) || (temp2 == NULL && dim != 0) || temp3 == NULL) {
        /* free(NULL) is a no-op, so every partial allocation is released here. */
        free(temp1);
        free(temp2);
        free(temp3);
        return 0;
    }

    /* Freeing old memory. */
    free(t->size);
    free(t->index_offsets);
    free(t->elements);

    /* Setting the size array. */
    t->size = temp1;
    if (dim != 0) memcpy(t->size, size, dim * sizeof(int));
    t->dimension = dim;

    /* Setting the index_offsets array: the stride of dimension i is the product
     * of the sizes of all dimensions after it (row-major layout). */
    t->index_offsets = temp2;
    for (i = 0; i < t->dimension; i++) {
        t->index_offsets[i] = 1;
        for (j = i + 1; j < t->dimension; j++) {
            t->index_offsets[i] *= t->size[j];
        }
    }

    /* Setting the elements pointer and the element count. */
    t->elements = temp3;
    t->num_elem = num_elem;

    return 1;
}
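
/*
 * Worked example of the index_offsets layout above (illustrative numbers only):
 * for a tensor of size {2, 3, 4}, the strides are
 *
 *     index_offsets = {3 * 4, 4, 1} = {12, 4, 1}
 *
 * so the element at index {i, j, k} lives at elements[12*i + 4*j + k],
 * which is exactly the offset computed in tensor_set() and tensor_get().
 */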

int tensor_set(tensor t, const int *index, dtype val)
{
    assert(!tensor_is_empty(t));

    int i, offset = 0;

    if (t->dimension == 0) {
        t->elements[0] = val;
        return 1;
    }

    for (i = 0; i < t->dimension; i++) {
        /* Reject out-of-range indices instead of writing out of bounds. */
        if (index[i] < 0 || t->size[i] <= index[i]) return 0;
        offset += t->index_offsets[i] * index[i];
    }

    t->elements[offset] = val;
    return 1;
}

dtype tensor_get(const tensor t, const int *index, int *success)
{
    assert(!tensor_is_empty(t));

    int i, offset = 0;

    if (t->dimension == 0) {
        /* A zero-dimensional tensor is a scalar; report success here as well. */
        if (success != NULL) *success = 1;
        return t->elements[0];
    }

    for (i = 0; i < t->dimension; i++) {
        if (index[i] < 0 || t->size[i] <= index[i]) {
            if (success != NULL) *success = 0;
            return 0;
        }
        offset += t->index_offsets[i] * index[i];
    }

    if (success != NULL) *success = 1;
    return t->elements[offset];
}
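
/*
 * Usage sketch for the success flag of tensor_get() (variable names here are
 * illustrative, not part of the API): a failed lookup returns 0, which is also
 * a valid element value, so the flag is the only reliable error signal.
 *
 *     int ok;
 *     dtype v = tensor_get(t, index, &ok);
 *     if (!ok) { ... handle the out-of-range index ... }
 */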

int tensor_init_one(tensor t, int dimension, const int *size)
{
    int i;

    if (!_tensor_set_size(t, size, dimension)) return 0;
    for (i = 0; i < t->num_elem; i++) {
        t->elements[i] = (dtype) 1;
    }
    return 1;
}

int tensor_init_zero(tensor t, int dimension, const int *size)
{
    int i;

    if (!_tensor_set_size(t, size, dimension)) return 0;
    for (i = 0; i < t->num_elem; i++) {
        t->elements[i] = (dtype) 0;
    }
    return 1;
}

int tensor_init_rand(tensor t, int dimension, const int *size, int max)
{
    int i;
    /* Perturb the seed on every call so that repeated calls within the same
     * second do not produce identical tensors. */
    static int last_seed;

    last_seed += time(NULL) * 200 + rand();
    srand(last_seed);

    if (!_tensor_set_size(t, size, dimension)) return 0;
    for (i = 0; i < t->num_elem; i++) {
        t->elements[i] = (dtype) ((double) rand() / RAND_MAX * max);
    }
    return 1;
}
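
/*
 * Usage sketch for tensor_init_rand() (names and values are illustrative):
 * each element is drawn from the range [0, max] and then cast to dtype.
 *
 *     int size[2] = {3, 3};
 *     tensor_init_rand(t, 2, size, 10);    3x3 tensor with values in [0, 10]
 */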

int tensor_cpy(tensor t1, const tensor t2)
{
    assert(!tensor_is_empty(t2));

    int i;

    if (!_tensor_set_size(t1, t2->size, t2->dimension)) return 0;
    for (i = 0; i < t2->num_elem; i++) {
        t1->elements[i] = t2->elements[i];
    }
    return 1;
}

void tensor_add_scalar(tensor t, dtype n)
{
    assert(!tensor_is_empty(t));

    int i;

    for (i = 0; i < t->num_elem; i++) {
        t->elements[i] += n;
    }
}

void tensor_sub_scalar(tensor t, dtype n)
{
    assert(!tensor_is_empty(t));

    int i;

    for (i = 0; i < t->num_elem; i++) {
        t->elements[i] -= n;
    }
}

void tensor_mult_scalar(tensor t, dtype n)
{
    assert(!tensor_is_empty(t));

    int i;

    for (i = 0; i < t->num_elem; i++) {
        t->elements[i] *= n;
    }
}

void tensor_div_scalar(tensor t, dtype n)
{
    assert(!tensor_is_empty(t));

    int i;

    for (i = 0; i < t->num_elem; i++) {
        t->elements[i] /= n;
    }
}

int tensor_add(tensor t1, const tensor t2)
{
    assert(!tensor_is_empty(t1));
    assert(!tensor_is_empty(t2));

    int i;

    if (t1->dimension != t2->dimension) return 0;
    for (i = 0; i < t1->dimension; i++) {
        if (t1->size[i] != t2->size[i]) return 0;
    }
    for (i = 0; i < t1->num_elem; i++) {
        t1->elements[i] += t2->elements[i];
    }
    return 1;
}

void tensor_for_each_elem(tensor t, dtype (*func)(dtype))
{
    assert(!tensor_is_empty(t));

    int i;

    for (i = 0; i < t->num_elem; i++) {
        t->elements[i] = func(t->elements[i]);
    }
}
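
/*
 * Usage sketch for tensor_for_each_elem() (the helper below is illustrative
 * and not part of the API): any function with the signature dtype f(dtype)
 * can be applied element-wise, for example:
 *
 *     static dtype square(dtype x) { return x * x; }
 *     ...
 *     tensor_for_each_elem(t, square);    applies square to every element
 */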

void tensor_print(const tensor t)
{
    int i, j;
    int *indx;

    if (tensor_is_empty(t)) {
        printf("<empty tensor>\n");
        return;
    }

    printf("Tensor of dimension %i and size (", t->dimension);
    for (i = 0; i < t->dimension - 1; i++) {
        printf("%i, ", t->size[i]);
    }
    if (t->dimension == 0) printf("): ");
    else printf("%i): ", t->size[t->dimension - 1]);

    if (t->dimension == 0) {
        /* scalar */
        printf(PRINT_STRING, t->elements[0]);
        putchar('\n');
    } else if (t->dimension == 1) {
        /* column vector */
        if (t->size[0] == 1) {
            putchar('(');
            printf(PRINT_STRING, t->elements[0]);
            printf(")\n");
        } else {
            printf("\n/");
            printf(PRINT_STRING, t->elements[0]);
            printf("\\\n");
            for (i = 1; i < t->size[0] - 1; i++) {
                putchar('|');
                printf(PRINT_STRING, t->elements[i]);
                printf("|\n");
            }
            printf("\\");
            printf(PRINT_STRING, t->elements[t->size[0] - 1]);
            printf("/\n");
        }
    } else if (t->dimension == 2) {
        /* matrix */
        indx = malloc(sizeof(int) * 2);
        if (indx == NULL) return;
        if (t->size[0] == 1) {
            putchar('(');
            indx[0] = 0;
            for (i = 0; i < t->size[1]; i++) {
                indx[1] = i;
                printf(PRINT_STRING, tensor_get(t, indx, NULL));
            }
            printf(")\n");
        } else {
            printf("\n/");
            indx[0] = 0;
            for (i = 0; i < t->size[1]; i++) {
                indx[1] = i;
                printf(PRINT_STRING, tensor_get(t, indx, NULL));
            }
            printf("\\\n");
            for (i = 1; i < t->size[0] - 1; i++) {
                putchar('|');
                indx[0] = i;
                for (j = 0; j < t->size[1]; j++) {
                    indx[1] = j;
                    printf(PRINT_STRING, tensor_get(t, indx, NULL));
                }
                printf("|\n");
            }
            printf("\\");
            indx[0] = t->size[0] - 1;
            for (i = 0; i < t->size[1]; i++) {
                indx[1] = i;
                printf(PRINT_STRING, tensor_get(t, indx, NULL));
            }
            printf("/\n");
        }
        free(indx);
    } else {
        printf("print function not yet implemented for dim > 2.\n");
    }
}
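
/*
 * Minimal usage sketch for the API in this file. The guard macro
 * TENSOR_EXAMPLE_MAIN is hypothetical (not part of the project); compile with
 * -DTENSOR_EXAMPLE_MAIN to build it as a standalone program. It assumes
 * dtype is a numeric type, as the rest of this file does.
 */
#ifdef TENSOR_EXAMPLE_MAIN
int main(void)
{
    int size[2] = {2, 3};
    int index[2] = {1, 2};
    tensor t = tensor_new();

    if (t == NULL) return 1;
    if (!tensor_init_zero(t, 2, size)) {
        tensor_destroy(t);
        return 1;
    }

    tensor_set(t, index, (dtype) 5);   /* element at row 1, column 2 */
    tensor_add_scalar(t, (dtype) 1);   /* every element is now old value + 1 */
    tensor_print(t);                   /* prints the 2x3 matrix */

    tensor_destroy(t);
    return 0;
}
#endif /* TENSOR_EXAMPLE_MAIN */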