Diffstat (limited to 'libgfortran/generated')
-rw-r--r-- | libgfortran/generated/unpack_c10.c | 338
-rw-r--r-- | libgfortran/generated/unpack_c16.c | 338
-rw-r--r-- | libgfortran/generated/unpack_c4.c | 338
-rw-r--r-- | libgfortran/generated/unpack_c8.c | 338
-rw-r--r-- | libgfortran/generated/unpack_i1.c | 338
-rw-r--r-- | libgfortran/generated/unpack_i16.c | 338
-rw-r--r-- | libgfortran/generated/unpack_i2.c | 338
-rw-r--r-- | libgfortran/generated/unpack_i4.c | 338
-rw-r--r-- | libgfortran/generated/unpack_i8.c | 338
-rw-r--r-- | libgfortran/generated/unpack_r10.c | 338
-rw-r--r-- | libgfortran/generated/unpack_r16.c | 338
-rw-r--r-- | libgfortran/generated/unpack_r4.c | 338
-rw-r--r-- | libgfortran/generated/unpack_r8.c | 338
13 files changed, 4394 insertions, 0 deletions
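Each of the 13 generated files provides two entry points for one element type: unpack0_* (scalar FIELD passed by pointer) and unpack1_* (array FIELD), both walking the result with per-dimension count/extent/stride counters. As a reminder of the semantics these files implement, below is a minimal standalone C sketch of the scalar-FIELD case for a contiguous rank-1 result; the helper name and the flat layout are illustrative assumptions only, not part of the libgfortran API or its descriptor-based code below.

#include <stdio.h>
#include <stdbool.h>

/* Sketch only: where MASK is true take the next element of VECTOR,
   elsewhere use the scalar FIELD value (the unpack0_* behaviour).  */
static void
unpack_scalar_field (int *ret, const int *vector, const bool *mask,
                     int fval, int n)
{
  int v = 0;   /* index of the next unread VECTOR element */
  for (int i = 0; i < n; i++)
    ret[i] = mask[i] ? vector[v++] : fval;
}

int
main (void)
{
  const int vector[3] = { 1, 2, 3 };
  const bool mask[5] = { true, false, true, false, true };
  int ret[5];

  unpack_scalar_field (ret, vector, mask, -1, 5);
  for (int i = 0; i < 5; i++)
    printf ("%d ", ret[i]);   /* prints: 1 -1 2 -1 3 */
  printf ("\n");
  return 0;
}

The generated unpack1_* variants differ only in reading FIELD element-wise through its own stride instead of reusing a single scalar value, as the diffs below show.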
diff --git a/libgfortran/generated/unpack_c10.c b/libgfortran/generated/unpack_c10.c new file mode 100644 index 00000000000..e6f3ecf2652 --- /dev/null +++ b/libgfortran/generated/unpack_c10.c @@ -0,0 +1,338 @@ +/* Specific implementation of the UNPACK intrinsic + Copyright 2008 Free Software Foundation, Inc. + Contributed by Thomas Koenig <tkoenig@gcc.gnu.org>, based on + unpack_generic.c by Paul Brook <paul@nowt.org>. + +This file is part of the GNU Fortran 95 runtime library (libgfortran). + +Libgfortran is free software; you can redistribute it and/or +modify it under the terms of the GNU General Public +License as published by the Free Software Foundation; either +version 2 of the License, or (at your option) any later version. + +In addition to the permissions in the GNU General Public License, the +Free Software Foundation gives you unlimited permission to link the +compiled version of this file into combinations with other programs, +and to distribute those combinations without any restriction coming +from the use of this file. (The General Public License restrictions +do apply in other respects; for example, they cover modification of +the file, and distribution when not linked into a combine +executable.) + +Ligbfortran is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public +License along with libgfortran; see the file COPYING. If not, +write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, +Boston, MA 02110-1301, USA. */ + +#include "libgfortran.h" +#include <stdlib.h> +#include <assert.h> +#include <string.h> + + +#if defined (HAVE_GFC_COMPLEX_10) + +void +unpack0_c10 (gfc_array_c10 *ret, const gfc_array_c10 *vector, + const gfc_array_l1 *mask, const GFC_COMPLEX_10 *fptr) +{ + /* r.* indicates the return array. */ + index_type rstride[GFC_MAX_DIMENSIONS]; + index_type rstride0; + index_type rs; + GFC_COMPLEX_10 *rptr; + /* v.* indicates the vector array. */ + index_type vstride0; + GFC_COMPLEX_10 *vptr; + /* Value for field, this is constant. */ + const GFC_COMPLEX_10 fval = *fptr; + /* m.* indicates the mask array. */ + index_type mstride[GFC_MAX_DIMENSIONS]; + index_type mstride0; + const GFC_LOGICAL_1 *mptr; + + index_type count[GFC_MAX_DIMENSIONS]; + index_type extent[GFC_MAX_DIMENSIONS]; + index_type n; + index_type dim; + + int empty; + int mask_kind; + + empty = 0; + + mptr = mask->data; + + /* Use the same loop for all logical types, by using GFC_LOGICAL_1 + and using shifting to address size and endian issues. */ + + mask_kind = GFC_DESCRIPTOR_SIZE (mask); + + if (mask_kind == 1 || mask_kind == 2 || mask_kind == 4 || mask_kind == 8 +#ifdef HAVE_GFC_LOGICAL_16 + || mask_kind == 16 +#endif + ) + { + /* Do not convert a NULL pointer as we use test for NULL below. */ + if (mptr) + mptr = GFOR_POINTER_TO_L1 (mptr, mask_kind); + } + else + runtime_error ("Funny sized logical array"); + + if (ret->data == NULL) + { + /* The front end has signalled that we need to populate the + return array descriptor. 
*/ + dim = GFC_DESCRIPTOR_RANK (mask); + rs = 1; + for (n = 0; n < dim; n++) + { + count[n] = 0; + ret->dim[n].stride = rs; + ret->dim[n].lbound = 0; + ret->dim[n].ubound = mask->dim[n].ubound - mask->dim[n].lbound; + extent[n] = ret->dim[n].ubound + 1; + empty = empty || extent[n] <= 0; + rstride[n] = ret->dim[n].stride; + mstride[n] = mask->dim[n].stride * mask_kind; + rs *= extent[n]; + } + ret->offset = 0; + ret->data = internal_malloc_size (rs * sizeof (GFC_COMPLEX_10)); + } + else + { + dim = GFC_DESCRIPTOR_RANK (ret); + for (n = 0; n < dim; n++) + { + count[n] = 0; + extent[n] = ret->dim[n].ubound + 1 - ret->dim[n].lbound; + empty = empty || extent[n] <= 0; + rstride[n] = ret->dim[n].stride; + mstride[n] = mask->dim[n].stride * mask_kind; + } + if (rstride[0] == 0) + rstride[0] = 1; + } + + if (empty) + return; + + if (mstride[0] == 0) + mstride[0] = 1; + + vstride0 = vector->dim[0].stride; + if (vstride0 == 0) + vstride0 = 1; + rstride0 = rstride[0]; + mstride0 = mstride[0]; + rptr = ret->data; + vptr = vector->data; + + while (rptr) + { + if (*mptr) + { + /* From vector. */ + *rptr = *vptr; + vptr += vstride0; + } + else + { + /* From field. */ + *rptr = fval; + } + /* Advance to the next element. */ + rptr += rstride0; + mptr += mstride0; + count[0]++; + n = 0; + while (count[n] == extent[n]) + { + /* When we get to the end of a dimension, reset it and increment + the next dimension. */ + count[n] = 0; + /* We could precalculate these products, but this is a less + frequently used path so probably not worth it. */ + rptr -= rstride[n] * extent[n]; + mptr -= mstride[n] * extent[n]; + n++; + if (n >= dim) + { + /* Break out of the loop. */ + rptr = NULL; + break; + } + else + { + count[n]++; + rptr += rstride[n]; + mptr += mstride[n]; + } + } + } +} + +void +unpack1_c10 (gfc_array_c10 *ret, const gfc_array_c10 *vector, + const gfc_array_l1 *mask, const gfc_array_c10 *field) +{ + /* r.* indicates the return array. */ + index_type rstride[GFC_MAX_DIMENSIONS]; + index_type rstride0; + index_type rs; + GFC_COMPLEX_10 *rptr; + /* v.* indicates the vector array. */ + index_type vstride0; + GFC_COMPLEX_10 *vptr; + /* f.* indicates the field array. */ + index_type fstride[GFC_MAX_DIMENSIONS]; + index_type fstride0; + const GFC_COMPLEX_10 *fptr; + /* m.* indicates the mask array. */ + index_type mstride[GFC_MAX_DIMENSIONS]; + index_type mstride0; + const GFC_LOGICAL_1 *mptr; + + index_type count[GFC_MAX_DIMENSIONS]; + index_type extent[GFC_MAX_DIMENSIONS]; + index_type n; + index_type dim; + + int empty; + int mask_kind; + + empty = 0; + + mptr = mask->data; + + /* Use the same loop for all logical types, by using GFC_LOGICAL_1 + and using shifting to address size and endian issues. */ + + mask_kind = GFC_DESCRIPTOR_SIZE (mask); + + if (mask_kind == 1 || mask_kind == 2 || mask_kind == 4 || mask_kind == 8 +#ifdef HAVE_GFC_LOGICAL_16 + || mask_kind == 16 +#endif + ) + { + /* Do not convert a NULL pointer as we use test for NULL below. */ + if (mptr) + mptr = GFOR_POINTER_TO_L1 (mptr, mask_kind); + } + else + runtime_error ("Funny sized logical array"); + + if (ret->data == NULL) + { + /* The front end has signalled that we need to populate the + return array descriptor. 
*/ + dim = GFC_DESCRIPTOR_RANK (mask); + rs = 1; + for (n = 0; n < dim; n++) + { + count[n] = 0; + ret->dim[n].stride = rs; + ret->dim[n].lbound = 0; + ret->dim[n].ubound = mask->dim[n].ubound - mask->dim[n].lbound; + extent[n] = ret->dim[n].ubound + 1; + empty = empty || extent[n] <= 0; + rstride[n] = ret->dim[n].stride; + fstride[n] = field->dim[n].stride; + mstride[n] = mask->dim[n].stride * mask_kind; + rs *= extent[n]; + } + ret->offset = 0; + ret->data = internal_malloc_size (rs * sizeof (GFC_COMPLEX_10)); + } + else + { + dim = GFC_DESCRIPTOR_RANK (ret); + for (n = 0; n < dim; n++) + { + count[n] = 0; + extent[n] = ret->dim[n].ubound + 1 - ret->dim[n].lbound; + empty = empty || extent[n] <= 0; + rstride[n] = ret->dim[n].stride; + fstride[n] = field->dim[n].stride; + mstride[n] = mask->dim[n].stride * mask_kind; + } + if (rstride[0] == 0) + rstride[0] = 1; + } + + if (empty) + return; + + if (fstride[0] == 0) + fstride[0] = 1; + if (mstride[0] == 0) + mstride[0] = 1; + + vstride0 = vector->dim[0].stride; + if (vstride0 == 0) + vstride0 = 1; + rstride0 = rstride[0]; + fstride0 = fstride[0]; + mstride0 = mstride[0]; + rptr = ret->data; + fptr = field->data; + vptr = vector->data; + + while (rptr) + { + if (*mptr) + { + /* From vector. */ + *rptr = *vptr; + vptr += vstride0; + } + else + { + /* From field. */ + *rptr = *fptr; + } + /* Advance to the next element. */ + rptr += rstride0; + fptr += fstride0; + mptr += mstride0; + count[0]++; + n = 0; + while (count[n] == extent[n]) + { + /* When we get to the end of a dimension, reset it and increment + the next dimension. */ + count[n] = 0; + /* We could precalculate these products, but this is a less + frequently used path so probably not worth it. */ + rptr -= rstride[n] * extent[n]; + fptr -= fstride[n] * extent[n]; + mptr -= mstride[n] * extent[n]; + n++; + if (n >= dim) + { + /* Break out of the loop. */ + rptr = NULL; + break; + } + else + { + count[n]++; + rptr += rstride[n]; + fptr += fstride[n]; + mptr += mstride[n]; + } + } + } +} + +#endif + diff --git a/libgfortran/generated/unpack_c16.c b/libgfortran/generated/unpack_c16.c new file mode 100644 index 00000000000..2d82a10fc84 --- /dev/null +++ b/libgfortran/generated/unpack_c16.c @@ -0,0 +1,338 @@ +/* Specific implementation of the UNPACK intrinsic + Copyright 2008 Free Software Foundation, Inc. + Contributed by Thomas Koenig <tkoenig@gcc.gnu.org>, based on + unpack_generic.c by Paul Brook <paul@nowt.org>. + +This file is part of the GNU Fortran 95 runtime library (libgfortran). + +Libgfortran is free software; you can redistribute it and/or +modify it under the terms of the GNU General Public +License as published by the Free Software Foundation; either +version 2 of the License, or (at your option) any later version. + +In addition to the permissions in the GNU General Public License, the +Free Software Foundation gives you unlimited permission to link the +compiled version of this file into combinations with other programs, +and to distribute those combinations without any restriction coming +from the use of this file. (The General Public License restrictions +do apply in other respects; for example, they cover modification of +the file, and distribution when not linked into a combine +executable.) + +Ligbfortran is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. 
+ +You should have received a copy of the GNU General Public +License along with libgfortran; see the file COPYING. If not, +write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, +Boston, MA 02110-1301, USA. */ + +#include "libgfortran.h" +#include <stdlib.h> +#include <assert.h> +#include <string.h> + + +#if defined (HAVE_GFC_COMPLEX_16) + +void +unpack0_c16 (gfc_array_c16 *ret, const gfc_array_c16 *vector, + const gfc_array_l1 *mask, const GFC_COMPLEX_16 *fptr) +{ + /* r.* indicates the return array. */ + index_type rstride[GFC_MAX_DIMENSIONS]; + index_type rstride0; + index_type rs; + GFC_COMPLEX_16 *rptr; + /* v.* indicates the vector array. */ + index_type vstride0; + GFC_COMPLEX_16 *vptr; + /* Value for field, this is constant. */ + const GFC_COMPLEX_16 fval = *fptr; + /* m.* indicates the mask array. */ + index_type mstride[GFC_MAX_DIMENSIONS]; + index_type mstride0; + const GFC_LOGICAL_1 *mptr; + + index_type count[GFC_MAX_DIMENSIONS]; + index_type extent[GFC_MAX_DIMENSIONS]; + index_type n; + index_type dim; + + int empty; + int mask_kind; + + empty = 0; + + mptr = mask->data; + + /* Use the same loop for all logical types, by using GFC_LOGICAL_1 + and using shifting to address size and endian issues. */ + + mask_kind = GFC_DESCRIPTOR_SIZE (mask); + + if (mask_kind == 1 || mask_kind == 2 || mask_kind == 4 || mask_kind == 8 +#ifdef HAVE_GFC_LOGICAL_16 + || mask_kind == 16 +#endif + ) + { + /* Do not convert a NULL pointer as we use test for NULL below. */ + if (mptr) + mptr = GFOR_POINTER_TO_L1 (mptr, mask_kind); + } + else + runtime_error ("Funny sized logical array"); + + if (ret->data == NULL) + { + /* The front end has signalled that we need to populate the + return array descriptor. */ + dim = GFC_DESCRIPTOR_RANK (mask); + rs = 1; + for (n = 0; n < dim; n++) + { + count[n] = 0; + ret->dim[n].stride = rs; + ret->dim[n].lbound = 0; + ret->dim[n].ubound = mask->dim[n].ubound - mask->dim[n].lbound; + extent[n] = ret->dim[n].ubound + 1; + empty = empty || extent[n] <= 0; + rstride[n] = ret->dim[n].stride; + mstride[n] = mask->dim[n].stride * mask_kind; + rs *= extent[n]; + } + ret->offset = 0; + ret->data = internal_malloc_size (rs * sizeof (GFC_COMPLEX_16)); + } + else + { + dim = GFC_DESCRIPTOR_RANK (ret); + for (n = 0; n < dim; n++) + { + count[n] = 0; + extent[n] = ret->dim[n].ubound + 1 - ret->dim[n].lbound; + empty = empty || extent[n] <= 0; + rstride[n] = ret->dim[n].stride; + mstride[n] = mask->dim[n].stride * mask_kind; + } + if (rstride[0] == 0) + rstride[0] = 1; + } + + if (empty) + return; + + if (mstride[0] == 0) + mstride[0] = 1; + + vstride0 = vector->dim[0].stride; + if (vstride0 == 0) + vstride0 = 1; + rstride0 = rstride[0]; + mstride0 = mstride[0]; + rptr = ret->data; + vptr = vector->data; + + while (rptr) + { + if (*mptr) + { + /* From vector. */ + *rptr = *vptr; + vptr += vstride0; + } + else + { + /* From field. */ + *rptr = fval; + } + /* Advance to the next element. */ + rptr += rstride0; + mptr += mstride0; + count[0]++; + n = 0; + while (count[n] == extent[n]) + { + /* When we get to the end of a dimension, reset it and increment + the next dimension. */ + count[n] = 0; + /* We could precalculate these products, but this is a less + frequently used path so probably not worth it. */ + rptr -= rstride[n] * extent[n]; + mptr -= mstride[n] * extent[n]; + n++; + if (n >= dim) + { + /* Break out of the loop. 
*/ + rptr = NULL; + break; + } + else + { + count[n]++; + rptr += rstride[n]; + mptr += mstride[n]; + } + } + } +} + +void +unpack1_c16 (gfc_array_c16 *ret, const gfc_array_c16 *vector, + const gfc_array_l1 *mask, const gfc_array_c16 *field) +{ + /* r.* indicates the return array. */ + index_type rstride[GFC_MAX_DIMENSIONS]; + index_type rstride0; + index_type rs; + GFC_COMPLEX_16 *rptr; + /* v.* indicates the vector array. */ + index_type vstride0; + GFC_COMPLEX_16 *vptr; + /* f.* indicates the field array. */ + index_type fstride[GFC_MAX_DIMENSIONS]; + index_type fstride0; + const GFC_COMPLEX_16 *fptr; + /* m.* indicates the mask array. */ + index_type mstride[GFC_MAX_DIMENSIONS]; + index_type mstride0; + const GFC_LOGICAL_1 *mptr; + + index_type count[GFC_MAX_DIMENSIONS]; + index_type extent[GFC_MAX_DIMENSIONS]; + index_type n; + index_type dim; + + int empty; + int mask_kind; + + empty = 0; + + mptr = mask->data; + + /* Use the same loop for all logical types, by using GFC_LOGICAL_1 + and using shifting to address size and endian issues. */ + + mask_kind = GFC_DESCRIPTOR_SIZE (mask); + + if (mask_kind == 1 || mask_kind == 2 || mask_kind == 4 || mask_kind == 8 +#ifdef HAVE_GFC_LOGICAL_16 + || mask_kind == 16 +#endif + ) + { + /* Do not convert a NULL pointer as we use test for NULL below. */ + if (mptr) + mptr = GFOR_POINTER_TO_L1 (mptr, mask_kind); + } + else + runtime_error ("Funny sized logical array"); + + if (ret->data == NULL) + { + /* The front end has signalled that we need to populate the + return array descriptor. */ + dim = GFC_DESCRIPTOR_RANK (mask); + rs = 1; + for (n = 0; n < dim; n++) + { + count[n] = 0; + ret->dim[n].stride = rs; + ret->dim[n].lbound = 0; + ret->dim[n].ubound = mask->dim[n].ubound - mask->dim[n].lbound; + extent[n] = ret->dim[n].ubound + 1; + empty = empty || extent[n] <= 0; + rstride[n] = ret->dim[n].stride; + fstride[n] = field->dim[n].stride; + mstride[n] = mask->dim[n].stride * mask_kind; + rs *= extent[n]; + } + ret->offset = 0; + ret->data = internal_malloc_size (rs * sizeof (GFC_COMPLEX_16)); + } + else + { + dim = GFC_DESCRIPTOR_RANK (ret); + for (n = 0; n < dim; n++) + { + count[n] = 0; + extent[n] = ret->dim[n].ubound + 1 - ret->dim[n].lbound; + empty = empty || extent[n] <= 0; + rstride[n] = ret->dim[n].stride; + fstride[n] = field->dim[n].stride; + mstride[n] = mask->dim[n].stride * mask_kind; + } + if (rstride[0] == 0) + rstride[0] = 1; + } + + if (empty) + return; + + if (fstride[0] == 0) + fstride[0] = 1; + if (mstride[0] == 0) + mstride[0] = 1; + + vstride0 = vector->dim[0].stride; + if (vstride0 == 0) + vstride0 = 1; + rstride0 = rstride[0]; + fstride0 = fstride[0]; + mstride0 = mstride[0]; + rptr = ret->data; + fptr = field->data; + vptr = vector->data; + + while (rptr) + { + if (*mptr) + { + /* From vector. */ + *rptr = *vptr; + vptr += vstride0; + } + else + { + /* From field. */ + *rptr = *fptr; + } + /* Advance to the next element. */ + rptr += rstride0; + fptr += fstride0; + mptr += mstride0; + count[0]++; + n = 0; + while (count[n] == extent[n]) + { + /* When we get to the end of a dimension, reset it and increment + the next dimension. */ + count[n] = 0; + /* We could precalculate these products, but this is a less + frequently used path so probably not worth it. */ + rptr -= rstride[n] * extent[n]; + fptr -= fstride[n] * extent[n]; + mptr -= mstride[n] * extent[n]; + n++; + if (n >= dim) + { + /* Break out of the loop. 
*/ + rptr = NULL; + break; + } + else + { + count[n]++; + rptr += rstride[n]; + fptr += fstride[n]; + mptr += mstride[n]; + } + } + } +} + +#endif + diff --git a/libgfortran/generated/unpack_c4.c b/libgfortran/generated/unpack_c4.c new file mode 100644 index 00000000000..472ce48c26e --- /dev/null +++ b/libgfortran/generated/unpack_c4.c @@ -0,0 +1,338 @@ +/* Specific implementation of the UNPACK intrinsic + Copyright 2008 Free Software Foundation, Inc. + Contributed by Thomas Koenig <tkoenig@gcc.gnu.org>, based on + unpack_generic.c by Paul Brook <paul@nowt.org>. + +This file is part of the GNU Fortran 95 runtime library (libgfortran). + +Libgfortran is free software; you can redistribute it and/or +modify it under the terms of the GNU General Public +License as published by the Free Software Foundation; either +version 2 of the License, or (at your option) any later version. + +In addition to the permissions in the GNU General Public License, the +Free Software Foundation gives you unlimited permission to link the +compiled version of this file into combinations with other programs, +and to distribute those combinations without any restriction coming +from the use of this file. (The General Public License restrictions +do apply in other respects; for example, they cover modification of +the file, and distribution when not linked into a combine +executable.) + +Ligbfortran is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public +License along with libgfortran; see the file COPYING. If not, +write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, +Boston, MA 02110-1301, USA. */ + +#include "libgfortran.h" +#include <stdlib.h> +#include <assert.h> +#include <string.h> + + +#if defined (HAVE_GFC_COMPLEX_4) + +void +unpack0_c4 (gfc_array_c4 *ret, const gfc_array_c4 *vector, + const gfc_array_l1 *mask, const GFC_COMPLEX_4 *fptr) +{ + /* r.* indicates the return array. */ + index_type rstride[GFC_MAX_DIMENSIONS]; + index_type rstride0; + index_type rs; + GFC_COMPLEX_4 *rptr; + /* v.* indicates the vector array. */ + index_type vstride0; + GFC_COMPLEX_4 *vptr; + /* Value for field, this is constant. */ + const GFC_COMPLEX_4 fval = *fptr; + /* m.* indicates the mask array. */ + index_type mstride[GFC_MAX_DIMENSIONS]; + index_type mstride0; + const GFC_LOGICAL_1 *mptr; + + index_type count[GFC_MAX_DIMENSIONS]; + index_type extent[GFC_MAX_DIMENSIONS]; + index_type n; + index_type dim; + + int empty; + int mask_kind; + + empty = 0; + + mptr = mask->data; + + /* Use the same loop for all logical types, by using GFC_LOGICAL_1 + and using shifting to address size and endian issues. */ + + mask_kind = GFC_DESCRIPTOR_SIZE (mask); + + if (mask_kind == 1 || mask_kind == 2 || mask_kind == 4 || mask_kind == 8 +#ifdef HAVE_GFC_LOGICAL_16 + || mask_kind == 16 +#endif + ) + { + /* Do not convert a NULL pointer as we use test for NULL below. */ + if (mptr) + mptr = GFOR_POINTER_TO_L1 (mptr, mask_kind); + } + else + runtime_error ("Funny sized logical array"); + + if (ret->data == NULL) + { + /* The front end has signalled that we need to populate the + return array descriptor. 
*/ + dim = GFC_DESCRIPTOR_RANK (mask); + rs = 1; + for (n = 0; n < dim; n++) + { + count[n] = 0; + ret->dim[n].stride = rs; + ret->dim[n].lbound = 0; + ret->dim[n].ubound = mask->dim[n].ubound - mask->dim[n].lbound; + extent[n] = ret->dim[n].ubound + 1; + empty = empty || extent[n] <= 0; + rstride[n] = ret->dim[n].stride; + mstride[n] = mask->dim[n].stride * mask_kind; + rs *= extent[n]; + } + ret->offset = 0; + ret->data = internal_malloc_size (rs * sizeof (GFC_COMPLEX_4)); + } + else + { + dim = GFC_DESCRIPTOR_RANK (ret); + for (n = 0; n < dim; n++) + { + count[n] = 0; + extent[n] = ret->dim[n].ubound + 1 - ret->dim[n].lbound; + empty = empty || extent[n] <= 0; + rstride[n] = ret->dim[n].stride; + mstride[n] = mask->dim[n].stride * mask_kind; + } + if (rstride[0] == 0) + rstride[0] = 1; + } + + if (empty) + return; + + if (mstride[0] == 0) + mstride[0] = 1; + + vstride0 = vector->dim[0].stride; + if (vstride0 == 0) + vstride0 = 1; + rstride0 = rstride[0]; + mstride0 = mstride[0]; + rptr = ret->data; + vptr = vector->data; + + while (rptr) + { + if (*mptr) + { + /* From vector. */ + *rptr = *vptr; + vptr += vstride0; + } + else + { + /* From field. */ + *rptr = fval; + } + /* Advance to the next element. */ + rptr += rstride0; + mptr += mstride0; + count[0]++; + n = 0; + while (count[n] == extent[n]) + { + /* When we get to the end of a dimension, reset it and increment + the next dimension. */ + count[n] = 0; + /* We could precalculate these products, but this is a less + frequently used path so probably not worth it. */ + rptr -= rstride[n] * extent[n]; + mptr -= mstride[n] * extent[n]; + n++; + if (n >= dim) + { + /* Break out of the loop. */ + rptr = NULL; + break; + } + else + { + count[n]++; + rptr += rstride[n]; + mptr += mstride[n]; + } + } + } +} + +void +unpack1_c4 (gfc_array_c4 *ret, const gfc_array_c4 *vector, + const gfc_array_l1 *mask, const gfc_array_c4 *field) +{ + /* r.* indicates the return array. */ + index_type rstride[GFC_MAX_DIMENSIONS]; + index_type rstride0; + index_type rs; + GFC_COMPLEX_4 *rptr; + /* v.* indicates the vector array. */ + index_type vstride0; + GFC_COMPLEX_4 *vptr; + /* f.* indicates the field array. */ + index_type fstride[GFC_MAX_DIMENSIONS]; + index_type fstride0; + const GFC_COMPLEX_4 *fptr; + /* m.* indicates the mask array. */ + index_type mstride[GFC_MAX_DIMENSIONS]; + index_type mstride0; + const GFC_LOGICAL_1 *mptr; + + index_type count[GFC_MAX_DIMENSIONS]; + index_type extent[GFC_MAX_DIMENSIONS]; + index_type n; + index_type dim; + + int empty; + int mask_kind; + + empty = 0; + + mptr = mask->data; + + /* Use the same loop for all logical types, by using GFC_LOGICAL_1 + and using shifting to address size and endian issues. */ + + mask_kind = GFC_DESCRIPTOR_SIZE (mask); + + if (mask_kind == 1 || mask_kind == 2 || mask_kind == 4 || mask_kind == 8 +#ifdef HAVE_GFC_LOGICAL_16 + || mask_kind == 16 +#endif + ) + { + /* Do not convert a NULL pointer as we use test for NULL below. */ + if (mptr) + mptr = GFOR_POINTER_TO_L1 (mptr, mask_kind); + } + else + runtime_error ("Funny sized logical array"); + + if (ret->data == NULL) + { + /* The front end has signalled that we need to populate the + return array descriptor. 
*/ + dim = GFC_DESCRIPTOR_RANK (mask); + rs = 1; + for (n = 0; n < dim; n++) + { + count[n] = 0; + ret->dim[n].stride = rs; + ret->dim[n].lbound = 0; + ret->dim[n].ubound = mask->dim[n].ubound - mask->dim[n].lbound; + extent[n] = ret->dim[n].ubound + 1; + empty = empty || extent[n] <= 0; + rstride[n] = ret->dim[n].stride; + fstride[n] = field->dim[n].stride; + mstride[n] = mask->dim[n].stride * mask_kind; + rs *= extent[n]; + } + ret->offset = 0; + ret->data = internal_malloc_size (rs * sizeof (GFC_COMPLEX_4)); + } + else + { + dim = GFC_DESCRIPTOR_RANK (ret); + for (n = 0; n < dim; n++) + { + count[n] = 0; + extent[n] = ret->dim[n].ubound + 1 - ret->dim[n].lbound; + empty = empty || extent[n] <= 0; + rstride[n] = ret->dim[n].stride; + fstride[n] = field->dim[n].stride; + mstride[n] = mask->dim[n].stride * mask_kind; + } + if (rstride[0] == 0) + rstride[0] = 1; + } + + if (empty) + return; + + if (fstride[0] == 0) + fstride[0] = 1; + if (mstride[0] == 0) + mstride[0] = 1; + + vstride0 = vector->dim[0].stride; + if (vstride0 == 0) + vstride0 = 1; + rstride0 = rstride[0]; + fstride0 = fstride[0]; + mstride0 = mstride[0]; + rptr = ret->data; + fptr = field->data; + vptr = vector->data; + + while (rptr) + { + if (*mptr) + { + /* From vector. */ + *rptr = *vptr; + vptr += vstride0; + } + else + { + /* From field. */ + *rptr = *fptr; + } + /* Advance to the next element. */ + rptr += rstride0; + fptr += fstride0; + mptr += mstride0; + count[0]++; + n = 0; + while (count[n] == extent[n]) + { + /* When we get to the end of a dimension, reset it and increment + the next dimension. */ + count[n] = 0; + /* We could precalculate these products, but this is a less + frequently used path so probably not worth it. */ + rptr -= rstride[n] * extent[n]; + fptr -= fstride[n] * extent[n]; + mptr -= mstride[n] * extent[n]; + n++; + if (n >= dim) + { + /* Break out of the loop. */ + rptr = NULL; + break; + } + else + { + count[n]++; + rptr += rstride[n]; + fptr += fstride[n]; + mptr += mstride[n]; + } + } + } +} + +#endif + diff --git a/libgfortran/generated/unpack_c8.c b/libgfortran/generated/unpack_c8.c new file mode 100644 index 00000000000..62116b78bb2 --- /dev/null +++ b/libgfortran/generated/unpack_c8.c @@ -0,0 +1,338 @@ +/* Specific implementation of the UNPACK intrinsic + Copyright 2008 Free Software Foundation, Inc. + Contributed by Thomas Koenig <tkoenig@gcc.gnu.org>, based on + unpack_generic.c by Paul Brook <paul@nowt.org>. + +This file is part of the GNU Fortran 95 runtime library (libgfortran). + +Libgfortran is free software; you can redistribute it and/or +modify it under the terms of the GNU General Public +License as published by the Free Software Foundation; either +version 2 of the License, or (at your option) any later version. + +In addition to the permissions in the GNU General Public License, the +Free Software Foundation gives you unlimited permission to link the +compiled version of this file into combinations with other programs, +and to distribute those combinations without any restriction coming +from the use of this file. (The General Public License restrictions +do apply in other respects; for example, they cover modification of +the file, and distribution when not linked into a combine +executable.) + +Ligbfortran is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. 
+ +You should have received a copy of the GNU General Public +License along with libgfortran; see the file COPYING. If not, +write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, +Boston, MA 02110-1301, USA. */ + +#include "libgfortran.h" +#include <stdlib.h> +#include <assert.h> +#include <string.h> + + +#if defined (HAVE_GFC_COMPLEX_8) + +void +unpack0_c8 (gfc_array_c8 *ret, const gfc_array_c8 *vector, + const gfc_array_l1 *mask, const GFC_COMPLEX_8 *fptr) +{ + /* r.* indicates the return array. */ + index_type rstride[GFC_MAX_DIMENSIONS]; + index_type rstride0; + index_type rs; + GFC_COMPLEX_8 *rptr; + /* v.* indicates the vector array. */ + index_type vstride0; + GFC_COMPLEX_8 *vptr; + /* Value for field, this is constant. */ + const GFC_COMPLEX_8 fval = *fptr; + /* m.* indicates the mask array. */ + index_type mstride[GFC_MAX_DIMENSIONS]; + index_type mstride0; + const GFC_LOGICAL_1 *mptr; + + index_type count[GFC_MAX_DIMENSIONS]; + index_type extent[GFC_MAX_DIMENSIONS]; + index_type n; + index_type dim; + + int empty; + int mask_kind; + + empty = 0; + + mptr = mask->data; + + /* Use the same loop for all logical types, by using GFC_LOGICAL_1 + and using shifting to address size and endian issues. */ + + mask_kind = GFC_DESCRIPTOR_SIZE (mask); + + if (mask_kind == 1 || mask_kind == 2 || mask_kind == 4 || mask_kind == 8 +#ifdef HAVE_GFC_LOGICAL_16 + || mask_kind == 16 +#endif + ) + { + /* Do not convert a NULL pointer as we use test for NULL below. */ + if (mptr) + mptr = GFOR_POINTER_TO_L1 (mptr, mask_kind); + } + else + runtime_error ("Funny sized logical array"); + + if (ret->data == NULL) + { + /* The front end has signalled that we need to populate the + return array descriptor. */ + dim = GFC_DESCRIPTOR_RANK (mask); + rs = 1; + for (n = 0; n < dim; n++) + { + count[n] = 0; + ret->dim[n].stride = rs; + ret->dim[n].lbound = 0; + ret->dim[n].ubound = mask->dim[n].ubound - mask->dim[n].lbound; + extent[n] = ret->dim[n].ubound + 1; + empty = empty || extent[n] <= 0; + rstride[n] = ret->dim[n].stride; + mstride[n] = mask->dim[n].stride * mask_kind; + rs *= extent[n]; + } + ret->offset = 0; + ret->data = internal_malloc_size (rs * sizeof (GFC_COMPLEX_8)); + } + else + { + dim = GFC_DESCRIPTOR_RANK (ret); + for (n = 0; n < dim; n++) + { + count[n] = 0; + extent[n] = ret->dim[n].ubound + 1 - ret->dim[n].lbound; + empty = empty || extent[n] <= 0; + rstride[n] = ret->dim[n].stride; + mstride[n] = mask->dim[n].stride * mask_kind; + } + if (rstride[0] == 0) + rstride[0] = 1; + } + + if (empty) + return; + + if (mstride[0] == 0) + mstride[0] = 1; + + vstride0 = vector->dim[0].stride; + if (vstride0 == 0) + vstride0 = 1; + rstride0 = rstride[0]; + mstride0 = mstride[0]; + rptr = ret->data; + vptr = vector->data; + + while (rptr) + { + if (*mptr) + { + /* From vector. */ + *rptr = *vptr; + vptr += vstride0; + } + else + { + /* From field. */ + *rptr = fval; + } + /* Advance to the next element. */ + rptr += rstride0; + mptr += mstride0; + count[0]++; + n = 0; + while (count[n] == extent[n]) + { + /* When we get to the end of a dimension, reset it and increment + the next dimension. */ + count[n] = 0; + /* We could precalculate these products, but this is a less + frequently used path so probably not worth it. */ + rptr -= rstride[n] * extent[n]; + mptr -= mstride[n] * extent[n]; + n++; + if (n >= dim) + { + /* Break out of the loop. 
*/ + rptr = NULL; + break; + } + else + { + count[n]++; + rptr += rstride[n]; + mptr += mstride[n]; + } + } + } +} + +void +unpack1_c8 (gfc_array_c8 *ret, const gfc_array_c8 *vector, + const gfc_array_l1 *mask, const gfc_array_c8 *field) +{ + /* r.* indicates the return array. */ + index_type rstride[GFC_MAX_DIMENSIONS]; + index_type rstride0; + index_type rs; + GFC_COMPLEX_8 *rptr; + /* v.* indicates the vector array. */ + index_type vstride0; + GFC_COMPLEX_8 *vptr; + /* f.* indicates the field array. */ + index_type fstride[GFC_MAX_DIMENSIONS]; + index_type fstride0; + const GFC_COMPLEX_8 *fptr; + /* m.* indicates the mask array. */ + index_type mstride[GFC_MAX_DIMENSIONS]; + index_type mstride0; + const GFC_LOGICAL_1 *mptr; + + index_type count[GFC_MAX_DIMENSIONS]; + index_type extent[GFC_MAX_DIMENSIONS]; + index_type n; + index_type dim; + + int empty; + int mask_kind; + + empty = 0; + + mptr = mask->data; + + /* Use the same loop for all logical types, by using GFC_LOGICAL_1 + and using shifting to address size and endian issues. */ + + mask_kind = GFC_DESCRIPTOR_SIZE (mask); + + if (mask_kind == 1 || mask_kind == 2 || mask_kind == 4 || mask_kind == 8 +#ifdef HAVE_GFC_LOGICAL_16 + || mask_kind == 16 +#endif + ) + { + /* Do not convert a NULL pointer as we use test for NULL below. */ + if (mptr) + mptr = GFOR_POINTER_TO_L1 (mptr, mask_kind); + } + else + runtime_error ("Funny sized logical array"); + + if (ret->data == NULL) + { + /* The front end has signalled that we need to populate the + return array descriptor. */ + dim = GFC_DESCRIPTOR_RANK (mask); + rs = 1; + for (n = 0; n < dim; n++) + { + count[n] = 0; + ret->dim[n].stride = rs; + ret->dim[n].lbound = 0; + ret->dim[n].ubound = mask->dim[n].ubound - mask->dim[n].lbound; + extent[n] = ret->dim[n].ubound + 1; + empty = empty || extent[n] <= 0; + rstride[n] = ret->dim[n].stride; + fstride[n] = field->dim[n].stride; + mstride[n] = mask->dim[n].stride * mask_kind; + rs *= extent[n]; + } + ret->offset = 0; + ret->data = internal_malloc_size (rs * sizeof (GFC_COMPLEX_8)); + } + else + { + dim = GFC_DESCRIPTOR_RANK (ret); + for (n = 0; n < dim; n++) + { + count[n] = 0; + extent[n] = ret->dim[n].ubound + 1 - ret->dim[n].lbound; + empty = empty || extent[n] <= 0; + rstride[n] = ret->dim[n].stride; + fstride[n] = field->dim[n].stride; + mstride[n] = mask->dim[n].stride * mask_kind; + } + if (rstride[0] == 0) + rstride[0] = 1; + } + + if (empty) + return; + + if (fstride[0] == 0) + fstride[0] = 1; + if (mstride[0] == 0) + mstride[0] = 1; + + vstride0 = vector->dim[0].stride; + if (vstride0 == 0) + vstride0 = 1; + rstride0 = rstride[0]; + fstride0 = fstride[0]; + mstride0 = mstride[0]; + rptr = ret->data; + fptr = field->data; + vptr = vector->data; + + while (rptr) + { + if (*mptr) + { + /* From vector. */ + *rptr = *vptr; + vptr += vstride0; + } + else + { + /* From field. */ + *rptr = *fptr; + } + /* Advance to the next element. */ + rptr += rstride0; + fptr += fstride0; + mptr += mstride0; + count[0]++; + n = 0; + while (count[n] == extent[n]) + { + /* When we get to the end of a dimension, reset it and increment + the next dimension. */ + count[n] = 0; + /* We could precalculate these products, but this is a less + frequently used path so probably not worth it. */ + rptr -= rstride[n] * extent[n]; + fptr -= fstride[n] * extent[n]; + mptr -= mstride[n] * extent[n]; + n++; + if (n >= dim) + { + /* Break out of the loop. 
*/ + rptr = NULL; + break; + } + else + { + count[n]++; + rptr += rstride[n]; + fptr += fstride[n]; + mptr += mstride[n]; + } + } + } +} + +#endif + diff --git a/libgfortran/generated/unpack_i1.c b/libgfortran/generated/unpack_i1.c new file mode 100644 index 00000000000..46a9d4eb6f2 --- /dev/null +++ b/libgfortran/generated/unpack_i1.c @@ -0,0 +1,338 @@ +/* Specific implementation of the UNPACK intrinsic + Copyright 2008 Free Software Foundation, Inc. + Contributed by Thomas Koenig <tkoenig@gcc.gnu.org>, based on + unpack_generic.c by Paul Brook <paul@nowt.org>. + +This file is part of the GNU Fortran 95 runtime library (libgfortran). + +Libgfortran is free software; you can redistribute it and/or +modify it under the terms of the GNU General Public +License as published by the Free Software Foundation; either +version 2 of the License, or (at your option) any later version. + +In addition to the permissions in the GNU General Public License, the +Free Software Foundation gives you unlimited permission to link the +compiled version of this file into combinations with other programs, +and to distribute those combinations without any restriction coming +from the use of this file. (The General Public License restrictions +do apply in other respects; for example, they cover modification of +the file, and distribution when not linked into a combine +executable.) + +Ligbfortran is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public +License along with libgfortran; see the file COPYING. If not, +write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, +Boston, MA 02110-1301, USA. */ + +#include "libgfortran.h" +#include <stdlib.h> +#include <assert.h> +#include <string.h> + + +#if defined (HAVE_GFC_INTEGER_1) + +void +unpack0_i1 (gfc_array_i1 *ret, const gfc_array_i1 *vector, + const gfc_array_l1 *mask, const GFC_INTEGER_1 *fptr) +{ + /* r.* indicates the return array. */ + index_type rstride[GFC_MAX_DIMENSIONS]; + index_type rstride0; + index_type rs; + GFC_INTEGER_1 *rptr; + /* v.* indicates the vector array. */ + index_type vstride0; + GFC_INTEGER_1 *vptr; + /* Value for field, this is constant. */ + const GFC_INTEGER_1 fval = *fptr; + /* m.* indicates the mask array. */ + index_type mstride[GFC_MAX_DIMENSIONS]; + index_type mstride0; + const GFC_LOGICAL_1 *mptr; + + index_type count[GFC_MAX_DIMENSIONS]; + index_type extent[GFC_MAX_DIMENSIONS]; + index_type n; + index_type dim; + + int empty; + int mask_kind; + + empty = 0; + + mptr = mask->data; + + /* Use the same loop for all logical types, by using GFC_LOGICAL_1 + and using shifting to address size and endian issues. */ + + mask_kind = GFC_DESCRIPTOR_SIZE (mask); + + if (mask_kind == 1 || mask_kind == 2 || mask_kind == 4 || mask_kind == 8 +#ifdef HAVE_GFC_LOGICAL_16 + || mask_kind == 16 +#endif + ) + { + /* Do not convert a NULL pointer as we use test for NULL below. */ + if (mptr) + mptr = GFOR_POINTER_TO_L1 (mptr, mask_kind); + } + else + runtime_error ("Funny sized logical array"); + + if (ret->data == NULL) + { + /* The front end has signalled that we need to populate the + return array descriptor. 
*/ + dim = GFC_DESCRIPTOR_RANK (mask); + rs = 1; + for (n = 0; n < dim; n++) + { + count[n] = 0; + ret->dim[n].stride = rs; + ret->dim[n].lbound = 0; + ret->dim[n].ubound = mask->dim[n].ubound - mask->dim[n].lbound; + extent[n] = ret->dim[n].ubound + 1; + empty = empty || extent[n] <= 0; + rstride[n] = ret->dim[n].stride; + mstride[n] = mask->dim[n].stride * mask_kind; + rs *= extent[n]; + } + ret->offset = 0; + ret->data = internal_malloc_size (rs * sizeof (GFC_INTEGER_1)); + } + else + { + dim = GFC_DESCRIPTOR_RANK (ret); + for (n = 0; n < dim; n++) + { + count[n] = 0; + extent[n] = ret->dim[n].ubound + 1 - ret->dim[n].lbound; + empty = empty || extent[n] <= 0; + rstride[n] = ret->dim[n].stride; + mstride[n] = mask->dim[n].stride * mask_kind; + } + if (rstride[0] == 0) + rstride[0] = 1; + } + + if (empty) + return; + + if (mstride[0] == 0) + mstride[0] = 1; + + vstride0 = vector->dim[0].stride; + if (vstride0 == 0) + vstride0 = 1; + rstride0 = rstride[0]; + mstride0 = mstride[0]; + rptr = ret->data; + vptr = vector->data; + + while (rptr) + { + if (*mptr) + { + /* From vector. */ + *rptr = *vptr; + vptr += vstride0; + } + else + { + /* From field. */ + *rptr = fval; + } + /* Advance to the next element. */ + rptr += rstride0; + mptr += mstride0; + count[0]++; + n = 0; + while (count[n] == extent[n]) + { + /* When we get to the end of a dimension, reset it and increment + the next dimension. */ + count[n] = 0; + /* We could precalculate these products, but this is a less + frequently used path so probably not worth it. */ + rptr -= rstride[n] * extent[n]; + mptr -= mstride[n] * extent[n]; + n++; + if (n >= dim) + { + /* Break out of the loop. */ + rptr = NULL; + break; + } + else + { + count[n]++; + rptr += rstride[n]; + mptr += mstride[n]; + } + } + } +} + +void +unpack1_i1 (gfc_array_i1 *ret, const gfc_array_i1 *vector, + const gfc_array_l1 *mask, const gfc_array_i1 *field) +{ + /* r.* indicates the return array. */ + index_type rstride[GFC_MAX_DIMENSIONS]; + index_type rstride0; + index_type rs; + GFC_INTEGER_1 *rptr; + /* v.* indicates the vector array. */ + index_type vstride0; + GFC_INTEGER_1 *vptr; + /* f.* indicates the field array. */ + index_type fstride[GFC_MAX_DIMENSIONS]; + index_type fstride0; + const GFC_INTEGER_1 *fptr; + /* m.* indicates the mask array. */ + index_type mstride[GFC_MAX_DIMENSIONS]; + index_type mstride0; + const GFC_LOGICAL_1 *mptr; + + index_type count[GFC_MAX_DIMENSIONS]; + index_type extent[GFC_MAX_DIMENSIONS]; + index_type n; + index_type dim; + + int empty; + int mask_kind; + + empty = 0; + + mptr = mask->data; + + /* Use the same loop for all logical types, by using GFC_LOGICAL_1 + and using shifting to address size and endian issues. */ + + mask_kind = GFC_DESCRIPTOR_SIZE (mask); + + if (mask_kind == 1 || mask_kind == 2 || mask_kind == 4 || mask_kind == 8 +#ifdef HAVE_GFC_LOGICAL_16 + || mask_kind == 16 +#endif + ) + { + /* Do not convert a NULL pointer as we use test for NULL below. */ + if (mptr) + mptr = GFOR_POINTER_TO_L1 (mptr, mask_kind); + } + else + runtime_error ("Funny sized logical array"); + + if (ret->data == NULL) + { + /* The front end has signalled that we need to populate the + return array descriptor. 
*/ + dim = GFC_DESCRIPTOR_RANK (mask); + rs = 1; + for (n = 0; n < dim; n++) + { + count[n] = 0; + ret->dim[n].stride = rs; + ret->dim[n].lbound = 0; + ret->dim[n].ubound = mask->dim[n].ubound - mask->dim[n].lbound; + extent[n] = ret->dim[n].ubound + 1; + empty = empty || extent[n] <= 0; + rstride[n] = ret->dim[n].stride; + fstride[n] = field->dim[n].stride; + mstride[n] = mask->dim[n].stride * mask_kind; + rs *= extent[n]; + } + ret->offset = 0; + ret->data = internal_malloc_size (rs * sizeof (GFC_INTEGER_1)); + } + else + { + dim = GFC_DESCRIPTOR_RANK (ret); + for (n = 0; n < dim; n++) + { + count[n] = 0; + extent[n] = ret->dim[n].ubound + 1 - ret->dim[n].lbound; + empty = empty || extent[n] <= 0; + rstride[n] = ret->dim[n].stride; + fstride[n] = field->dim[n].stride; + mstride[n] = mask->dim[n].stride * mask_kind; + } + if (rstride[0] == 0) + rstride[0] = 1; + } + + if (empty) + return; + + if (fstride[0] == 0) + fstride[0] = 1; + if (mstride[0] == 0) + mstride[0] = 1; + + vstride0 = vector->dim[0].stride; + if (vstride0 == 0) + vstride0 = 1; + rstride0 = rstride[0]; + fstride0 = fstride[0]; + mstride0 = mstride[0]; + rptr = ret->data; + fptr = field->data; + vptr = vector->data; + + while (rptr) + { + if (*mptr) + { + /* From vector. */ + *rptr = *vptr; + vptr += vstride0; + } + else + { + /* From field. */ + *rptr = *fptr; + } + /* Advance to the next element. */ + rptr += rstride0; + fptr += fstride0; + mptr += mstride0; + count[0]++; + n = 0; + while (count[n] == extent[n]) + { + /* When we get to the end of a dimension, reset it and increment + the next dimension. */ + count[n] = 0; + /* We could precalculate these products, but this is a less + frequently used path so probably not worth it. */ + rptr -= rstride[n] * extent[n]; + fptr -= fstride[n] * extent[n]; + mptr -= mstride[n] * extent[n]; + n++; + if (n >= dim) + { + /* Break out of the loop. */ + rptr = NULL; + break; + } + else + { + count[n]++; + rptr += rstride[n]; + fptr += fstride[n]; + mptr += mstride[n]; + } + } + } +} + +#endif + diff --git a/libgfortran/generated/unpack_i16.c b/libgfortran/generated/unpack_i16.c new file mode 100644 index 00000000000..0fbd7449ffe --- /dev/null +++ b/libgfortran/generated/unpack_i16.c @@ -0,0 +1,338 @@ +/* Specific implementation of the UNPACK intrinsic + Copyright 2008 Free Software Foundation, Inc. + Contributed by Thomas Koenig <tkoenig@gcc.gnu.org>, based on + unpack_generic.c by Paul Brook <paul@nowt.org>. + +This file is part of the GNU Fortran 95 runtime library (libgfortran). + +Libgfortran is free software; you can redistribute it and/or +modify it under the terms of the GNU General Public +License as published by the Free Software Foundation; either +version 2 of the License, or (at your option) any later version. + +In addition to the permissions in the GNU General Public License, the +Free Software Foundation gives you unlimited permission to link the +compiled version of this file into combinations with other programs, +and to distribute those combinations without any restriction coming +from the use of this file. (The General Public License restrictions +do apply in other respects; for example, they cover modification of +the file, and distribution when not linked into a combine +executable.) + +Ligbfortran is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. 
+ +You should have received a copy of the GNU General Public +License along with libgfortran; see the file COPYING. If not, +write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, +Boston, MA 02110-1301, USA. */ + +#include "libgfortran.h" +#include <stdlib.h> +#include <assert.h> +#include <string.h> + + +#if defined (HAVE_GFC_INTEGER_16) + +void +unpack0_i16 (gfc_array_i16 *ret, const gfc_array_i16 *vector, + const gfc_array_l1 *mask, const GFC_INTEGER_16 *fptr) +{ + /* r.* indicates the return array. */ + index_type rstride[GFC_MAX_DIMENSIONS]; + index_type rstride0; + index_type rs; + GFC_INTEGER_16 *rptr; + /* v.* indicates the vector array. */ + index_type vstride0; + GFC_INTEGER_16 *vptr; + /* Value for field, this is constant. */ + const GFC_INTEGER_16 fval = *fptr; + /* m.* indicates the mask array. */ + index_type mstride[GFC_MAX_DIMENSIONS]; + index_type mstride0; + const GFC_LOGICAL_1 *mptr; + + index_type count[GFC_MAX_DIMENSIONS]; + index_type extent[GFC_MAX_DIMENSIONS]; + index_type n; + index_type dim; + + int empty; + int mask_kind; + + empty = 0; + + mptr = mask->data; + + /* Use the same loop for all logical types, by using GFC_LOGICAL_1 + and using shifting to address size and endian issues. */ + + mask_kind = GFC_DESCRIPTOR_SIZE (mask); + + if (mask_kind == 1 || mask_kind == 2 || mask_kind == 4 || mask_kind == 8 +#ifdef HAVE_GFC_LOGICAL_16 + || mask_kind == 16 +#endif + ) + { + /* Do not convert a NULL pointer as we use test for NULL below. */ + if (mptr) + mptr = GFOR_POINTER_TO_L1 (mptr, mask_kind); + } + else + runtime_error ("Funny sized logical array"); + + if (ret->data == NULL) + { + /* The front end has signalled that we need to populate the + return array descriptor. */ + dim = GFC_DESCRIPTOR_RANK (mask); + rs = 1; + for (n = 0; n < dim; n++) + { + count[n] = 0; + ret->dim[n].stride = rs; + ret->dim[n].lbound = 0; + ret->dim[n].ubound = mask->dim[n].ubound - mask->dim[n].lbound; + extent[n] = ret->dim[n].ubound + 1; + empty = empty || extent[n] <= 0; + rstride[n] = ret->dim[n].stride; + mstride[n] = mask->dim[n].stride * mask_kind; + rs *= extent[n]; + } + ret->offset = 0; + ret->data = internal_malloc_size (rs * sizeof (GFC_INTEGER_16)); + } + else + { + dim = GFC_DESCRIPTOR_RANK (ret); + for (n = 0; n < dim; n++) + { + count[n] = 0; + extent[n] = ret->dim[n].ubound + 1 - ret->dim[n].lbound; + empty = empty || extent[n] <= 0; + rstride[n] = ret->dim[n].stride; + mstride[n] = mask->dim[n].stride * mask_kind; + } + if (rstride[0] == 0) + rstride[0] = 1; + } + + if (empty) + return; + + if (mstride[0] == 0) + mstride[0] = 1; + + vstride0 = vector->dim[0].stride; + if (vstride0 == 0) + vstride0 = 1; + rstride0 = rstride[0]; + mstride0 = mstride[0]; + rptr = ret->data; + vptr = vector->data; + + while (rptr) + { + if (*mptr) + { + /* From vector. */ + *rptr = *vptr; + vptr += vstride0; + } + else + { + /* From field. */ + *rptr = fval; + } + /* Advance to the next element. */ + rptr += rstride0; + mptr += mstride0; + count[0]++; + n = 0; + while (count[n] == extent[n]) + { + /* When we get to the end of a dimension, reset it and increment + the next dimension. */ + count[n] = 0; + /* We could precalculate these products, but this is a less + frequently used path so probably not worth it. */ + rptr -= rstride[n] * extent[n]; + mptr -= mstride[n] * extent[n]; + n++; + if (n >= dim) + { + /* Break out of the loop. 
*/ + rptr = NULL; + break; + } + else + { + count[n]++; + rptr += rstride[n]; + mptr += mstride[n]; + } + } + } +} + +void +unpack1_i16 (gfc_array_i16 *ret, const gfc_array_i16 *vector, + const gfc_array_l1 *mask, const gfc_array_i16 *field) +{ + /* r.* indicates the return array. */ + index_type rstride[GFC_MAX_DIMENSIONS]; + index_type rstride0; + index_type rs; + GFC_INTEGER_16 *rptr; + /* v.* indicates the vector array. */ + index_type vstride0; + GFC_INTEGER_16 *vptr; + /* f.* indicates the field array. */ + index_type fstride[GFC_MAX_DIMENSIONS]; + index_type fstride0; + const GFC_INTEGER_16 *fptr; + /* m.* indicates the mask array. */ + index_type mstride[GFC_MAX_DIMENSIONS]; + index_type mstride0; + const GFC_LOGICAL_1 *mptr; + + index_type count[GFC_MAX_DIMENSIONS]; + index_type extent[GFC_MAX_DIMENSIONS]; + index_type n; + index_type dim; + + int empty; + int mask_kind; + + empty = 0; + + mptr = mask->data; + + /* Use the same loop for all logical types, by using GFC_LOGICAL_1 + and using shifting to address size and endian issues. */ + + mask_kind = GFC_DESCRIPTOR_SIZE (mask); + + if (mask_kind == 1 || mask_kind == 2 || mask_kind == 4 || mask_kind == 8 +#ifdef HAVE_GFC_LOGICAL_16 + || mask_kind == 16 +#endif + ) + { + /* Do not convert a NULL pointer as we use test for NULL below. */ + if (mptr) + mptr = GFOR_POINTER_TO_L1 (mptr, mask_kind); + } + else + runtime_error ("Funny sized logical array"); + + if (ret->data == NULL) + { + /* The front end has signalled that we need to populate the + return array descriptor. */ + dim = GFC_DESCRIPTOR_RANK (mask); + rs = 1; + for (n = 0; n < dim; n++) + { + count[n] = 0; + ret->dim[n].stride = rs; + ret->dim[n].lbound = 0; + ret->dim[n].ubound = mask->dim[n].ubound - mask->dim[n].lbound; + extent[n] = ret->dim[n].ubound + 1; + empty = empty || extent[n] <= 0; + rstride[n] = ret->dim[n].stride; + fstride[n] = field->dim[n].stride; + mstride[n] = mask->dim[n].stride * mask_kind; + rs *= extent[n]; + } + ret->offset = 0; + ret->data = internal_malloc_size (rs * sizeof (GFC_INTEGER_16)); + } + else + { + dim = GFC_DESCRIPTOR_RANK (ret); + for (n = 0; n < dim; n++) + { + count[n] = 0; + extent[n] = ret->dim[n].ubound + 1 - ret->dim[n].lbound; + empty = empty || extent[n] <= 0; + rstride[n] = ret->dim[n].stride; + fstride[n] = field->dim[n].stride; + mstride[n] = mask->dim[n].stride * mask_kind; + } + if (rstride[0] == 0) + rstride[0] = 1; + } + + if (empty) + return; + + if (fstride[0] == 0) + fstride[0] = 1; + if (mstride[0] == 0) + mstride[0] = 1; + + vstride0 = vector->dim[0].stride; + if (vstride0 == 0) + vstride0 = 1; + rstride0 = rstride[0]; + fstride0 = fstride[0]; + mstride0 = mstride[0]; + rptr = ret->data; + fptr = field->data; + vptr = vector->data; + + while (rptr) + { + if (*mptr) + { + /* From vector. */ + *rptr = *vptr; + vptr += vstride0; + } + else + { + /* From field. */ + *rptr = *fptr; + } + /* Advance to the next element. */ + rptr += rstride0; + fptr += fstride0; + mptr += mstride0; + count[0]++; + n = 0; + while (count[n] == extent[n]) + { + /* When we get to the end of a dimension, reset it and increment + the next dimension. */ + count[n] = 0; + /* We could precalculate these products, but this is a less + frequently used path so probably not worth it. */ + rptr -= rstride[n] * extent[n]; + fptr -= fstride[n] * extent[n]; + mptr -= mstride[n] * extent[n]; + n++; + if (n >= dim) + { + /* Break out of the loop. 
*/ + rptr = NULL; + break; + } + else + { + count[n]++; + rptr += rstride[n]; + fptr += fstride[n]; + mptr += mstride[n]; + } + } + } +} + +#endif + diff --git a/libgfortran/generated/unpack_i2.c b/libgfortran/generated/unpack_i2.c new file mode 100644 index 00000000000..096c7858de1 --- /dev/null +++ b/libgfortran/generated/unpack_i2.c @@ -0,0 +1,338 @@ +/* Specific implementation of the UNPACK intrinsic + Copyright 2008 Free Software Foundation, Inc. + Contributed by Thomas Koenig <tkoenig@gcc.gnu.org>, based on + unpack_generic.c by Paul Brook <paul@nowt.org>. + +This file is part of the GNU Fortran 95 runtime library (libgfortran). + +Libgfortran is free software; you can redistribute it and/or +modify it under the terms of the GNU General Public +License as published by the Free Software Foundation; either +version 2 of the License, or (at your option) any later version. + +In addition to the permissions in the GNU General Public License, the +Free Software Foundation gives you unlimited permission to link the +compiled version of this file into combinations with other programs, +and to distribute those combinations without any restriction coming +from the use of this file. (The General Public License restrictions +do apply in other respects; for example, they cover modification of +the file, and distribution when not linked into a combine +executable.) + +Ligbfortran is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public +License along with libgfortran; see the file COPYING. If not, +write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, +Boston, MA 02110-1301, USA. */ + +#include "libgfortran.h" +#include <stdlib.h> +#include <assert.h> +#include <string.h> + + +#if defined (HAVE_GFC_INTEGER_2) + +void +unpack0_i2 (gfc_array_i2 *ret, const gfc_array_i2 *vector, + const gfc_array_l1 *mask, const GFC_INTEGER_2 *fptr) +{ + /* r.* indicates the return array. */ + index_type rstride[GFC_MAX_DIMENSIONS]; + index_type rstride0; + index_type rs; + GFC_INTEGER_2 *rptr; + /* v.* indicates the vector array. */ + index_type vstride0; + GFC_INTEGER_2 *vptr; + /* Value for field, this is constant. */ + const GFC_INTEGER_2 fval = *fptr; + /* m.* indicates the mask array. */ + index_type mstride[GFC_MAX_DIMENSIONS]; + index_type mstride0; + const GFC_LOGICAL_1 *mptr; + + index_type count[GFC_MAX_DIMENSIONS]; + index_type extent[GFC_MAX_DIMENSIONS]; + index_type n; + index_type dim; + + int empty; + int mask_kind; + + empty = 0; + + mptr = mask->data; + + /* Use the same loop for all logical types, by using GFC_LOGICAL_1 + and using shifting to address size and endian issues. */ + + mask_kind = GFC_DESCRIPTOR_SIZE (mask); + + if (mask_kind == 1 || mask_kind == 2 || mask_kind == 4 || mask_kind == 8 +#ifdef HAVE_GFC_LOGICAL_16 + || mask_kind == 16 +#endif + ) + { + /* Do not convert a NULL pointer as we use test for NULL below. */ + if (mptr) + mptr = GFOR_POINTER_TO_L1 (mptr, mask_kind); + } + else + runtime_error ("Funny sized logical array"); + + if (ret->data == NULL) + { + /* The front end has signalled that we need to populate the + return array descriptor. 
*/ + dim = GFC_DESCRIPTOR_RANK (mask); + rs = 1; + for (n = 0; n < dim; n++) + { + count[n] = 0; + ret->dim[n].stride = rs; + ret->dim[n].lbound = 0; + ret->dim[n].ubound = mask->dim[n].ubound - mask->dim[n].lbound; + extent[n] = ret->dim[n].ubound + 1; + empty = empty || extent[n] <= 0; + rstride[n] = ret->dim[n].stride; + mstride[n] = mask->dim[n].stride * mask_kind; + rs *= extent[n]; + } + ret->offset = 0; + ret->data = internal_malloc_size (rs * sizeof (GFC_INTEGER_2)); + } + else + { + dim = GFC_DESCRIPTOR_RANK (ret); + for (n = 0; n < dim; n++) + { + count[n] = 0; + extent[n] = ret->dim[n].ubound + 1 - ret->dim[n].lbound; + empty = empty || extent[n] <= 0; + rstride[n] = ret->dim[n].stride; + mstride[n] = mask->dim[n].stride * mask_kind; + } + if (rstride[0] == 0) + rstride[0] = 1; + } + + if (empty) + return; + + if (mstride[0] == 0) + mstride[0] = 1; + + vstride0 = vector->dim[0].stride; + if (vstride0 == 0) + vstride0 = 1; + rstride0 = rstride[0]; + mstride0 = mstride[0]; + rptr = ret->data; + vptr = vector->data; + + while (rptr) + { + if (*mptr) + { + /* From vector. */ + *rptr = *vptr; + vptr += vstride0; + } + else + { + /* From field. */ + *rptr = fval; + } + /* Advance to the next element. */ + rptr += rstride0; + mptr += mstride0; + count[0]++; + n = 0; + while (count[n] == extent[n]) + { + /* When we get to the end of a dimension, reset it and increment + the next dimension. */ + count[n] = 0; + /* We could precalculate these products, but this is a less + frequently used path so probably not worth it. */ + rptr -= rstride[n] * extent[n]; + mptr -= mstride[n] * extent[n]; + n++; + if (n >= dim) + { + /* Break out of the loop. */ + rptr = NULL; + break; + } + else + { + count[n]++; + rptr += rstride[n]; + mptr += mstride[n]; + } + } + } +} + +void +unpack1_i2 (gfc_array_i2 *ret, const gfc_array_i2 *vector, + const gfc_array_l1 *mask, const gfc_array_i2 *field) +{ + /* r.* indicates the return array. */ + index_type rstride[GFC_MAX_DIMENSIONS]; + index_type rstride0; + index_type rs; + GFC_INTEGER_2 *rptr; + /* v.* indicates the vector array. */ + index_type vstride0; + GFC_INTEGER_2 *vptr; + /* f.* indicates the field array. */ + index_type fstride[GFC_MAX_DIMENSIONS]; + index_type fstride0; + const GFC_INTEGER_2 *fptr; + /* m.* indicates the mask array. */ + index_type mstride[GFC_MAX_DIMENSIONS]; + index_type mstride0; + const GFC_LOGICAL_1 *mptr; + + index_type count[GFC_MAX_DIMENSIONS]; + index_type extent[GFC_MAX_DIMENSIONS]; + index_type n; + index_type dim; + + int empty; + int mask_kind; + + empty = 0; + + mptr = mask->data; + + /* Use the same loop for all logical types, by using GFC_LOGICAL_1 + and using shifting to address size and endian issues. */ + + mask_kind = GFC_DESCRIPTOR_SIZE (mask); + + if (mask_kind == 1 || mask_kind == 2 || mask_kind == 4 || mask_kind == 8 +#ifdef HAVE_GFC_LOGICAL_16 + || mask_kind == 16 +#endif + ) + { + /* Do not convert a NULL pointer as we use test for NULL below. */ + if (mptr) + mptr = GFOR_POINTER_TO_L1 (mptr, mask_kind); + } + else + runtime_error ("Funny sized logical array"); + + if (ret->data == NULL) + { + /* The front end has signalled that we need to populate the + return array descriptor. 
*/ + dim = GFC_DESCRIPTOR_RANK (mask); + rs = 1; + for (n = 0; n < dim; n++) + { + count[n] = 0; + ret->dim[n].stride = rs; + ret->dim[n].lbound = 0; + ret->dim[n].ubound = mask->dim[n].ubound - mask->dim[n].lbound; + extent[n] = ret->dim[n].ubound + 1; + empty = empty || extent[n] <= 0; + rstride[n] = ret->dim[n].stride; + fstride[n] = field->dim[n].stride; + mstride[n] = mask->dim[n].stride * mask_kind; + rs *= extent[n]; + } + ret->offset = 0; + ret->data = internal_malloc_size (rs * sizeof (GFC_INTEGER_2)); + } + else + { + dim = GFC_DESCRIPTOR_RANK (ret); + for (n = 0; n < dim; n++) + { + count[n] = 0; + extent[n] = ret->dim[n].ubound + 1 - ret->dim[n].lbound; + empty = empty || extent[n] <= 0; + rstride[n] = ret->dim[n].stride; + fstride[n] = field->dim[n].stride; + mstride[n] = mask->dim[n].stride * mask_kind; + } + if (rstride[0] == 0) + rstride[0] = 1; + } + + if (empty) + return; + + if (fstride[0] == 0) + fstride[0] = 1; + if (mstride[0] == 0) + mstride[0] = 1; + + vstride0 = vector->dim[0].stride; + if (vstride0 == 0) + vstride0 = 1; + rstride0 = rstride[0]; + fstride0 = fstride[0]; + mstride0 = mstride[0]; + rptr = ret->data; + fptr = field->data; + vptr = vector->data; + + while (rptr) + { + if (*mptr) + { + /* From vector. */ + *rptr = *vptr; + vptr += vstride0; + } + else + { + /* From field. */ + *rptr = *fptr; + } + /* Advance to the next element. */ + rptr += rstride0; + fptr += fstride0; + mptr += mstride0; + count[0]++; + n = 0; + while (count[n] == extent[n]) + { + /* When we get to the end of a dimension, reset it and increment + the next dimension. */ + count[n] = 0; + /* We could precalculate these products, but this is a less + frequently used path so probably not worth it. */ + rptr -= rstride[n] * extent[n]; + fptr -= fstride[n] * extent[n]; + mptr -= mstride[n] * extent[n]; + n++; + if (n >= dim) + { + /* Break out of the loop. */ + rptr = NULL; + break; + } + else + { + count[n]++; + rptr += rstride[n]; + fptr += fstride[n]; + mptr += mstride[n]; + } + } + } +} + +#endif + diff --git a/libgfortran/generated/unpack_i4.c b/libgfortran/generated/unpack_i4.c new file mode 100644 index 00000000000..08f197c376c --- /dev/null +++ b/libgfortran/generated/unpack_i4.c @@ -0,0 +1,338 @@ +/* Specific implementation of the UNPACK intrinsic + Copyright 2008 Free Software Foundation, Inc. + Contributed by Thomas Koenig <tkoenig@gcc.gnu.org>, based on + unpack_generic.c by Paul Brook <paul@nowt.org>. + +This file is part of the GNU Fortran 95 runtime library (libgfortran). + +Libgfortran is free software; you can redistribute it and/or +modify it under the terms of the GNU General Public +License as published by the Free Software Foundation; either +version 2 of the License, or (at your option) any later version. + +In addition to the permissions in the GNU General Public License, the +Free Software Foundation gives you unlimited permission to link the +compiled version of this file into combinations with other programs, +and to distribute those combinations without any restriction coming +from the use of this file. (The General Public License restrictions +do apply in other respects; for example, they cover modification of +the file, and distribution when not linked into a combine +executable.) + +Ligbfortran is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. 
+ +You should have received a copy of the GNU General Public +License along with libgfortran; see the file COPYING. If not, +write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, +Boston, MA 02110-1301, USA. */ + +#include "libgfortran.h" +#include <stdlib.h> +#include <assert.h> +#include <string.h> + + +#if defined (HAVE_GFC_INTEGER_4) + +void +unpack0_i4 (gfc_array_i4 *ret, const gfc_array_i4 *vector, + const gfc_array_l1 *mask, const GFC_INTEGER_4 *fptr) +{ + /* r.* indicates the return array. */ + index_type rstride[GFC_MAX_DIMENSIONS]; + index_type rstride0; + index_type rs; + GFC_INTEGER_4 *rptr; + /* v.* indicates the vector array. */ + index_type vstride0; + GFC_INTEGER_4 *vptr; + /* Value for field, this is constant. */ + const GFC_INTEGER_4 fval = *fptr; + /* m.* indicates the mask array. */ + index_type mstride[GFC_MAX_DIMENSIONS]; + index_type mstride0; + const GFC_LOGICAL_1 *mptr; + + index_type count[GFC_MAX_DIMENSIONS]; + index_type extent[GFC_MAX_DIMENSIONS]; + index_type n; + index_type dim; + + int empty; + int mask_kind; + + empty = 0; + + mptr = mask->data; + + /* Use the same loop for all logical types, by using GFC_LOGICAL_1 + and using shifting to address size and endian issues. */ + + mask_kind = GFC_DESCRIPTOR_SIZE (mask); + + if (mask_kind == 1 || mask_kind == 2 || mask_kind == 4 || mask_kind == 8 +#ifdef HAVE_GFC_LOGICAL_16 + || mask_kind == 16 +#endif + ) + { + /* Do not convert a NULL pointer as we use test for NULL below. */ + if (mptr) + mptr = GFOR_POINTER_TO_L1 (mptr, mask_kind); + } + else + runtime_error ("Funny sized logical array"); + + if (ret->data == NULL) + { + /* The front end has signalled that we need to populate the + return array descriptor. */ + dim = GFC_DESCRIPTOR_RANK (mask); + rs = 1; + for (n = 0; n < dim; n++) + { + count[n] = 0; + ret->dim[n].stride = rs; + ret->dim[n].lbound = 0; + ret->dim[n].ubound = mask->dim[n].ubound - mask->dim[n].lbound; + extent[n] = ret->dim[n].ubound + 1; + empty = empty || extent[n] <= 0; + rstride[n] = ret->dim[n].stride; + mstride[n] = mask->dim[n].stride * mask_kind; + rs *= extent[n]; + } + ret->offset = 0; + ret->data = internal_malloc_size (rs * sizeof (GFC_INTEGER_4)); + } + else + { + dim = GFC_DESCRIPTOR_RANK (ret); + for (n = 0; n < dim; n++) + { + count[n] = 0; + extent[n] = ret->dim[n].ubound + 1 - ret->dim[n].lbound; + empty = empty || extent[n] <= 0; + rstride[n] = ret->dim[n].stride; + mstride[n] = mask->dim[n].stride * mask_kind; + } + if (rstride[0] == 0) + rstride[0] = 1; + } + + if (empty) + return; + + if (mstride[0] == 0) + mstride[0] = 1; + + vstride0 = vector->dim[0].stride; + if (vstride0 == 0) + vstride0 = 1; + rstride0 = rstride[0]; + mstride0 = mstride[0]; + rptr = ret->data; + vptr = vector->data; + + while (rptr) + { + if (*mptr) + { + /* From vector. */ + *rptr = *vptr; + vptr += vstride0; + } + else + { + /* From field. */ + *rptr = fval; + } + /* Advance to the next element. */ + rptr += rstride0; + mptr += mstride0; + count[0]++; + n = 0; + while (count[n] == extent[n]) + { + /* When we get to the end of a dimension, reset it and increment + the next dimension. */ + count[n] = 0; + /* We could precalculate these products, but this is a less + frequently used path so probably not worth it. */ + rptr -= rstride[n] * extent[n]; + mptr -= mstride[n] * extent[n]; + n++; + if (n >= dim) + { + /* Break out of the loop. 
*/ + rptr = NULL; + break; + } + else + { + count[n]++; + rptr += rstride[n]; + mptr += mstride[n]; + } + } + } +} + +void +unpack1_i4 (gfc_array_i4 *ret, const gfc_array_i4 *vector, + const gfc_array_l1 *mask, const gfc_array_i4 *field) +{ + /* r.* indicates the return array. */ + index_type rstride[GFC_MAX_DIMENSIONS]; + index_type rstride0; + index_type rs; + GFC_INTEGER_4 *rptr; + /* v.* indicates the vector array. */ + index_type vstride0; + GFC_INTEGER_4 *vptr; + /* f.* indicates the field array. */ + index_type fstride[GFC_MAX_DIMENSIONS]; + index_type fstride0; + const GFC_INTEGER_4 *fptr; + /* m.* indicates the mask array. */ + index_type mstride[GFC_MAX_DIMENSIONS]; + index_type mstride0; + const GFC_LOGICAL_1 *mptr; + + index_type count[GFC_MAX_DIMENSIONS]; + index_type extent[GFC_MAX_DIMENSIONS]; + index_type n; + index_type dim; + + int empty; + int mask_kind; + + empty = 0; + + mptr = mask->data; + + /* Use the same loop for all logical types, by using GFC_LOGICAL_1 + and using shifting to address size and endian issues. */ + + mask_kind = GFC_DESCRIPTOR_SIZE (mask); + + if (mask_kind == 1 || mask_kind == 2 || mask_kind == 4 || mask_kind == 8 +#ifdef HAVE_GFC_LOGICAL_16 + || mask_kind == 16 +#endif + ) + { + /* Do not convert a NULL pointer as we use test for NULL below. */ + if (mptr) + mptr = GFOR_POINTER_TO_L1 (mptr, mask_kind); + } + else + runtime_error ("Funny sized logical array"); + + if (ret->data == NULL) + { + /* The front end has signalled that we need to populate the + return array descriptor. */ + dim = GFC_DESCRIPTOR_RANK (mask); + rs = 1; + for (n = 0; n < dim; n++) + { + count[n] = 0; + ret->dim[n].stride = rs; + ret->dim[n].lbound = 0; + ret->dim[n].ubound = mask->dim[n].ubound - mask->dim[n].lbound; + extent[n] = ret->dim[n].ubound + 1; + empty = empty || extent[n] <= 0; + rstride[n] = ret->dim[n].stride; + fstride[n] = field->dim[n].stride; + mstride[n] = mask->dim[n].stride * mask_kind; + rs *= extent[n]; + } + ret->offset = 0; + ret->data = internal_malloc_size (rs * sizeof (GFC_INTEGER_4)); + } + else + { + dim = GFC_DESCRIPTOR_RANK (ret); + for (n = 0; n < dim; n++) + { + count[n] = 0; + extent[n] = ret->dim[n].ubound + 1 - ret->dim[n].lbound; + empty = empty || extent[n] <= 0; + rstride[n] = ret->dim[n].stride; + fstride[n] = field->dim[n].stride; + mstride[n] = mask->dim[n].stride * mask_kind; + } + if (rstride[0] == 0) + rstride[0] = 1; + } + + if (empty) + return; + + if (fstride[0] == 0) + fstride[0] = 1; + if (mstride[0] == 0) + mstride[0] = 1; + + vstride0 = vector->dim[0].stride; + if (vstride0 == 0) + vstride0 = 1; + rstride0 = rstride[0]; + fstride0 = fstride[0]; + mstride0 = mstride[0]; + rptr = ret->data; + fptr = field->data; + vptr = vector->data; + + while (rptr) + { + if (*mptr) + { + /* From vector. */ + *rptr = *vptr; + vptr += vstride0; + } + else + { + /* From field. */ + *rptr = *fptr; + } + /* Advance to the next element. */ + rptr += rstride0; + fptr += fstride0; + mptr += mstride0; + count[0]++; + n = 0; + while (count[n] == extent[n]) + { + /* When we get to the end of a dimension, reset it and increment + the next dimension. */ + count[n] = 0; + /* We could precalculate these products, but this is a less + frequently used path so probably not worth it. */ + rptr -= rstride[n] * extent[n]; + fptr -= fstride[n] * extent[n]; + mptr -= mstride[n] * extent[n]; + n++; + if (n >= dim) + { + /* Break out of the loop. 
*/ + rptr = NULL; + break; + } + else + { + count[n]++; + rptr += rstride[n]; + fptr += fstride[n]; + mptr += mstride[n]; + } + } + } +} + +#endif + diff --git a/libgfortran/generated/unpack_i8.c b/libgfortran/generated/unpack_i8.c new file mode 100644 index 00000000000..0847c1fa0da --- /dev/null +++ b/libgfortran/generated/unpack_i8.c @@ -0,0 +1,338 @@ +/* Specific implementation of the UNPACK intrinsic + Copyright 2008 Free Software Foundation, Inc. + Contributed by Thomas Koenig <tkoenig@gcc.gnu.org>, based on + unpack_generic.c by Paul Brook <paul@nowt.org>. + +This file is part of the GNU Fortran 95 runtime library (libgfortran). + +Libgfortran is free software; you can redistribute it and/or +modify it under the terms of the GNU General Public +License as published by the Free Software Foundation; either +version 2 of the License, or (at your option) any later version. + +In addition to the permissions in the GNU General Public License, the +Free Software Foundation gives you unlimited permission to link the +compiled version of this file into combinations with other programs, +and to distribute those combinations without any restriction coming +from the use of this file. (The General Public License restrictions +do apply in other respects; for example, they cover modification of +the file, and distribution when not linked into a combine +executable.) + +Ligbfortran is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public +License along with libgfortran; see the file COPYING. If not, +write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, +Boston, MA 02110-1301, USA. */ + +#include "libgfortran.h" +#include <stdlib.h> +#include <assert.h> +#include <string.h> + + +#if defined (HAVE_GFC_INTEGER_8) + +void +unpack0_i8 (gfc_array_i8 *ret, const gfc_array_i8 *vector, + const gfc_array_l1 *mask, const GFC_INTEGER_8 *fptr) +{ + /* r.* indicates the return array. */ + index_type rstride[GFC_MAX_DIMENSIONS]; + index_type rstride0; + index_type rs; + GFC_INTEGER_8 *rptr; + /* v.* indicates the vector array. */ + index_type vstride0; + GFC_INTEGER_8 *vptr; + /* Value for field, this is constant. */ + const GFC_INTEGER_8 fval = *fptr; + /* m.* indicates the mask array. */ + index_type mstride[GFC_MAX_DIMENSIONS]; + index_type mstride0; + const GFC_LOGICAL_1 *mptr; + + index_type count[GFC_MAX_DIMENSIONS]; + index_type extent[GFC_MAX_DIMENSIONS]; + index_type n; + index_type dim; + + int empty; + int mask_kind; + + empty = 0; + + mptr = mask->data; + + /* Use the same loop for all logical types, by using GFC_LOGICAL_1 + and using shifting to address size and endian issues. */ + + mask_kind = GFC_DESCRIPTOR_SIZE (mask); + + if (mask_kind == 1 || mask_kind == 2 || mask_kind == 4 || mask_kind == 8 +#ifdef HAVE_GFC_LOGICAL_16 + || mask_kind == 16 +#endif + ) + { + /* Do not convert a NULL pointer as we use test for NULL below. */ + if (mptr) + mptr = GFOR_POINTER_TO_L1 (mptr, mask_kind); + } + else + runtime_error ("Funny sized logical array"); + + if (ret->data == NULL) + { + /* The front end has signalled that we need to populate the + return array descriptor. 
*/ + dim = GFC_DESCRIPTOR_RANK (mask); + rs = 1; + for (n = 0; n < dim; n++) + { + count[n] = 0; + ret->dim[n].stride = rs; + ret->dim[n].lbound = 0; + ret->dim[n].ubound = mask->dim[n].ubound - mask->dim[n].lbound; + extent[n] = ret->dim[n].ubound + 1; + empty = empty || extent[n] <= 0; + rstride[n] = ret->dim[n].stride; + mstride[n] = mask->dim[n].stride * mask_kind; + rs *= extent[n]; + } + ret->offset = 0; + ret->data = internal_malloc_size (rs * sizeof (GFC_INTEGER_8)); + } + else + { + dim = GFC_DESCRIPTOR_RANK (ret); + for (n = 0; n < dim; n++) + { + count[n] = 0; + extent[n] = ret->dim[n].ubound + 1 - ret->dim[n].lbound; + empty = empty || extent[n] <= 0; + rstride[n] = ret->dim[n].stride; + mstride[n] = mask->dim[n].stride * mask_kind; + } + if (rstride[0] == 0) + rstride[0] = 1; + } + + if (empty) + return; + + if (mstride[0] == 0) + mstride[0] = 1; + + vstride0 = vector->dim[0].stride; + if (vstride0 == 0) + vstride0 = 1; + rstride0 = rstride[0]; + mstride0 = mstride[0]; + rptr = ret->data; + vptr = vector->data; + + while (rptr) + { + if (*mptr) + { + /* From vector. */ + *rptr = *vptr; + vptr += vstride0; + } + else + { + /* From field. */ + *rptr = fval; + } + /* Advance to the next element. */ + rptr += rstride0; + mptr += mstride0; + count[0]++; + n = 0; + while (count[n] == extent[n]) + { + /* When we get to the end of a dimension, reset it and increment + the next dimension. */ + count[n] = 0; + /* We could precalculate these products, but this is a less + frequently used path so probably not worth it. */ + rptr -= rstride[n] * extent[n]; + mptr -= mstride[n] * extent[n]; + n++; + if (n >= dim) + { + /* Break out of the loop. */ + rptr = NULL; + break; + } + else + { + count[n]++; + rptr += rstride[n]; + mptr += mstride[n]; + } + } + } +} + +void +unpack1_i8 (gfc_array_i8 *ret, const gfc_array_i8 *vector, + const gfc_array_l1 *mask, const gfc_array_i8 *field) +{ + /* r.* indicates the return array. */ + index_type rstride[GFC_MAX_DIMENSIONS]; + index_type rstride0; + index_type rs; + GFC_INTEGER_8 *rptr; + /* v.* indicates the vector array. */ + index_type vstride0; + GFC_INTEGER_8 *vptr; + /* f.* indicates the field array. */ + index_type fstride[GFC_MAX_DIMENSIONS]; + index_type fstride0; + const GFC_INTEGER_8 *fptr; + /* m.* indicates the mask array. */ + index_type mstride[GFC_MAX_DIMENSIONS]; + index_type mstride0; + const GFC_LOGICAL_1 *mptr; + + index_type count[GFC_MAX_DIMENSIONS]; + index_type extent[GFC_MAX_DIMENSIONS]; + index_type n; + index_type dim; + + int empty; + int mask_kind; + + empty = 0; + + mptr = mask->data; + + /* Use the same loop for all logical types, by using GFC_LOGICAL_1 + and using shifting to address size and endian issues. */ + + mask_kind = GFC_DESCRIPTOR_SIZE (mask); + + if (mask_kind == 1 || mask_kind == 2 || mask_kind == 4 || mask_kind == 8 +#ifdef HAVE_GFC_LOGICAL_16 + || mask_kind == 16 +#endif + ) + { + /* Do not convert a NULL pointer as we use test for NULL below. */ + if (mptr) + mptr = GFOR_POINTER_TO_L1 (mptr, mask_kind); + } + else + runtime_error ("Funny sized logical array"); + + if (ret->data == NULL) + { + /* The front end has signalled that we need to populate the + return array descriptor. 
*/ + dim = GFC_DESCRIPTOR_RANK (mask); + rs = 1; + for (n = 0; n < dim; n++) + { + count[n] = 0; + ret->dim[n].stride = rs; + ret->dim[n].lbound = 0; + ret->dim[n].ubound = mask->dim[n].ubound - mask->dim[n].lbound; + extent[n] = ret->dim[n].ubound + 1; + empty = empty || extent[n] <= 0; + rstride[n] = ret->dim[n].stride; + fstride[n] = field->dim[n].stride; + mstride[n] = mask->dim[n].stride * mask_kind; + rs *= extent[n]; + } + ret->offset = 0; + ret->data = internal_malloc_size (rs * sizeof (GFC_INTEGER_8)); + } + else + { + dim = GFC_DESCRIPTOR_RANK (ret); + for (n = 0; n < dim; n++) + { + count[n] = 0; + extent[n] = ret->dim[n].ubound + 1 - ret->dim[n].lbound; + empty = empty || extent[n] <= 0; + rstride[n] = ret->dim[n].stride; + fstride[n] = field->dim[n].stride; + mstride[n] = mask->dim[n].stride * mask_kind; + } + if (rstride[0] == 0) + rstride[0] = 1; + } + + if (empty) + return; + + if (fstride[0] == 0) + fstride[0] = 1; + if (mstride[0] == 0) + mstride[0] = 1; + + vstride0 = vector->dim[0].stride; + if (vstride0 == 0) + vstride0 = 1; + rstride0 = rstride[0]; + fstride0 = fstride[0]; + mstride0 = mstride[0]; + rptr = ret->data; + fptr = field->data; + vptr = vector->data; + + while (rptr) + { + if (*mptr) + { + /* From vector. */ + *rptr = *vptr; + vptr += vstride0; + } + else + { + /* From field. */ + *rptr = *fptr; + } + /* Advance to the next element. */ + rptr += rstride0; + fptr += fstride0; + mptr += mstride0; + count[0]++; + n = 0; + while (count[n] == extent[n]) + { + /* When we get to the end of a dimension, reset it and increment + the next dimension. */ + count[n] = 0; + /* We could precalculate these products, but this is a less + frequently used path so probably not worth it. */ + rptr -= rstride[n] * extent[n]; + fptr -= fstride[n] * extent[n]; + mptr -= mstride[n] * extent[n]; + n++; + if (n >= dim) + { + /* Break out of the loop. */ + rptr = NULL; + break; + } + else + { + count[n]++; + rptr += rstride[n]; + fptr += fstride[n]; + mptr += mstride[n]; + } + } + } +} + +#endif + diff --git a/libgfortran/generated/unpack_r10.c b/libgfortran/generated/unpack_r10.c new file mode 100644 index 00000000000..694d2c542ee --- /dev/null +++ b/libgfortran/generated/unpack_r10.c @@ -0,0 +1,338 @@ +/* Specific implementation of the UNPACK intrinsic + Copyright 2008 Free Software Foundation, Inc. + Contributed by Thomas Koenig <tkoenig@gcc.gnu.org>, based on + unpack_generic.c by Paul Brook <paul@nowt.org>. + +This file is part of the GNU Fortran 95 runtime library (libgfortran). + +Libgfortran is free software; you can redistribute it and/or +modify it under the terms of the GNU General Public +License as published by the Free Software Foundation; either +version 2 of the License, or (at your option) any later version. + +In addition to the permissions in the GNU General Public License, the +Free Software Foundation gives you unlimited permission to link the +compiled version of this file into combinations with other programs, +and to distribute those combinations without any restriction coming +from the use of this file. (The General Public License restrictions +do apply in other respects; for example, they cover modification of +the file, and distribution when not linked into a combine +executable.) + +Ligbfortran is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. 
+ +You should have received a copy of the GNU General Public +License along with libgfortran; see the file COPYING. If not, +write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, +Boston, MA 02110-1301, USA. */ + +#include "libgfortran.h" +#include <stdlib.h> +#include <assert.h> +#include <string.h> + + +#if defined (HAVE_GFC_REAL_10) + +void +unpack0_r10 (gfc_array_r10 *ret, const gfc_array_r10 *vector, + const gfc_array_l1 *mask, const GFC_REAL_10 *fptr) +{ + /* r.* indicates the return array. */ + index_type rstride[GFC_MAX_DIMENSIONS]; + index_type rstride0; + index_type rs; + GFC_REAL_10 *rptr; + /* v.* indicates the vector array. */ + index_type vstride0; + GFC_REAL_10 *vptr; + /* Value for field, this is constant. */ + const GFC_REAL_10 fval = *fptr; + /* m.* indicates the mask array. */ + index_type mstride[GFC_MAX_DIMENSIONS]; + index_type mstride0; + const GFC_LOGICAL_1 *mptr; + + index_type count[GFC_MAX_DIMENSIONS]; + index_type extent[GFC_MAX_DIMENSIONS]; + index_type n; + index_type dim; + + int empty; + int mask_kind; + + empty = 0; + + mptr = mask->data; + + /* Use the same loop for all logical types, by using GFC_LOGICAL_1 + and using shifting to address size and endian issues. */ + + mask_kind = GFC_DESCRIPTOR_SIZE (mask); + + if (mask_kind == 1 || mask_kind == 2 || mask_kind == 4 || mask_kind == 8 +#ifdef HAVE_GFC_LOGICAL_16 + || mask_kind == 16 +#endif + ) + { + /* Do not convert a NULL pointer as we use test for NULL below. */ + if (mptr) + mptr = GFOR_POINTER_TO_L1 (mptr, mask_kind); + } + else + runtime_error ("Funny sized logical array"); + + if (ret->data == NULL) + { + /* The front end has signalled that we need to populate the + return array descriptor. */ + dim = GFC_DESCRIPTOR_RANK (mask); + rs = 1; + for (n = 0; n < dim; n++) + { + count[n] = 0; + ret->dim[n].stride = rs; + ret->dim[n].lbound = 0; + ret->dim[n].ubound = mask->dim[n].ubound - mask->dim[n].lbound; + extent[n] = ret->dim[n].ubound + 1; + empty = empty || extent[n] <= 0; + rstride[n] = ret->dim[n].stride; + mstride[n] = mask->dim[n].stride * mask_kind; + rs *= extent[n]; + } + ret->offset = 0; + ret->data = internal_malloc_size (rs * sizeof (GFC_REAL_10)); + } + else + { + dim = GFC_DESCRIPTOR_RANK (ret); + for (n = 0; n < dim; n++) + { + count[n] = 0; + extent[n] = ret->dim[n].ubound + 1 - ret->dim[n].lbound; + empty = empty || extent[n] <= 0; + rstride[n] = ret->dim[n].stride; + mstride[n] = mask->dim[n].stride * mask_kind; + } + if (rstride[0] == 0) + rstride[0] = 1; + } + + if (empty) + return; + + if (mstride[0] == 0) + mstride[0] = 1; + + vstride0 = vector->dim[0].stride; + if (vstride0 == 0) + vstride0 = 1; + rstride0 = rstride[0]; + mstride0 = mstride[0]; + rptr = ret->data; + vptr = vector->data; + + while (rptr) + { + if (*mptr) + { + /* From vector. */ + *rptr = *vptr; + vptr += vstride0; + } + else + { + /* From field. */ + *rptr = fval; + } + /* Advance to the next element. */ + rptr += rstride0; + mptr += mstride0; + count[0]++; + n = 0; + while (count[n] == extent[n]) + { + /* When we get to the end of a dimension, reset it and increment + the next dimension. */ + count[n] = 0; + /* We could precalculate these products, but this is a less + frequently used path so probably not worth it. */ + rptr -= rstride[n] * extent[n]; + mptr -= mstride[n] * extent[n]; + n++; + if (n >= dim) + { + /* Break out of the loop. 
*/ + rptr = NULL; + break; + } + else + { + count[n]++; + rptr += rstride[n]; + mptr += mstride[n]; + } + } + } +} + +void +unpack1_r10 (gfc_array_r10 *ret, const gfc_array_r10 *vector, + const gfc_array_l1 *mask, const gfc_array_r10 *field) +{ + /* r.* indicates the return array. */ + index_type rstride[GFC_MAX_DIMENSIONS]; + index_type rstride0; + index_type rs; + GFC_REAL_10 *rptr; + /* v.* indicates the vector array. */ + index_type vstride0; + GFC_REAL_10 *vptr; + /* f.* indicates the field array. */ + index_type fstride[GFC_MAX_DIMENSIONS]; + index_type fstride0; + const GFC_REAL_10 *fptr; + /* m.* indicates the mask array. */ + index_type mstride[GFC_MAX_DIMENSIONS]; + index_type mstride0; + const GFC_LOGICAL_1 *mptr; + + index_type count[GFC_MAX_DIMENSIONS]; + index_type extent[GFC_MAX_DIMENSIONS]; + index_type n; + index_type dim; + + int empty; + int mask_kind; + + empty = 0; + + mptr = mask->data; + + /* Use the same loop for all logical types, by using GFC_LOGICAL_1 + and using shifting to address size and endian issues. */ + + mask_kind = GFC_DESCRIPTOR_SIZE (mask); + + if (mask_kind == 1 || mask_kind == 2 || mask_kind == 4 || mask_kind == 8 +#ifdef HAVE_GFC_LOGICAL_16 + || mask_kind == 16 +#endif + ) + { + /* Do not convert a NULL pointer as we use test for NULL below. */ + if (mptr) + mptr = GFOR_POINTER_TO_L1 (mptr, mask_kind); + } + else + runtime_error ("Funny sized logical array"); + + if (ret->data == NULL) + { + /* The front end has signalled that we need to populate the + return array descriptor. */ + dim = GFC_DESCRIPTOR_RANK (mask); + rs = 1; + for (n = 0; n < dim; n++) + { + count[n] = 0; + ret->dim[n].stride = rs; + ret->dim[n].lbound = 0; + ret->dim[n].ubound = mask->dim[n].ubound - mask->dim[n].lbound; + extent[n] = ret->dim[n].ubound + 1; + empty = empty || extent[n] <= 0; + rstride[n] = ret->dim[n].stride; + fstride[n] = field->dim[n].stride; + mstride[n] = mask->dim[n].stride * mask_kind; + rs *= extent[n]; + } + ret->offset = 0; + ret->data = internal_malloc_size (rs * sizeof (GFC_REAL_10)); + } + else + { + dim = GFC_DESCRIPTOR_RANK (ret); + for (n = 0; n < dim; n++) + { + count[n] = 0; + extent[n] = ret->dim[n].ubound + 1 - ret->dim[n].lbound; + empty = empty || extent[n] <= 0; + rstride[n] = ret->dim[n].stride; + fstride[n] = field->dim[n].stride; + mstride[n] = mask->dim[n].stride * mask_kind; + } + if (rstride[0] == 0) + rstride[0] = 1; + } + + if (empty) + return; + + if (fstride[0] == 0) + fstride[0] = 1; + if (mstride[0] == 0) + mstride[0] = 1; + + vstride0 = vector->dim[0].stride; + if (vstride0 == 0) + vstride0 = 1; + rstride0 = rstride[0]; + fstride0 = fstride[0]; + mstride0 = mstride[0]; + rptr = ret->data; + fptr = field->data; + vptr = vector->data; + + while (rptr) + { + if (*mptr) + { + /* From vector. */ + *rptr = *vptr; + vptr += vstride0; + } + else + { + /* From field. */ + *rptr = *fptr; + } + /* Advance to the next element. */ + rptr += rstride0; + fptr += fstride0; + mptr += mstride0; + count[0]++; + n = 0; + while (count[n] == extent[n]) + { + /* When we get to the end of a dimension, reset it and increment + the next dimension. */ + count[n] = 0; + /* We could precalculate these products, but this is a less + frequently used path so probably not worth it. */ + rptr -= rstride[n] * extent[n]; + fptr -= fstride[n] * extent[n]; + mptr -= mstride[n] * extent[n]; + n++; + if (n >= dim) + { + /* Break out of the loop. 
*/ + rptr = NULL; + break; + } + else + { + count[n]++; + rptr += rstride[n]; + fptr += fstride[n]; + mptr += mstride[n]; + } + } + } +} + +#endif + diff --git a/libgfortran/generated/unpack_r16.c b/libgfortran/generated/unpack_r16.c new file mode 100644 index 00000000000..65121c1b90e --- /dev/null +++ b/libgfortran/generated/unpack_r16.c @@ -0,0 +1,338 @@ +/* Specific implementation of the UNPACK intrinsic + Copyright 2008 Free Software Foundation, Inc. + Contributed by Thomas Koenig <tkoenig@gcc.gnu.org>, based on + unpack_generic.c by Paul Brook <paul@nowt.org>. + +This file is part of the GNU Fortran 95 runtime library (libgfortran). + +Libgfortran is free software; you can redistribute it and/or +modify it under the terms of the GNU General Public +License as published by the Free Software Foundation; either +version 2 of the License, or (at your option) any later version. + +In addition to the permissions in the GNU General Public License, the +Free Software Foundation gives you unlimited permission to link the +compiled version of this file into combinations with other programs, +and to distribute those combinations without any restriction coming +from the use of this file. (The General Public License restrictions +do apply in other respects; for example, they cover modification of +the file, and distribution when not linked into a combine +executable.) + +Ligbfortran is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public +License along with libgfortran; see the file COPYING. If not, +write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, +Boston, MA 02110-1301, USA. */ + +#include "libgfortran.h" +#include <stdlib.h> +#include <assert.h> +#include <string.h> + + +#if defined (HAVE_GFC_REAL_16) + +void +unpack0_r16 (gfc_array_r16 *ret, const gfc_array_r16 *vector, + const gfc_array_l1 *mask, const GFC_REAL_16 *fptr) +{ + /* r.* indicates the return array. */ + index_type rstride[GFC_MAX_DIMENSIONS]; + index_type rstride0; + index_type rs; + GFC_REAL_16 *rptr; + /* v.* indicates the vector array. */ + index_type vstride0; + GFC_REAL_16 *vptr; + /* Value for field, this is constant. */ + const GFC_REAL_16 fval = *fptr; + /* m.* indicates the mask array. */ + index_type mstride[GFC_MAX_DIMENSIONS]; + index_type mstride0; + const GFC_LOGICAL_1 *mptr; + + index_type count[GFC_MAX_DIMENSIONS]; + index_type extent[GFC_MAX_DIMENSIONS]; + index_type n; + index_type dim; + + int empty; + int mask_kind; + + empty = 0; + + mptr = mask->data; + + /* Use the same loop for all logical types, by using GFC_LOGICAL_1 + and using shifting to address size and endian issues. */ + + mask_kind = GFC_DESCRIPTOR_SIZE (mask); + + if (mask_kind == 1 || mask_kind == 2 || mask_kind == 4 || mask_kind == 8 +#ifdef HAVE_GFC_LOGICAL_16 + || mask_kind == 16 +#endif + ) + { + /* Do not convert a NULL pointer as we use test for NULL below. */ + if (mptr) + mptr = GFOR_POINTER_TO_L1 (mptr, mask_kind); + } + else + runtime_error ("Funny sized logical array"); + + if (ret->data == NULL) + { + /* The front end has signalled that we need to populate the + return array descriptor. 
*/ + dim = GFC_DESCRIPTOR_RANK (mask); + rs = 1; + for (n = 0; n < dim; n++) + { + count[n] = 0; + ret->dim[n].stride = rs; + ret->dim[n].lbound = 0; + ret->dim[n].ubound = mask->dim[n].ubound - mask->dim[n].lbound; + extent[n] = ret->dim[n].ubound + 1; + empty = empty || extent[n] <= 0; + rstride[n] = ret->dim[n].stride; + mstride[n] = mask->dim[n].stride * mask_kind; + rs *= extent[n]; + } + ret->offset = 0; + ret->data = internal_malloc_size (rs * sizeof (GFC_REAL_16)); + } + else + { + dim = GFC_DESCRIPTOR_RANK (ret); + for (n = 0; n < dim; n++) + { + count[n] = 0; + extent[n] = ret->dim[n].ubound + 1 - ret->dim[n].lbound; + empty = empty || extent[n] <= 0; + rstride[n] = ret->dim[n].stride; + mstride[n] = mask->dim[n].stride * mask_kind; + } + if (rstride[0] == 0) + rstride[0] = 1; + } + + if (empty) + return; + + if (mstride[0] == 0) + mstride[0] = 1; + + vstride0 = vector->dim[0].stride; + if (vstride0 == 0) + vstride0 = 1; + rstride0 = rstride[0]; + mstride0 = mstride[0]; + rptr = ret->data; + vptr = vector->data; + + while (rptr) + { + if (*mptr) + { + /* From vector. */ + *rptr = *vptr; + vptr += vstride0; + } + else + { + /* From field. */ + *rptr = fval; + } + /* Advance to the next element. */ + rptr += rstride0; + mptr += mstride0; + count[0]++; + n = 0; + while (count[n] == extent[n]) + { + /* When we get to the end of a dimension, reset it and increment + the next dimension. */ + count[n] = 0; + /* We could precalculate these products, but this is a less + frequently used path so probably not worth it. */ + rptr -= rstride[n] * extent[n]; + mptr -= mstride[n] * extent[n]; + n++; + if (n >= dim) + { + /* Break out of the loop. */ + rptr = NULL; + break; + } + else + { + count[n]++; + rptr += rstride[n]; + mptr += mstride[n]; + } + } + } +} + +void +unpack1_r16 (gfc_array_r16 *ret, const gfc_array_r16 *vector, + const gfc_array_l1 *mask, const gfc_array_r16 *field) +{ + /* r.* indicates the return array. */ + index_type rstride[GFC_MAX_DIMENSIONS]; + index_type rstride0; + index_type rs; + GFC_REAL_16 *rptr; + /* v.* indicates the vector array. */ + index_type vstride0; + GFC_REAL_16 *vptr; + /* f.* indicates the field array. */ + index_type fstride[GFC_MAX_DIMENSIONS]; + index_type fstride0; + const GFC_REAL_16 *fptr; + /* m.* indicates the mask array. */ + index_type mstride[GFC_MAX_DIMENSIONS]; + index_type mstride0; + const GFC_LOGICAL_1 *mptr; + + index_type count[GFC_MAX_DIMENSIONS]; + index_type extent[GFC_MAX_DIMENSIONS]; + index_type n; + index_type dim; + + int empty; + int mask_kind; + + empty = 0; + + mptr = mask->data; + + /* Use the same loop for all logical types, by using GFC_LOGICAL_1 + and using shifting to address size and endian issues. */ + + mask_kind = GFC_DESCRIPTOR_SIZE (mask); + + if (mask_kind == 1 || mask_kind == 2 || mask_kind == 4 || mask_kind == 8 +#ifdef HAVE_GFC_LOGICAL_16 + || mask_kind == 16 +#endif + ) + { + /* Do not convert a NULL pointer as we use test for NULL below. */ + if (mptr) + mptr = GFOR_POINTER_TO_L1 (mptr, mask_kind); + } + else + runtime_error ("Funny sized logical array"); + + if (ret->data == NULL) + { + /* The front end has signalled that we need to populate the + return array descriptor. 
*/ + dim = GFC_DESCRIPTOR_RANK (mask); + rs = 1; + for (n = 0; n < dim; n++) + { + count[n] = 0; + ret->dim[n].stride = rs; + ret->dim[n].lbound = 0; + ret->dim[n].ubound = mask->dim[n].ubound - mask->dim[n].lbound; + extent[n] = ret->dim[n].ubound + 1; + empty = empty || extent[n] <= 0; + rstride[n] = ret->dim[n].stride; + fstride[n] = field->dim[n].stride; + mstride[n] = mask->dim[n].stride * mask_kind; + rs *= extent[n]; + } + ret->offset = 0; + ret->data = internal_malloc_size (rs * sizeof (GFC_REAL_16)); + } + else + { + dim = GFC_DESCRIPTOR_RANK (ret); + for (n = 0; n < dim; n++) + { + count[n] = 0; + extent[n] = ret->dim[n].ubound + 1 - ret->dim[n].lbound; + empty = empty || extent[n] <= 0; + rstride[n] = ret->dim[n].stride; + fstride[n] = field->dim[n].stride; + mstride[n] = mask->dim[n].stride * mask_kind; + } + if (rstride[0] == 0) + rstride[0] = 1; + } + + if (empty) + return; + + if (fstride[0] == 0) + fstride[0] = 1; + if (mstride[0] == 0) + mstride[0] = 1; + + vstride0 = vector->dim[0].stride; + if (vstride0 == 0) + vstride0 = 1; + rstride0 = rstride[0]; + fstride0 = fstride[0]; + mstride0 = mstride[0]; + rptr = ret->data; + fptr = field->data; + vptr = vector->data; + + while (rptr) + { + if (*mptr) + { + /* From vector. */ + *rptr = *vptr; + vptr += vstride0; + } + else + { + /* From field. */ + *rptr = *fptr; + } + /* Advance to the next element. */ + rptr += rstride0; + fptr += fstride0; + mptr += mstride0; + count[0]++; + n = 0; + while (count[n] == extent[n]) + { + /* When we get to the end of a dimension, reset it and increment + the next dimension. */ + count[n] = 0; + /* We could precalculate these products, but this is a less + frequently used path so probably not worth it. */ + rptr -= rstride[n] * extent[n]; + fptr -= fstride[n] * extent[n]; + mptr -= mstride[n] * extent[n]; + n++; + if (n >= dim) + { + /* Break out of the loop. */ + rptr = NULL; + break; + } + else + { + count[n]++; + rptr += rstride[n]; + fptr += fstride[n]; + mptr += mstride[n]; + } + } + } +} + +#endif + diff --git a/libgfortran/generated/unpack_r4.c b/libgfortran/generated/unpack_r4.c new file mode 100644 index 00000000000..b9983182b6f --- /dev/null +++ b/libgfortran/generated/unpack_r4.c @@ -0,0 +1,338 @@ +/* Specific implementation of the UNPACK intrinsic + Copyright 2008 Free Software Foundation, Inc. + Contributed by Thomas Koenig <tkoenig@gcc.gnu.org>, based on + unpack_generic.c by Paul Brook <paul@nowt.org>. + +This file is part of the GNU Fortran 95 runtime library (libgfortran). + +Libgfortran is free software; you can redistribute it and/or +modify it under the terms of the GNU General Public +License as published by the Free Software Foundation; either +version 2 of the License, or (at your option) any later version. + +In addition to the permissions in the GNU General Public License, the +Free Software Foundation gives you unlimited permission to link the +compiled version of this file into combinations with other programs, +and to distribute those combinations without any restriction coming +from the use of this file. (The General Public License restrictions +do apply in other respects; for example, they cover modification of +the file, and distribution when not linked into a combine +executable.) + +Ligbfortran is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. 
+ +You should have received a copy of the GNU General Public +License along with libgfortran; see the file COPYING. If not, +write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, +Boston, MA 02110-1301, USA. */ + +#include "libgfortran.h" +#include <stdlib.h> +#include <assert.h> +#include <string.h> + + +#if defined (HAVE_GFC_REAL_4) + +void +unpack0_r4 (gfc_array_r4 *ret, const gfc_array_r4 *vector, + const gfc_array_l1 *mask, const GFC_REAL_4 *fptr) +{ + /* r.* indicates the return array. */ + index_type rstride[GFC_MAX_DIMENSIONS]; + index_type rstride0; + index_type rs; + GFC_REAL_4 *rptr; + /* v.* indicates the vector array. */ + index_type vstride0; + GFC_REAL_4 *vptr; + /* Value for field, this is constant. */ + const GFC_REAL_4 fval = *fptr; + /* m.* indicates the mask array. */ + index_type mstride[GFC_MAX_DIMENSIONS]; + index_type mstride0; + const GFC_LOGICAL_1 *mptr; + + index_type count[GFC_MAX_DIMENSIONS]; + index_type extent[GFC_MAX_DIMENSIONS]; + index_type n; + index_type dim; + + int empty; + int mask_kind; + + empty = 0; + + mptr = mask->data; + + /* Use the same loop for all logical types, by using GFC_LOGICAL_1 + and using shifting to address size and endian issues. */ + + mask_kind = GFC_DESCRIPTOR_SIZE (mask); + + if (mask_kind == 1 || mask_kind == 2 || mask_kind == 4 || mask_kind == 8 +#ifdef HAVE_GFC_LOGICAL_16 + || mask_kind == 16 +#endif + ) + { + /* Do not convert a NULL pointer as we use test for NULL below. */ + if (mptr) + mptr = GFOR_POINTER_TO_L1 (mptr, mask_kind); + } + else + runtime_error ("Funny sized logical array"); + + if (ret->data == NULL) + { + /* The front end has signalled that we need to populate the + return array descriptor. */ + dim = GFC_DESCRIPTOR_RANK (mask); + rs = 1; + for (n = 0; n < dim; n++) + { + count[n] = 0; + ret->dim[n].stride = rs; + ret->dim[n].lbound = 0; + ret->dim[n].ubound = mask->dim[n].ubound - mask->dim[n].lbound; + extent[n] = ret->dim[n].ubound + 1; + empty = empty || extent[n] <= 0; + rstride[n] = ret->dim[n].stride; + mstride[n] = mask->dim[n].stride * mask_kind; + rs *= extent[n]; + } + ret->offset = 0; + ret->data = internal_malloc_size (rs * sizeof (GFC_REAL_4)); + } + else + { + dim = GFC_DESCRIPTOR_RANK (ret); + for (n = 0; n < dim; n++) + { + count[n] = 0; + extent[n] = ret->dim[n].ubound + 1 - ret->dim[n].lbound; + empty = empty || extent[n] <= 0; + rstride[n] = ret->dim[n].stride; + mstride[n] = mask->dim[n].stride * mask_kind; + } + if (rstride[0] == 0) + rstride[0] = 1; + } + + if (empty) + return; + + if (mstride[0] == 0) + mstride[0] = 1; + + vstride0 = vector->dim[0].stride; + if (vstride0 == 0) + vstride0 = 1; + rstride0 = rstride[0]; + mstride0 = mstride[0]; + rptr = ret->data; + vptr = vector->data; + + while (rptr) + { + if (*mptr) + { + /* From vector. */ + *rptr = *vptr; + vptr += vstride0; + } + else + { + /* From field. */ + *rptr = fval; + } + /* Advance to the next element. */ + rptr += rstride0; + mptr += mstride0; + count[0]++; + n = 0; + while (count[n] == extent[n]) + { + /* When we get to the end of a dimension, reset it and increment + the next dimension. */ + count[n] = 0; + /* We could precalculate these products, but this is a less + frequently used path so probably not worth it. */ + rptr -= rstride[n] * extent[n]; + mptr -= mstride[n] * extent[n]; + n++; + if (n >= dim) + { + /* Break out of the loop. 
*/ + rptr = NULL; + break; + } + else + { + count[n]++; + rptr += rstride[n]; + mptr += mstride[n]; + } + } + } +} + +void +unpack1_r4 (gfc_array_r4 *ret, const gfc_array_r4 *vector, + const gfc_array_l1 *mask, const gfc_array_r4 *field) +{ + /* r.* indicates the return array. */ + index_type rstride[GFC_MAX_DIMENSIONS]; + index_type rstride0; + index_type rs; + GFC_REAL_4 *rptr; + /* v.* indicates the vector array. */ + index_type vstride0; + GFC_REAL_4 *vptr; + /* f.* indicates the field array. */ + index_type fstride[GFC_MAX_DIMENSIONS]; + index_type fstride0; + const GFC_REAL_4 *fptr; + /* m.* indicates the mask array. */ + index_type mstride[GFC_MAX_DIMENSIONS]; + index_type mstride0; + const GFC_LOGICAL_1 *mptr; + + index_type count[GFC_MAX_DIMENSIONS]; + index_type extent[GFC_MAX_DIMENSIONS]; + index_type n; + index_type dim; + + int empty; + int mask_kind; + + empty = 0; + + mptr = mask->data; + + /* Use the same loop for all logical types, by using GFC_LOGICAL_1 + and using shifting to address size and endian issues. */ + + mask_kind = GFC_DESCRIPTOR_SIZE (mask); + + if (mask_kind == 1 || mask_kind == 2 || mask_kind == 4 || mask_kind == 8 +#ifdef HAVE_GFC_LOGICAL_16 + || mask_kind == 16 +#endif + ) + { + /* Do not convert a NULL pointer as we use test for NULL below. */ + if (mptr) + mptr = GFOR_POINTER_TO_L1 (mptr, mask_kind); + } + else + runtime_error ("Funny sized logical array"); + + if (ret->data == NULL) + { + /* The front end has signalled that we need to populate the + return array descriptor. */ + dim = GFC_DESCRIPTOR_RANK (mask); + rs = 1; + for (n = 0; n < dim; n++) + { + count[n] = 0; + ret->dim[n].stride = rs; + ret->dim[n].lbound = 0; + ret->dim[n].ubound = mask->dim[n].ubound - mask->dim[n].lbound; + extent[n] = ret->dim[n].ubound + 1; + empty = empty || extent[n] <= 0; + rstride[n] = ret->dim[n].stride; + fstride[n] = field->dim[n].stride; + mstride[n] = mask->dim[n].stride * mask_kind; + rs *= extent[n]; + } + ret->offset = 0; + ret->data = internal_malloc_size (rs * sizeof (GFC_REAL_4)); + } + else + { + dim = GFC_DESCRIPTOR_RANK (ret); + for (n = 0; n < dim; n++) + { + count[n] = 0; + extent[n] = ret->dim[n].ubound + 1 - ret->dim[n].lbound; + empty = empty || extent[n] <= 0; + rstride[n] = ret->dim[n].stride; + fstride[n] = field->dim[n].stride; + mstride[n] = mask->dim[n].stride * mask_kind; + } + if (rstride[0] == 0) + rstride[0] = 1; + } + + if (empty) + return; + + if (fstride[0] == 0) + fstride[0] = 1; + if (mstride[0] == 0) + mstride[0] = 1; + + vstride0 = vector->dim[0].stride; + if (vstride0 == 0) + vstride0 = 1; + rstride0 = rstride[0]; + fstride0 = fstride[0]; + mstride0 = mstride[0]; + rptr = ret->data; + fptr = field->data; + vptr = vector->data; + + while (rptr) + { + if (*mptr) + { + /* From vector. */ + *rptr = *vptr; + vptr += vstride0; + } + else + { + /* From field. */ + *rptr = *fptr; + } + /* Advance to the next element. */ + rptr += rstride0; + fptr += fstride0; + mptr += mstride0; + count[0]++; + n = 0; + while (count[n] == extent[n]) + { + /* When we get to the end of a dimension, reset it and increment + the next dimension. */ + count[n] = 0; + /* We could precalculate these products, but this is a less + frequently used path so probably not worth it. */ + rptr -= rstride[n] * extent[n]; + fptr -= fstride[n] * extent[n]; + mptr -= mstride[n] * extent[n]; + n++; + if (n >= dim) + { + /* Break out of the loop. 
*/ + rptr = NULL; + break; + } + else + { + count[n]++; + rptr += rstride[n]; + fptr += fstride[n]; + mptr += mstride[n]; + } + } + } +} + +#endif + diff --git a/libgfortran/generated/unpack_r8.c b/libgfortran/generated/unpack_r8.c new file mode 100644 index 00000000000..cccf7596f9b --- /dev/null +++ b/libgfortran/generated/unpack_r8.c @@ -0,0 +1,338 @@ +/* Specific implementation of the UNPACK intrinsic + Copyright 2008 Free Software Foundation, Inc. + Contributed by Thomas Koenig <tkoenig@gcc.gnu.org>, based on + unpack_generic.c by Paul Brook <paul@nowt.org>. + +This file is part of the GNU Fortran 95 runtime library (libgfortran). + +Libgfortran is free software; you can redistribute it and/or +modify it under the terms of the GNU General Public +License as published by the Free Software Foundation; either +version 2 of the License, or (at your option) any later version. + +In addition to the permissions in the GNU General Public License, the +Free Software Foundation gives you unlimited permission to link the +compiled version of this file into combinations with other programs, +and to distribute those combinations without any restriction coming +from the use of this file. (The General Public License restrictions +do apply in other respects; for example, they cover modification of +the file, and distribution when not linked into a combine +executable.) + +Ligbfortran is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public +License along with libgfortran; see the file COPYING. If not, +write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, +Boston, MA 02110-1301, USA. */ + +#include "libgfortran.h" +#include <stdlib.h> +#include <assert.h> +#include <string.h> + + +#if defined (HAVE_GFC_REAL_8) + +void +unpack0_r8 (gfc_array_r8 *ret, const gfc_array_r8 *vector, + const gfc_array_l1 *mask, const GFC_REAL_8 *fptr) +{ + /* r.* indicates the return array. */ + index_type rstride[GFC_MAX_DIMENSIONS]; + index_type rstride0; + index_type rs; + GFC_REAL_8 *rptr; + /* v.* indicates the vector array. */ + index_type vstride0; + GFC_REAL_8 *vptr; + /* Value for field, this is constant. */ + const GFC_REAL_8 fval = *fptr; + /* m.* indicates the mask array. */ + index_type mstride[GFC_MAX_DIMENSIONS]; + index_type mstride0; + const GFC_LOGICAL_1 *mptr; + + index_type count[GFC_MAX_DIMENSIONS]; + index_type extent[GFC_MAX_DIMENSIONS]; + index_type n; + index_type dim; + + int empty; + int mask_kind; + + empty = 0; + + mptr = mask->data; + + /* Use the same loop for all logical types, by using GFC_LOGICAL_1 + and using shifting to address size and endian issues. */ + + mask_kind = GFC_DESCRIPTOR_SIZE (mask); + + if (mask_kind == 1 || mask_kind == 2 || mask_kind == 4 || mask_kind == 8 +#ifdef HAVE_GFC_LOGICAL_16 + || mask_kind == 16 +#endif + ) + { + /* Do not convert a NULL pointer as we use test for NULL below. */ + if (mptr) + mptr = GFOR_POINTER_TO_L1 (mptr, mask_kind); + } + else + runtime_error ("Funny sized logical array"); + + if (ret->data == NULL) + { + /* The front end has signalled that we need to populate the + return array descriptor. 
*/ + dim = GFC_DESCRIPTOR_RANK (mask); + rs = 1; + for (n = 0; n < dim; n++) + { + count[n] = 0; + ret->dim[n].stride = rs; + ret->dim[n].lbound = 0; + ret->dim[n].ubound = mask->dim[n].ubound - mask->dim[n].lbound; + extent[n] = ret->dim[n].ubound + 1; + empty = empty || extent[n] <= 0; + rstride[n] = ret->dim[n].stride; + mstride[n] = mask->dim[n].stride * mask_kind; + rs *= extent[n]; + } + ret->offset = 0; + ret->data = internal_malloc_size (rs * sizeof (GFC_REAL_8)); + } + else + { + dim = GFC_DESCRIPTOR_RANK (ret); + for (n = 0; n < dim; n++) + { + count[n] = 0; + extent[n] = ret->dim[n].ubound + 1 - ret->dim[n].lbound; + empty = empty || extent[n] <= 0; + rstride[n] = ret->dim[n].stride; + mstride[n] = mask->dim[n].stride * mask_kind; + } + if (rstride[0] == 0) + rstride[0] = 1; + } + + if (empty) + return; + + if (mstride[0] == 0) + mstride[0] = 1; + + vstride0 = vector->dim[0].stride; + if (vstride0 == 0) + vstride0 = 1; + rstride0 = rstride[0]; + mstride0 = mstride[0]; + rptr = ret->data; + vptr = vector->data; + + while (rptr) + { + if (*mptr) + { + /* From vector. */ + *rptr = *vptr; + vptr += vstride0; + } + else + { + /* From field. */ + *rptr = fval; + } + /* Advance to the next element. */ + rptr += rstride0; + mptr += mstride0; + count[0]++; + n = 0; + while (count[n] == extent[n]) + { + /* When we get to the end of a dimension, reset it and increment + the next dimension. */ + count[n] = 0; + /* We could precalculate these products, but this is a less + frequently used path so probably not worth it. */ + rptr -= rstride[n] * extent[n]; + mptr -= mstride[n] * extent[n]; + n++; + if (n >= dim) + { + /* Break out of the loop. */ + rptr = NULL; + break; + } + else + { + count[n]++; + rptr += rstride[n]; + mptr += mstride[n]; + } + } + } +} + +void +unpack1_r8 (gfc_array_r8 *ret, const gfc_array_r8 *vector, + const gfc_array_l1 *mask, const gfc_array_r8 *field) +{ + /* r.* indicates the return array. */ + index_type rstride[GFC_MAX_DIMENSIONS]; + index_type rstride0; + index_type rs; + GFC_REAL_8 *rptr; + /* v.* indicates the vector array. */ + index_type vstride0; + GFC_REAL_8 *vptr; + /* f.* indicates the field array. */ + index_type fstride[GFC_MAX_DIMENSIONS]; + index_type fstride0; + const GFC_REAL_8 *fptr; + /* m.* indicates the mask array. */ + index_type mstride[GFC_MAX_DIMENSIONS]; + index_type mstride0; + const GFC_LOGICAL_1 *mptr; + + index_type count[GFC_MAX_DIMENSIONS]; + index_type extent[GFC_MAX_DIMENSIONS]; + index_type n; + index_type dim; + + int empty; + int mask_kind; + + empty = 0; + + mptr = mask->data; + + /* Use the same loop for all logical types, by using GFC_LOGICAL_1 + and using shifting to address size and endian issues. */ + + mask_kind = GFC_DESCRIPTOR_SIZE (mask); + + if (mask_kind == 1 || mask_kind == 2 || mask_kind == 4 || mask_kind == 8 +#ifdef HAVE_GFC_LOGICAL_16 + || mask_kind == 16 +#endif + ) + { + /* Do not convert a NULL pointer as we use test for NULL below. */ + if (mptr) + mptr = GFOR_POINTER_TO_L1 (mptr, mask_kind); + } + else + runtime_error ("Funny sized logical array"); + + if (ret->data == NULL) + { + /* The front end has signalled that we need to populate the + return array descriptor. 
*/ + dim = GFC_DESCRIPTOR_RANK (mask); + rs = 1; + for (n = 0; n < dim; n++) + { + count[n] = 0; + ret->dim[n].stride = rs; + ret->dim[n].lbound = 0; + ret->dim[n].ubound = mask->dim[n].ubound - mask->dim[n].lbound; + extent[n] = ret->dim[n].ubound + 1; + empty = empty || extent[n] <= 0; + rstride[n] = ret->dim[n].stride; + fstride[n] = field->dim[n].stride; + mstride[n] = mask->dim[n].stride * mask_kind; + rs *= extent[n]; + } + ret->offset = 0; + ret->data = internal_malloc_size (rs * sizeof (GFC_REAL_8)); + } + else + { + dim = GFC_DESCRIPTOR_RANK (ret); + for (n = 0; n < dim; n++) + { + count[n] = 0; + extent[n] = ret->dim[n].ubound + 1 - ret->dim[n].lbound; + empty = empty || extent[n] <= 0; + rstride[n] = ret->dim[n].stride; + fstride[n] = field->dim[n].stride; + mstride[n] = mask->dim[n].stride * mask_kind; + } + if (rstride[0] == 0) + rstride[0] = 1; + } + + if (empty) + return; + + if (fstride[0] == 0) + fstride[0] = 1; + if (mstride[0] == 0) + mstride[0] = 1; + + vstride0 = vector->dim[0].stride; + if (vstride0 == 0) + vstride0 = 1; + rstride0 = rstride[0]; + fstride0 = fstride[0]; + mstride0 = mstride[0]; + rptr = ret->data; + fptr = field->data; + vptr = vector->data; + + while (rptr) + { + if (*mptr) + { + /* From vector. */ + *rptr = *vptr; + vptr += vstride0; + } + else + { + /* From field. */ + *rptr = *fptr; + } + /* Advance to the next element. */ + rptr += rstride0; + fptr += fstride0; + mptr += mstride0; + count[0]++; + n = 0; + while (count[n] == extent[n]) + { + /* When we get to the end of a dimension, reset it and increment + the next dimension. */ + count[n] = 0; + /* We could precalculate these products, but this is a less + frequently used path so probably not worth it. */ + rptr -= rstride[n] * extent[n]; + fptr -= fstride[n] * extent[n]; + mptr -= mstride[n] * extent[n]; + n++; + if (n >= dim) + { + /* Break out of the loop. */ + rptr = NULL; + break; + } + else + { + count[n]++; + rptr += rstride[n]; + fptr += fstride[n]; + mptr += mstride[n]; + } + } + } +} + +#endif + |
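
All of the type-specific files added above share one traversal scheme: an "odometer" of per-dimension counters (count[]), extents (extent[]) and strides, where dimension 0 is bumped on every element and a wrapped dimension rewinds its pointer contribution and carries into the next one. The sketch below is illustrative only and is not part of the diff; it mimics the scalar-field case (unpack0_*) for a contiguous column-major 2x3 INTEGER(KIND=2) result, with plain C types standing in for the libgfortran descriptors (gfc_array_i2, index_type, GFC_LOGICAL_1). The names RANK and fval, and the 2x3 shape, are invented for the example.

/* Stand-alone sketch of the unpack0_* traversal, under the assumptions
   stated above.  Compile with any C99 compiler.  */

#include <stdio.h>
#include <stdint.h>

#define RANK 2

int
main (void)
{
  /* A 2x3 result stored column-major, as a Fortran array would be.  */
  int extent[RANK]  = { 2, 3 };
  int rstride[RANK] = { 1, 2 };   /* strides of the result array        */
  int mstride[RANK] = { 1, 2 };   /* mask has the same shape and layout */

  int16_t ret[6];
  const int16_t vector[] = { 10, 20, 30 };     /* consumed where mask is true */
  const uint8_t mask[6]  = { 1, 0, 0, 1, 1, 0 };
  const int16_t fval = -1;                     /* scalar FIELD value          */

  int count[RANK] = { 0, 0 };
  int16_t *rptr = ret;
  const uint8_t *mptr = mask;
  const int16_t *vptr = vector;
  int n;

  while (rptr)
    {
      /* Mask true: take the next VECTOR element; otherwise use FIELD.  */
      if (*mptr)
        *rptr = *vptr++;
      else
        *rptr = fval;

      /* Advance to the next element.  */
      rptr += rstride[0];
      mptr += mstride[0];
      count[0]++;
      n = 0;
      while (count[n] == extent[n])
        {
          /* Dimension n wrapped: rewind its contribution and carry.  */
          count[n] = 0;
          rptr -= rstride[n] * extent[n];
          mptr -= mstride[n] * extent[n];
          n++;
          if (n >= RANK)
            {
              rptr = NULL;           /* whole result filled: stop */
              break;
            }
          else
            {
              count[n]++;
              rptr += rstride[n];
              mptr += mstride[n];
            }
        }
    }

  for (n = 0; n < 6; n++)
    printf ("%d ", ret[n]);          /* prints: 10 -1 -1 20 30 -1 */
  printf ("\n");
  return 0;
}

The array-field variants (unpack1_*) in the diff differ only in that they step a field pointer through FIELD with its own fstride[] bookkeeping instead of reusing a constant fill value, and both variants fall back to building the result descriptor themselves (strides, bounds, allocation) when the front end passes ret->data == NULL.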