new package: lfortran

Tee KOBAYASHI 2022-01-30 11:43:52 +09:00 committed by xtkoba
parent 0773feac34
commit 2e3100bca7
12 changed files with 1602 additions and 0 deletions

@@ -0,0 +1,11 @@
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -82,7 +82,7 @@
# Find ZLIB with our custom finder before including LLVM since the finder for LLVM
# might search for ZLIB again and find the shared libraries instead of the static ones
-find_package(StaticZLIB REQUIRED)
+find_package(ZLIB)
# LLVM
set(WITH_LLVM no CACHE BOOL "Build with LLVM support")

packages/lfortran/build.sh

@@ -0,0 +1,39 @@
TERMUX_PKG_HOMEPAGE=https://lfortran.org/
TERMUX_PKG_DESCRIPTION="A modern open-source interactive Fortran compiler"
TERMUX_PKG_LICENSE="BSD 3-Clause"
TERMUX_PKG_MAINTAINER="@termux"
TERMUX_PKG_VERSION=0.14.0
TERMUX_PKG_SRCURL=https://gitlab.com/lfortran/lfortran.git
TERMUX_PKG_DEPENDS="clang, libc++, libkokkos, zlib"
TERMUX_PKG_EXTRA_CONFIGURE_ARGS="-DBUILD_SHARED_LIBS=ON"
TERMUX_PKG_HOSTBUILD=true
# On ILP32 arches (arm, i686) the build fails with:
# ```
# [...]/src/lfortran/parser/parser_stype.h:97:1: error: static_assert failed
# due to requirement
# 'sizeof(LFortran::YYSTYPE) == sizeof(LFortran::Vec<LFortran::AST::ast_t *>)'
# static_assert(sizeof(YYSTYPE) == sizeof(Vec<AST::ast_t*>));
# ^ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# ```
# Furthermore, libkokkos does not support ILP32. (A sketch of why such a
# size assertion fails on ILP32 follows this file.)
TERMUX_PKG_BLACKLISTED_ARCHES="arm, i686"
termux_step_host_build() {
termux_setup_cmake
( cd $TERMUX_PKG_SRCDIR && sh build0.sh )
cmake $TERMUX_PKG_SRCDIR
make -j $TERMUX_MAKE_PROCESSES
}
termux_step_pre_configure() {
PATH=$TERMUX_PKG_HOSTBUILD_DIR/src/bin:$PATH
( cd $TERMUX_PKG_SRCDIR && sh build0.sh )
for f in fpmath.h math_private.h s_clog.c s_clogf.c s_cpowf.c; do
cp $TERMUX_PKG_BUILDER_DIR/$f $TERMUX_PKG_SRCDIR/src/runtime/impure/
done
LDFLAGS+=" -lm"
}
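A minimal C sketch of why a size assertion like the one quoted above can hold
on LP64 yet fail on ILP32 (illustrative only; struct vec and union yystype are
hypothetical stand-ins for LFortran's Vec<AST::ast_t*> and YYSTYPE): a union is
padded to a multiple of its strictest member alignment, while a
pointer-and-size struct shrinks when pointers drop to 32 bits, so the two
sizes can diverge.

#include <assert.h>
#include <stddef.h>

/* Hypothetical stand-ins; sizes assume typical ILP32/LP64 ABIs. */
struct vec { void *data; size_t n, max; };      /* 12 bytes on ILP32, 24 on LP64 */
union yystype { struct vec v; long long i64; }; /* padded to 8-byte alignment */

/* Holds on LP64 (24 == 24); fails on ILP32 (16 != 12). */
static_assert(sizeof(union yystype) == sizeof(struct vec),
              "union and vec must have the same size");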

@@ -0,0 +1,20 @@
--- a/cmake/UserOverride.cmake
+++ b/cmake/UserOverride.cmake
@@ -9,7 +9,7 @@
if (CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
# g++
set(common "-Wall -Wextra")
- set(CMAKE_CXX_FLAGS_RELEASE_INIT "${common} -O3 -march=native -funroll-loops -DNDEBUG")
+ set(CMAKE_CXX_FLAGS_RELEASE_INIT "${common} -O3 -funroll-loops -DNDEBUG")
set(CMAKE_CXX_FLAGS_DEBUG_INIT "${common} -g -ggdb")
elseif (CMAKE_CXX_COMPILER_ID STREQUAL "Intel")
# icpc
@@ -19,7 +19,7 @@
elseif (CMAKE_CXX_COMPILER_ID MATCHES Clang)
# clang
set(common "-Wall -Wextra")
- set(CMAKE_CXX_FLAGS_RELEASE_INIT "${common} -O3 -march=native -funroll-loops -DNDEBUG")
+ set(CMAKE_CXX_FLAGS_RELEASE_INIT "${common} -O3 -funroll-loops -DNDEBUG")
set(CMAKE_CXX_FLAGS_DEBUG_INIT "${common} -g -ggdb")
elseif (CMAKE_CXX_COMPILER_ID STREQUAL "PGI")
# pgcpp

packages/lfortran/fpmath.h

@@ -0,0 +1,93 @@
/*-
* Copyright (c) 2003 Mike Barcroft <mike@FreeBSD.org>
* Copyright (c) 2002 David Schultz <das@FreeBSD.ORG>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
// ANDROID changed:
// - keep only little endian variants as they're the only ones supported.
// - add long double structures here instead of _fpmath.h.
// - android uses 128-bit long doubles for LP64, so the structure and macros
// were reworked for the quad precision ieee representation.
#pragma once
#include <endian.h>
union IEEEf2bits {
float f;
struct {
unsigned int man :23;
unsigned int exp :8;
unsigned int sign :1;
} bits;
};
#define DBL_MANH_SIZE 20
#define DBL_MANL_SIZE 32
union IEEEd2bits {
double d;
struct {
unsigned int manl :32;
unsigned int manh :20;
unsigned int exp :11;
unsigned int sign :1;
} bits;
};
#ifdef __LP64__
union IEEEl2bits {
long double e;
struct {
unsigned long manl :64;
unsigned long manh :48;
unsigned int exp :15;
unsigned int sign :1;
} bits;
struct {
unsigned long manl :64;
unsigned long manh :48;
unsigned int expsign :16;
} xbits;
};
#define LDBL_NBIT 0
#define LDBL_IMPLICIT_NBIT
#define mask_nbit_l(u) ((void)0)
#define LDBL_MANH_SIZE 48
#define LDBL_MANL_SIZE 64
#define LDBL_TO_ARRAY32(u, a) do { \
(a)[0] = (uint32_t)(u).bits.manl; \
(a)[1] = (uint32_t)((u).bits.manl >> 32); \
(a)[2] = (uint32_t)(u).bits.manh; \
(a)[3] = (uint32_t)((u).bits.manh >> 32); \
} while(0)
#endif // __LP64__

packages/lfortran/math_private.h

@@ -0,0 +1,924 @@
/*
* ====================================================
* Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
*
* Developed at SunPro, a Sun Microsystems, Inc. business.
* Permission to use, copy, modify, and distribute this
* software is freely granted, provided that this notice
* is preserved.
* ====================================================
*/
/*
* from: @(#)fdlibm.h 5.1 93/09/24
* $FreeBSD$
*/
#ifndef _MATH_PRIVATE_H_
#define _MATH_PRIVATE_H_
#include <sys/types.h>
#include <sys/endian.h>
/*
* The original fdlibm code used statements like:
* n0 = ((*(int*)&one)>>29)^1; * index of high word *
* ix0 = *(n0+(int*)&x); * high word of x *
* ix1 = *((1-n0)+(int*)&x); * low word of x *
* to dig two 32 bit words out of the 64 bit IEEE floating point
* value. That is non-ANSI, and, moreover, the gcc instruction
* scheduler gets it wrong. We instead use the following macros.
* Unlike the original code, we determine the endianness at compile
* time, not at run time; I don't see much benefit to selecting
* endianness at run time.
*/
/*
* A union which permits us to convert between a double and two 32 bit
* ints.
*/
#ifdef __arm__
#if defined(__VFP_FP__) || defined(__ARM_EABI__)
#define IEEE_WORD_ORDER BYTE_ORDER
#else
#define IEEE_WORD_ORDER BIG_ENDIAN
#endif
#else /* __arm__ */
#define IEEE_WORD_ORDER BYTE_ORDER
#endif
/* A union which permits us to convert between a long double and
four 32 bit ints. */
#if IEEE_WORD_ORDER == BIG_ENDIAN
typedef union
{
long double value;
struct {
u_int32_t mswhi;
u_int32_t mswlo;
u_int32_t lswhi;
u_int32_t lswlo;
} parts32;
struct {
u_int64_t msw;
u_int64_t lsw;
} parts64;
} ieee_quad_shape_type;
#endif
#if IEEE_WORD_ORDER == LITTLE_ENDIAN
typedef union
{
long double value;
struct {
u_int32_t lswlo;
u_int32_t lswhi;
u_int32_t mswlo;
u_int32_t mswhi;
} parts32;
struct {
u_int64_t lsw;
u_int64_t msw;
} parts64;
} ieee_quad_shape_type;
#endif
#if IEEE_WORD_ORDER == BIG_ENDIAN
typedef union
{
double value;
struct
{
u_int32_t msw;
u_int32_t lsw;
} parts;
struct
{
u_int64_t w;
} xparts;
} ieee_double_shape_type;
#endif
#if IEEE_WORD_ORDER == LITTLE_ENDIAN
typedef union
{
double value;
struct
{
u_int32_t lsw;
u_int32_t msw;
} parts;
struct
{
u_int64_t w;
} xparts;
} ieee_double_shape_type;
#endif
/* Get two 32 bit ints from a double. */
#define EXTRACT_WORDS(ix0,ix1,d) \
do { \
ieee_double_shape_type ew_u; \
ew_u.value = (d); \
(ix0) = ew_u.parts.msw; \
(ix1) = ew_u.parts.lsw; \
} while (0)
/* Get a 64-bit int from a double. */
#define EXTRACT_WORD64(ix,d) \
do { \
ieee_double_shape_type ew_u; \
ew_u.value = (d); \
(ix) = ew_u.xparts.w; \
} while (0)
/* Get the more significant 32 bit int from a double. */
#define GET_HIGH_WORD(i,d) \
do { \
ieee_double_shape_type gh_u; \
gh_u.value = (d); \
(i) = gh_u.parts.msw; \
} while (0)
/* Get the less significant 32 bit int from a double. */
#define GET_LOW_WORD(i,d) \
do { \
ieee_double_shape_type gl_u; \
gl_u.value = (d); \
(i) = gl_u.parts.lsw; \
} while (0)
/* Set a double from two 32 bit ints. */
#define INSERT_WORDS(d,ix0,ix1) \
do { \
ieee_double_shape_type iw_u; \
iw_u.parts.msw = (ix0); \
iw_u.parts.lsw = (ix1); \
(d) = iw_u.value; \
} while (0)
/* Set a double from a 64-bit int. */
#define INSERT_WORD64(d,ix) \
do { \
ieee_double_shape_type iw_u; \
iw_u.xparts.w = (ix); \
(d) = iw_u.value; \
} while (0)
/* Set the more significant 32 bits of a double from an int. */
#define SET_HIGH_WORD(d,v) \
do { \
ieee_double_shape_type sh_u; \
sh_u.value = (d); \
sh_u.parts.msw = (v); \
(d) = sh_u.value; \
} while (0)
/* Set the less significant 32 bits of a double from an int. */
#define SET_LOW_WORD(d,v) \
do { \
ieee_double_shape_type sl_u; \
sl_u.value = (d); \
sl_u.parts.lsw = (v); \
(d) = sl_u.value; \
} while (0)
/*
* A union which permits us to convert between a float and a 32 bit
* int.
*/
typedef union
{
float value;
/* FIXME: Assumes 32 bit int. */
unsigned int word;
} ieee_float_shape_type;
/* Get a 32 bit int from a float. */
#define GET_FLOAT_WORD(i,d) \
do { \
ieee_float_shape_type gf_u; \
gf_u.value = (d); \
(i) = gf_u.word; \
} while (0)
/* Set a float from a 32 bit int. */
#define SET_FLOAT_WORD(d,i) \
do { \
ieee_float_shape_type sf_u; \
sf_u.word = (i); \
(d) = sf_u.value; \
} while (0)
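/*
 * Illustrative sketch (an editor's example, not part of the original
 * header): typical use of the word-access macros above, mirroring how
 * s_clog.c reads the exponent field. Valid for finite, normal x.
 */
#if 0	/* example only, compiled out */
static int
example_unbiased_exponent(double x)
{
	u_int32_t hx;

	GET_HIGH_WORD(hx, x);	/* top 32 bits: sign(1) | exp(11) | manh(20) */
	return ((int)((hx >> 20) & 0x7ff) - 1023);
}
#endif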
/*
* Get expsign and mantissa as 16 bit and 64 bit ints from an 80 bit long
* double.
*/
#define EXTRACT_LDBL80_WORDS(ix0,ix1,d) \
do { \
union IEEEl2bits ew_u; \
ew_u.e = (d); \
(ix0) = ew_u.xbits.expsign; \
(ix1) = ew_u.xbits.man; \
} while (0)
/*
* Get expsign and mantissa as one 16 bit and two 64 bit ints from a 128 bit
* long double.
*/
#define EXTRACT_LDBL128_WORDS(ix0,ix1,ix2,d) \
do { \
union IEEEl2bits ew_u; \
ew_u.e = (d); \
(ix0) = ew_u.xbits.expsign; \
(ix1) = ew_u.xbits.manh; \
(ix2) = ew_u.xbits.manl; \
} while (0)
/* Get expsign as a 16 bit int from a long double. */
#define GET_LDBL_EXPSIGN(i,d) \
do { \
union IEEEl2bits ge_u; \
ge_u.e = (d); \
(i) = ge_u.xbits.expsign; \
} while (0)
/*
* Set an 80 bit long double from a 16 bit int expsign and a 64 bit int
* mantissa.
*/
#define INSERT_LDBL80_WORDS(d,ix0,ix1) \
do { \
union IEEEl2bits iw_u; \
iw_u.xbits.expsign = (ix0); \
iw_u.xbits.man = (ix1); \
(d) = iw_u.e; \
} while (0)
/*
* Set a 128 bit long double from a 16 bit int expsign and two 64 bit ints
* comprising the mantissa.
*/
#define INSERT_LDBL128_WORDS(d,ix0,ix1,ix2) \
do { \
union IEEEl2bits iw_u; \
iw_u.xbits.expsign = (ix0); \
iw_u.xbits.manh = (ix1); \
iw_u.xbits.manl = (ix2); \
(d) = iw_u.e; \
} while (0)
/* Set expsign of a long double from a 16 bit int. */
#define SET_LDBL_EXPSIGN(d,v) \
do { \
union IEEEl2bits se_u; \
se_u.e = (d); \
se_u.xbits.expsign = (v); \
(d) = se_u.e; \
} while (0)
#ifdef __i386__
/* Long double constants are broken on i386. */
#define LD80C(m, ex, v) { \
.xbits.man = __CONCAT(m, ULL), \
.xbits.expsign = (0x3fff + (ex)) | ((v) < 0 ? 0x8000 : 0), \
}
#else
/* The above works on non-i386 too, but we use this to check v. */
#define LD80C(m, ex, v) { .e = (v), }
#endif
#ifdef FLT_EVAL_METHOD
/*
* Attempt to get strict C99 semantics for assignment with non-C99 compilers.
*/
#if FLT_EVAL_METHOD == 0 || __GNUC__ == 0
#define STRICT_ASSIGN(type, lval, rval) ((lval) = (rval))
#else
#define STRICT_ASSIGN(type, lval, rval) do { \
volatile type __lval; \
\
if (sizeof(type) >= sizeof(long double)) \
(lval) = (rval); \
else { \
__lval = (rval); \
(lval) = __lval; \
} \
} while (0)
#endif
#endif /* FLT_EVAL_METHOD */
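/*
 * Illustrative sketch (an editor's example, not part of the original
 * header): with x87 extra precision (FLT_EVAL_METHOD == 2), a plain
 * assignment may leave a + b in an 80-bit register; STRICT_ASSIGN
 * forces a genuine store at double precision.
 */
#if 0	/* example only, compiled out */
static double
example_strict_sum(double a, double b)
{
	double r;

	STRICT_ASSIGN(double, r, a + b);
	return (r);
}
#endif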
/* Support switching the mode to FP_PE if necessary. */
#if defined(__i386__) && !defined(NO_FPSETPREC)
#define ENTERI() ENTERIT(long double)
#define ENTERIT(returntype) \
returntype __retval; \
fp_prec_t __oprec; \
\
if ((__oprec = fpgetprec()) != FP_PE) \
fpsetprec(FP_PE)
#define RETURNI(x) do { \
__retval = (x); \
if (__oprec != FP_PE) \
fpsetprec(__oprec); \
RETURNF(__retval); \
} while (0)
#define ENTERV() \
fp_prec_t __oprec; \
\
if ((__oprec = fpgetprec()) != FP_PE) \
fpsetprec(FP_PE)
#define RETURNV() do { \
if (__oprec != FP_PE) \
fpsetprec(__oprec); \
return; \
} while (0)
#else
#define ENTERI()
#define ENTERIT(x)
#define RETURNI(x) RETURNF(x)
#define ENTERV()
#define RETURNV() return
#endif
/* Default return statement if hack*_t() is not used. */
#define RETURNF(v) return (v)
/*
* 2sum gives the same result as 2sumF without requiring |a| >= |b| or
* a == 0, but is slower.
*/
#define _2sum(a, b) do { \
__typeof(a) __s, __w; \
\
__w = (a) + (b); \
__s = __w - (a); \
(b) = ((a) - (__w - __s)) + ((b) - __s); \
(a) = __w; \
} while (0)
/*
* 2sumF algorithm.
*
* "Normalize" the terms in the infinite-precision expression a + b for
* the sum of 2 floating point values so that b is as small as possible
* relative to 'a'. (The resulting 'a' is the value of the expression in
* the same precision as 'a' and the resulting b is the rounding error.)
* |a| must be >= |b| or 0, b's type must be no larger than 'a's type, and
* exponent overflow or underflow must not occur. This uses a Theorem of
* Dekker (1971). See Knuth (1981) 4.2.2 Theorem C. The name "TwoSum"
* is apparently due to Shewchuk (1997).
*
* For this to always work, assignment of a + b to 'a' must not retain any
* extra precision in a + b. This is required by C standards but broken
* in many compilers. The brokenness cannot be worked around using
* STRICT_ASSIGN() like we do elsewhere, since the efficiency of this
* algorithm would be destroyed by non-null strict assignments. (The
* compilers are correct to be broken -- the efficiency of all floating
* point code calculations would be destroyed similarly if they forced the
* conversions.)
*
* Fortunately, a case that works well can usually be arranged by building
* any extra precision into the type of 'a' -- 'a' should have type float_t,
* double_t or long double. b's type should be no larger than 'a's type.
* Callers should use these types with scopes as large as possible, to
* reduce their own extra-precision and efficiency problems. In
* particular, they shouldn't convert back and forth just to call here.
*/
#ifdef DEBUG
#define _2sumF(a, b) do { \
__typeof(a) __w; \
volatile __typeof(a) __ia, __ib, __r, __vw; \
\
__ia = (a); \
__ib = (b); \
assert(__ia == 0 || fabsl(__ia) >= fabsl(__ib)); \
\
__w = (a) + (b); \
(b) = ((a) - __w) + (b); \
(a) = __w; \
\
/* The next 2 assertions are weak if (a) is already long double. */ \
assert((long double)__ia + __ib == (long double)(a) + (b)); \
__vw = __ia + __ib; \
__r = __ia - __vw; \
__r += __ib; \
assert(__vw == (a) && __r == (b)); \
} while (0)
#else /* !DEBUG */
#define _2sumF(a, b) do { \
__typeof(a) __w; \
\
__w = (a) + (b); \
(b) = ((a) - __w) + (b); \
(a) = __w; \
} while (0)
#endif /* DEBUG */
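/*
 * Illustrative sketch (an editor's example, not part of the original
 * header): after _2sumF, 'a' holds the rounded sum and b the exact
 * rounding error. Assumes double_t == double (e.g. SSE arithmetic).
 */
#if 0	/* example only, compiled out */
static void
example_2sumF(void)
{
	double a = 1.0, b = 0x1p-60;	/* |a| >= |b| as required */

	_2sumF(a, b);
	/* now a == 1.0 (the rounded sum) and b == 0x1p-60 (the error) */
}
#endif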
/*
* Set x += c, where x is represented in extra precision as a + b.
* x must be sufficiently normalized and sufficiently larger than c,
* and the result is then sufficiently normalized.
*
* The details of ordering are that |a| must be >= |c| (so that (a, c)
* can be normalized without extra work to swap 'a' with c). The details of
* the normalization are that b must be small relative to the normalized 'a'.
* Normalization of (a, c) makes the normalized c tiny relative to the
* normalized a, so b remains small relative to 'a' in the result. However,
* b need not ever be tiny relative to 'a'. For example, b might be about
* 2**20 times smaller than 'a' to give about 20 extra bits of precision.
* That is usually enough, and adding c (which by normalization is about
* 2**53 times smaller than a) cannot change b significantly. However,
* cancellation of 'a' with c in normalization of (a, c) may reduce 'a'
* significantly relative to b. The caller must ensure that significant
* cancellation doesn't occur, either by having c of the same sign as 'a',
* or by having |c| a few percent smaller than |a|. Pre-normalization of
* (a, b) may help.
*
* This is a variant of an algorithm of Kahan (see Knuth (1981) 4.2.2
* exercise 19). We gain considerable efficiency by requiring the terms to
* be sufficiently normalized and sufficiently increasing.
*/
#define _3sumF(a, b, c) do { \
__typeof(a) __tmp; \
\
__tmp = (c); \
_2sumF(__tmp, (a)); \
(b) += (a); \
(a) = __tmp; \
} while (0)
/*
* Common routine to process the arguments to nan(), nanf(), and nanl().
*/
void _scan_nan(uint32_t *__words, int __num_words, const char *__s);
/*
* Mix 0, 1 or 2 NaNs. First add 0 to each arg. This normally just turns
* signaling NaNs into quiet NaNs by setting a quiet bit. We do this
* because we want to never return a signaling NaN, and also because we
* don't want the quiet bit to affect the result. Then mix the converted
* args using the specified operation.
*
* When one arg is NaN, the result is typically that arg quieted. When both
* args are NaNs, the result is typically the quietening of the arg whose
* mantissa is largest after quietening. When neither arg is NaN, the
* result may be NaN because it is indeterminate, or finite for subsequent
* construction of a NaN as the indeterminate 0.0L/0.0L.
*
* Technical complications: the result in bits after rounding to the final
* precision might depend on the runtime precision and/or on compiler
* optimizations, especially when different register sets are used for
* different precisions. Try to make the result not depend on at least the
* runtime precision by always doing the main mixing step in long double
* precision. Try to reduce dependencies on optimizations by adding the
* the 0's in different precisions (unless everything is in long double
* precision).
*/
#define nan_mix(x, y) (nan_mix_op((x), (y), +))
#define nan_mix_op(x, y, op) (((x) + 0.0L) op ((y) + 0))
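/*
 * Illustrative sketch (an editor's example, not part of the original
 * header): a typical fallback path using nan_mix(). Assumes <math.h>
 * has been included for isnan().
 */
#if 0	/* example only, compiled out */
static double
example_special_case(double x, double y)
{
	if (isnan(x) || isnan(y))
		return (nan_mix(x, y));	/* a quiet NaN derived from the args */
	return (x + y);
}
#endif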
#ifdef _COMPLEX_H
/*
* C99 specifies that complex numbers have the same representation as
* an array of two elements, where the first element is the real part
* and the second element is the imaginary part.
*/
typedef union {
float complex f;
float a[2];
} float_complex;
typedef union {
double complex f;
double a[2];
} double_complex;
typedef union {
long double complex f;
long double a[2];
} long_double_complex;
#define REALPART(z) ((z).a[0])
#define IMAGPART(z) ((z).a[1])
/*
* Inline functions that can be used to construct complex values.
*
* The C99 standard intends x+I*y to be used for this, but x+I*y is
* currently unusable in general since gcc introduces many overflow,
* underflow, sign and efficiency bugs by rewriting I*y as
* (0.0+I)*(y+0.0*I) and laboriously computing the full complex product.
* In particular, I*Inf is corrupted to NaN+I*Inf, and I*-0 is corrupted
* to -0.0+I*0.0.
*
* The C11 standard introduced the macros CMPLX(), CMPLXF() and CMPLXL()
* to construct complex values. Compilers that conform to the C99
* standard require the following functions to avoid the above issues.
*/
#ifndef CMPLXF
static __inline float complex
CMPLXF(float x, float y)
{
float_complex z;
REALPART(z) = x;
IMAGPART(z) = y;
return (z.f);
}
#endif
#ifndef CMPLX
static __inline double complex
CMPLX(double x, double y)
{
double_complex z;
REALPART(z) = x;
IMAGPART(z) = y;
return (z.f);
}
#endif
#ifndef CMPLXL
static __inline long double complex
CMPLXL(long double x, long double y)
{
long_double_complex z;
REALPART(z) = x;
IMAGPART(z) = y;
return (z.f);
}
#endif
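/*
 * Illustrative sketch (an editor's example, not part of the original
 * header): CMPLX() stores the imaginary part verbatim, whereas
 * 0.0 + I*-0.0 may lose the sign of the zero as described above.
 */
#if 0	/* example only, compiled out */
static double complex
example_negative_zero(void)
{
	return (CMPLX(0.0, -0.0));	/* imaginary part is exactly -0.0 */
}
#endif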
#endif /* _COMPLEX_H */
/*
* The rnint() family rounds to the nearest integer for a restricted
* range of args (up to about 2**MANT_DIG). We assume that the current
* rounding mode is FE_TONEAREST so that this can be done efficiently.
* Extra precision causes more problems in practice, and we only centralize
* this here to reduce those problems, and have not solved the efficiency
* problems. The exp2() family uses a more delicate version of this that
* requires extracting bits from the intermediate value, so it is not
* centralized here and should copy any solution of the efficiency problems.
*/
static inline double
rnint(__double_t x)
{
/*
* This casts to double to kill any extra precision. This depends
* on the cast being applied to a double_t to avoid compiler bugs
* (this is a cleaner version of STRICT_ASSIGN()). This is
* inefficient if there actually is extra precision, but is hard
* to improve on. We use double_t in the API to minimise conversions
* for just calling here. Note that we cannot easily change the
* magic number to the one that works directly with double_t, since
* the rounding precision is variable at runtime on x86 so the
* magic number would need to be variable. Assuming that the
* rounding precision is always the default is too fragile. This
* and many other complications will move when the default is
* changed to FP_PE.
*/
return ((double)(x + 0x1.8p52) - 0x1.8p52);
}
static inline float
rnintf(__float_t x)
{
/*
* As for rnint(), except we could just call that to handle the
* extra precision case, usually without losing efficiency.
*/
return ((float)(x + 0x1.8p23F) - 0x1.8p23F);
}
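/*
 * Illustrative sketch (an editor's example, not part of the original
 * header): under the assumed FE_TONEAREST mode, the magic-number trick
 * rounds halfway cases to even.
 */
#if 0	/* example only, compiled out */
#include <assert.h>
static void
example_rnint(void)
{
	assert(rnint(2.5) == 2.0);	/* ties go to even */
	assert(rnint(3.5) == 4.0);
	assert(rnintf(-1.25F) == -1.0F);
}
#endif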
#ifdef LDBL_MANT_DIG
/*
* The complications for extra precision are smaller for rnintl() since it
* can safely assume that the rounding precision has been increased from
* its default to FP_PE on x86. We don't exploit that here to get small
* optimizations from limiting the range to double. We just need it for
* the magic number to work with long doubles. ld128 callers should use
* rnint() instead of this if possible. ld80 callers should prefer
* rnintl() since for amd64 this avoids swapping the register set, while
* for i386 it makes no difference (assuming FP_PE), and for other arches
* it makes little difference.
*/
static inline long double
rnintl(long double x)
{
return (x + __CONCAT(0x1.8p, LDBL_MANT_DIG) / 2 -
__CONCAT(0x1.8p, LDBL_MANT_DIG) / 2);
}
#endif /* LDBL_MANT_DIG */
/*
* irint() and i64rint() give the same result as casting to their integer
* return type provided their arg is a floating point integer. They can
* sometimes be more efficient because no rounding is required.
*/
#if (defined(__amd64__) || defined(__i386__)) && defined(__GNUCLIKE_ASM)
#define irint(x) \
(sizeof(x) == sizeof(float) && \
sizeof(__float_t) == sizeof(long double) ? irintf(x) : \
sizeof(x) == sizeof(double) && \
sizeof(__double_t) == sizeof(long double) ? irintd(x) : \
sizeof(x) == sizeof(long double) ? irintl(x) : (int)(x))
#else
#define irint(x) ((int)(x))
#endif
#define i64rint(x) ((int64_t)(x)) /* only needed for ld128 so not opt. */
#if defined(__i386__) && defined(__GNUCLIKE_ASM)
static __inline int
irintf(float x)
{
int n;
__asm("fistl %0" : "=m" (n) : "t" (x));
return (n);
}
static __inline int
irintd(double x)
{
int n;
__asm("fistl %0" : "=m" (n) : "t" (x));
return (n);
}
#endif
#if (defined(__amd64__) || defined(__i386__)) && defined(__GNUCLIKE_ASM)
static __inline int
irintl(long double x)
{
int n;
__asm("fistl %0" : "=m" (n) : "t" (x));
return (n);
}
#endif
#ifdef DEBUG
#if defined(__amd64__) || defined(__i386__)
#define breakpoint() asm("int $3")
#else
#include <signal.h>
#define breakpoint() raise(SIGTRAP)
#endif
#endif
/* Write a pari script to test things externally. */
#ifdef DOPRINT
#include <stdio.h>
#ifndef DOPRINT_SWIZZLE
#define DOPRINT_SWIZZLE 0
#endif
#ifdef DOPRINT_LD80
#define DOPRINT_START(xp) do { \
uint64_t __lx; \
uint16_t __hx; \
\
/* Hack to give more-problematic args. */ \
EXTRACT_LDBL80_WORDS(__hx, __lx, *xp); \
__lx ^= DOPRINT_SWIZZLE; \
INSERT_LDBL80_WORDS(*xp, __hx, __lx); \
printf("x = %.21Lg; ", (long double)*xp); \
} while (0)
#define DOPRINT_END1(v) \
printf("y = %.21Lg; z = 0; show(x, y, z);\n", (long double)(v))
#define DOPRINT_END2(hi, lo) \
printf("y = %.21Lg; z = %.21Lg; show(x, y, z);\n", \
(long double)(hi), (long double)(lo))
#elif defined(DOPRINT_D64)
#define DOPRINT_START(xp) do { \
uint32_t __hx, __lx; \
\
EXTRACT_WORDS(__hx, __lx, *xp); \
__lx ^= DOPRINT_SWIZZLE; \
INSERT_WORDS(*xp, __hx, __lx); \
printf("x = %.21Lg; ", (long double)*xp); \
} while (0)
#define DOPRINT_END1(v) \
printf("y = %.21Lg; z = 0; show(x, y, z);\n", (long double)(v))
#define DOPRINT_END2(hi, lo) \
printf("y = %.21Lg; z = %.21Lg; show(x, y, z);\n", \
(long double)(hi), (long double)(lo))
#elif defined(DOPRINT_F32)
#define DOPRINT_START(xp) do { \
uint32_t __hx; \
\
GET_FLOAT_WORD(__hx, *xp); \
__hx ^= DOPRINT_SWIZZLE; \
SET_FLOAT_WORD(*xp, __hx); \
printf("x = %.21Lg; ", (long double)*xp); \
} while (0)
#define DOPRINT_END1(v) \
printf("y = %.21Lg; z = 0; show(x, y, z);\n", (long double)(v))
#define DOPRINT_END2(hi, lo) \
printf("y = %.21Lg; z = %.21Lg; show(x, y, z);\n", \
(long double)(hi), (long double)(lo))
#else /* !DOPRINT_LD80 && !DOPRINT_D64 (LD128 only) */
#ifndef DOPRINT_SWIZZLE_HIGH
#define DOPRINT_SWIZZLE_HIGH 0
#endif
#define DOPRINT_START(xp) do { \
uint64_t __lx, __llx; \
uint16_t __hx; \
\
EXTRACT_LDBL128_WORDS(__hx, __lx, __llx, *xp); \
__llx ^= DOPRINT_SWIZZLE; \
__lx ^= DOPRINT_SWIZZLE_HIGH; \
INSERT_LDBL128_WORDS(*xp, __hx, __lx, __llx); \
printf("x = %.36Lg; ", (long double)*xp); \
} while (0)
#define DOPRINT_END1(v) \
printf("y = %.36Lg; z = 0; show(x, y, z);\n", (long double)(v))
#define DOPRINT_END2(hi, lo) \
printf("y = %.36Lg; z = %.36Lg; show(x, y, z);\n", \
(long double)(hi), (long double)(lo))
#endif /* DOPRINT_LD80 */
#else /* !DOPRINT */
#define DOPRINT_START(xp)
#define DOPRINT_END1(v)
#define DOPRINT_END2(hi, lo)
#endif /* DOPRINT */
#define RETURNP(x) do { \
DOPRINT_END1(x); \
RETURNF(x); \
} while (0)
#define RETURNPI(x) do { \
DOPRINT_END1(x); \
RETURNI(x); \
} while (0)
#define RETURN2P(x, y) do { \
DOPRINT_END2((x), (y)); \
RETURNF((x) + (y)); \
} while (0)
#define RETURN2PI(x, y) do { \
DOPRINT_END2((x), (y)); \
RETURNI((x) + (y)); \
} while (0)
#ifdef STRUCT_RETURN
#define RETURNSP(rp) do { \
if (!(rp)->lo_set) \
RETURNP((rp)->hi); \
RETURN2P((rp)->hi, (rp)->lo); \
} while (0)
#define RETURNSPI(rp) do { \
if (!(rp)->lo_set) \
RETURNPI((rp)->hi); \
RETURN2PI((rp)->hi, (rp)->lo); \
} while (0)
#endif
#define SUM2P(x, y) ({ \
const __typeof (x) __x = (x); \
const __typeof (y) __y = (y); \
\
DOPRINT_END2(__x, __y); \
__x + __y; \
})
/*
* ieee style elementary functions
*
* We rename functions here to improve other sources' diffability
* against fdlibm.
*/
#define __ieee754_sqrt sqrt
#define __ieee754_acos acos
#define __ieee754_acosh acosh
#define __ieee754_log log
#define __ieee754_log2 log2
#define __ieee754_atanh atanh
#define __ieee754_asin asin
#define __ieee754_atan2 atan2
#define __ieee754_exp exp
#define __ieee754_cosh cosh
#define __ieee754_fmod fmod
#define __ieee754_pow pow
#define __ieee754_lgamma lgamma
#define __ieee754_gamma gamma
#define __ieee754_lgamma_r lgamma_r
#define __ieee754_gamma_r gamma_r
#define __ieee754_log10 log10
#define __ieee754_sinh sinh
#define __ieee754_hypot hypot
#define __ieee754_j0 j0
#define __ieee754_j1 j1
#define __ieee754_y0 y0
#define __ieee754_y1 y1
#define __ieee754_jn jn
#define __ieee754_yn yn
#define __ieee754_remainder remainder
#define __ieee754_scalb scalb
#define __ieee754_sqrtf sqrtf
#define __ieee754_acosf acosf
#define __ieee754_acoshf acoshf
#define __ieee754_logf logf
#define __ieee754_atanhf atanhf
#define __ieee754_asinf asinf
#define __ieee754_atan2f atan2f
#define __ieee754_expf expf
#define __ieee754_coshf coshf
#define __ieee754_fmodf fmodf
#define __ieee754_powf powf
#define __ieee754_lgammaf lgammaf
#define __ieee754_gammaf gammaf
#define __ieee754_lgammaf_r lgammaf_r
#define __ieee754_gammaf_r gammaf_r
#define __ieee754_log10f log10f
#define __ieee754_log2f log2f
#define __ieee754_sinhf sinhf
#define __ieee754_hypotf hypotf
#define __ieee754_j0f j0f
#define __ieee754_j1f j1f
#define __ieee754_y0f y0f
#define __ieee754_y1f y1f
#define __ieee754_jnf jnf
#define __ieee754_ynf ynf
#define __ieee754_remainderf remainderf
#define __ieee754_scalbf scalbf
/* fdlibm kernel function */
int __kernel_rem_pio2(double*,double*,int,int,int);
/* double precision kernel functions */
#ifndef INLINE_REM_PIO2
int __ieee754_rem_pio2(double,double*);
#endif
double __kernel_sin(double,double,int);
double __kernel_cos(double,double);
double __kernel_tan(double,double,int);
double __ldexp_exp(double,int);
#ifdef _COMPLEX_H
double complex __ldexp_cexp(double complex,int);
#endif
/* float precision kernel functions */
#ifndef INLINE_REM_PIO2F
int __ieee754_rem_pio2f(float,double*);
#endif
#ifndef INLINE_KERNEL_SINDF
float __kernel_sindf(double);
#endif
#ifndef INLINE_KERNEL_COSDF
float __kernel_cosdf(double);
#endif
#ifndef INLINE_KERNEL_TANDF
float __kernel_tandf(double,int);
#endif
float __ldexp_expf(float,int);
#ifdef _COMPLEX_H
float complex __ldexp_cexpf(float complex,int);
#endif
/* long double precision kernel functions */
long double __kernel_sinl(long double, long double, int);
long double __kernel_cosl(long double, long double);
long double __kernel_tanl(long double, long double, int);
#endif /* !_MATH_PRIVATE_H_ */

packages/lfortran/s_clog.c

@@ -0,0 +1,155 @@
/*-
* Copyright (c) 2013 Bruce D. Evans
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice unmodified, this list of conditions, and the following
* disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <complex.h>
#include <float.h>
#include "fpmath.h"
#include "math.h"
#include "math_private.h"
#define MANT_DIG DBL_MANT_DIG
#define MAX_EXP DBL_MAX_EXP
#define MIN_EXP DBL_MIN_EXP
static const double
ln2_hi = 6.9314718055829871e-1, /* 0x162e42fefa0000.0p-53 */
ln2_lo = 1.6465949582897082e-12; /* 0x1cf79abc9e3b3a.0p-92 */
double complex
clog(double complex z)
{
double_t ax, ax2h, ax2l, axh, axl, ay, ay2h, ay2l, ayh, ayl, sh, sl, t;
double x, y, v;
uint32_t hax, hay;
int kx, ky;
x = creal(z);
y = cimag(z);
v = atan2(y, x);
ax = fabs(x);
ay = fabs(y);
if (ax < ay) {
t = ax;
ax = ay;
ay = t;
}
GET_HIGH_WORD(hax, ax);
kx = (hax >> 20) - 1023;
GET_HIGH_WORD(hay, ay);
ky = (hay >> 20) - 1023;
/* Handle NaNs and Infs using the general formula. */
if (kx == MAX_EXP || ky == MAX_EXP)
return (CMPLX(log(hypot(x, y)), v));
/* Avoid spurious underflow, and reduce inaccuracies when ax is 1. */
if (ax == 1) {
if (ky < (MIN_EXP - 1) / 2)
return (CMPLX((ay / 2) * ay, v));
return (CMPLX(log1p(ay * ay) / 2, v));
}
/* Avoid underflow when ax is not small. Also handle zero args. */
if (kx - ky > MANT_DIG || ay == 0)
return (CMPLX(log(ax), v));
/* Avoid overflow. */
if (kx >= MAX_EXP - 1)
return (CMPLX(log(hypot(x * 0x1p-1022, y * 0x1p-1022)) +
(MAX_EXP - 2) * ln2_lo + (MAX_EXP - 2) * ln2_hi, v));
if (kx >= (MAX_EXP - 1) / 2)
return (CMPLX(log(hypot(x, y)), v));
/* Reduce inaccuracies and avoid underflow when ax is denormal. */
if (kx <= MIN_EXP - 2)
return (CMPLX(log(hypot(x * 0x1p1023, y * 0x1p1023)) +
(MIN_EXP - 2) * ln2_lo + (MIN_EXP - 2) * ln2_hi, v));
/* Avoid remaining underflows (when ax is small but not denormal). */
if (ky < (MIN_EXP - 1) / 2 + MANT_DIG)
return (CMPLX(log(hypot(x, y)), v));
/* Calculate ax*ax and ay*ay exactly using Dekker's algorithm. */
t = (double)(ax * (0x1p27 + 1));
axh = (double)(ax - t) + t;
axl = ax - axh;
ax2h = ax * ax;
ax2l = axh * axh - ax2h + 2 * axh * axl + axl * axl;
t = (double)(ay * (0x1p27 + 1));
ayh = (double)(ay - t) + t;
ayl = ay - ayh;
ay2h = ay * ay;
ay2l = ayh * ayh - ay2h + 2 * ayh * ayl + ayl * ayl;
/*
* When log(|z|) is far from 1, accuracy in calculating the sum
* of the squares is not very important since log() reduces
* inaccuracies. We depended on this to use the general
* formula when log(|z|) is very far from 1. When log(|z|) is
* moderately far from 1, we go through the extra-precision
* calculations to reduce branches and gain a little accuracy.
*
* When |z| is near 1, we subtract 1 and use log1p() and don't
* leave it to log() to subtract 1, since we gain at least 1 bit
* of accuracy in this way.
*
* When |z| is very near 1, subtracting 1 can cancel almost
* 3*MANT_DIG bits. We arrange that subtracting 1 is exact in
* doubled precision, and then do the rest of the calculation
* in sloppy doubled precision. Although large cancellations
* often lose lots of accuracy, here the final result is exact
* in doubled precision if the large calculation occurs (because
* then it is exact in tripled precision and the cancellation
* removes enough bits to fit in doubled precision). Thus the
* result is accurate in sloppy doubled precision, and the only
* significant loss of accuracy is when it is summed and passed
* to log1p().
*/
sh = ax2h;
sl = ay2h;
_2sumF(sh, sl);
if (sh < 0.5 || sh >= 3)
return (CMPLX(log(ay2l + ax2l + sl + sh) / 2, v));
sh -= 1;
_2sum(sh, sl);
_2sum(ax2l, ay2l);
/* Briggs-Kahan algorithm (except we discard the final low term): */
_2sum(sh, ax2l);
_2sum(sl, ay2l);
t = ax2l + sl;
_2sumF(sh, t);
return (CMPLX(log1p(ay2l + t + sh) / 2, v));
}
#if (LDBL_MANT_DIG == 53)
__weak_reference(clog, clogl);
#endif

packages/lfortran/s_clogf.c

@@ -0,0 +1,151 @@
/*-
* Copyright (c) 2013 Bruce D. Evans
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice unmodified, this list of conditions, and the following
* disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <complex.h>
#include <float.h>
#include "fpmath.h"
#include "math.h"
#include "math_private.h"
#define MANT_DIG FLT_MANT_DIG
#define MAX_EXP FLT_MAX_EXP
#define MIN_EXP FLT_MIN_EXP
static const float
ln2f_hi = 6.9314575195e-1, /* 0xb17200.0p-24 */
ln2f_lo = 1.4286067653e-6; /* 0xbfbe8e.0p-43 */
float complex
clogf(float complex z)
{
float_t ax, ax2h, ax2l, axh, axl, ay, ay2h, ay2l, ayh, ayl, sh, sl, t;
float x, y, v;
uint32_t hax, hay;
int kx, ky;
x = crealf(z);
y = cimagf(z);
v = atan2f(y, x);
ax = fabsf(x);
ay = fabsf(y);
if (ax < ay) {
t = ax;
ax = ay;
ay = t;
}
GET_FLOAT_WORD(hax, ax);
kx = (hax >> 23) - 127;
GET_FLOAT_WORD(hay, ay);
ky = (hay >> 23) - 127;
/* Handle NaNs and Infs using the general formula. */
if (kx == MAX_EXP || ky == MAX_EXP)
return (CMPLXF(logf(hypotf(x, y)), v));
/* Avoid spurious underflow, and reduce inaccuracies when ax is 1. */
if (hax == 0x3f800000) {
if (ky < (MIN_EXP - 1) / 2)
return (CMPLXF((ay / 2) * ay, v));
return (CMPLXF(log1pf(ay * ay) / 2, v));
}
/* Avoid underflow when ax is not small. Also handle zero args. */
if (kx - ky > MANT_DIG || hay == 0)
return (CMPLXF(logf(ax), v));
/* Avoid overflow. */
if (kx >= MAX_EXP - 1)
return (CMPLXF(logf(hypotf(x * 0x1p-126F, y * 0x1p-126F)) +
(MAX_EXP - 2) * ln2f_lo + (MAX_EXP - 2) * ln2f_hi, v));
if (kx >= (MAX_EXP - 1) / 2)
return (CMPLXF(logf(hypotf(x, y)), v));
/* Reduce inaccuracies and avoid underflow when ax is denormal. */
if (kx <= MIN_EXP - 2)
return (CMPLXF(logf(hypotf(x * 0x1p127F, y * 0x1p127F)) +
(MIN_EXP - 2) * ln2f_lo + (MIN_EXP - 2) * ln2f_hi, v));
/* Avoid remaining underflows (when ax is small but not denormal). */
if (ky < (MIN_EXP - 1) / 2 + MANT_DIG)
return (CMPLXF(logf(hypotf(x, y)), v));
/* Calculate ax*ax and ay*ay exactly using Dekker's algorithm. */
t = (float)(ax * (0x1p12F + 1));
axh = (float)(ax - t) + t;
axl = ax - axh;
ax2h = ax * ax;
ax2l = axh * axh - ax2h + 2 * axh * axl + axl * axl;
t = (float)(ay * (0x1p12F + 1));
ayh = (float)(ay - t) + t;
ayl = ay - ayh;
ay2h = ay * ay;
ay2l = ayh * ayh - ay2h + 2 * ayh * ayl + ayl * ayl;
/*
* When log(|z|) is far from 1, accuracy in calculating the sum
* of the squares is not very important since log() reduces
* inaccuracies. We depended on this to use the general
* formula when log(|z|) is very far from 1. When log(|z|) is
* moderately far from 1, we go through the extra-precision
* calculations to reduce branches and gain a little accuracy.
*
* When |z| is near 1, we subtract 1 and use log1p() and don't
* leave it to log() to subtract 1, since we gain at least 1 bit
* of accuracy in this way.
*
* When |z| is very near 1, subtracting 1 can cancel almost
* 3*MANT_DIG bits. We arrange that subtracting 1 is exact in
* doubled precision, and then do the rest of the calculation
* in sloppy doubled precision. Although large cancellations
* often lose lots of accuracy, here the final result is exact
* in doubled precision if the large calculation occurs (because
* then it is exact in tripled precision and the cancellation
* removes enough bits to fit in doubled precision). Thus the
* result is accurate in sloppy doubled precision, and the only
* significant loss of accuracy is when it is summed and passed
* to log1p().
*/
sh = ax2h;
sl = ay2h;
_2sumF(sh, sl);
if (sh < 0.5F || sh >= 3)
return (CMPLXF(logf(ay2l + ax2l + sl + sh) / 2, v));
sh -= 1;
_2sum(sh, sl);
_2sum(ax2l, ay2l);
/* Briggs-Kahan algorithm (except we discard the final low term): */
_2sum(sh, ax2l);
_2sum(sl, ay2l);
t = ax2l + sl;
_2sumF(sh, t);
return (CMPLXF(log1pf(ay2l + t + sh) / 2, v));
}

packages/lfortran/s_cpowf.c

@@ -0,0 +1,74 @@
/*-
* Copyright (c) 2008 Stephen L. Moshier <steve@moshier.net>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
/* cpowf
*
* Complex power function
*
*
*
* SYNOPSIS:
*
* float complex cpowf();
* float complex a, z, w;
*
* w = cpowf (a, z);
*
*
*
* DESCRIPTION:
*
* Raises complex A to the complex Zth power.
* Definition is per AMS55 # 4.2.8,
* analytically equivalent to cpow(a,z) = cexp(z clog(a)).
*
* ACCURACY:
*
* Relative error:
* arithmetic domain # trials peak rms
* IEEE -10,+10 30000 9.4e-15 1.5e-15
*
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <complex.h>
#include <math.h>
#include "math_private.h"
float complex
cpowf(float complex a, float complex z)
{
float complex w;
float x, y, r, theta, absa, arga;
x = crealf(z);
y = cimagf(z);
absa = cabsf (a);
if (absa == 0.0f) {
return (CMPLXF(0.0f, 0.0f));
}
arga = cargf (a);
r = powf (absa, x);
theta = x * arga;
if (y != 0.0f) {
r = r * expf (-y * arga);
theta = theta + y * logf (absa);
}
w = CMPLXF(r * cosf (theta), r * sinf (theta));
return (w);
}

@@ -0,0 +1,29 @@
--- a/src/bin/CMakeLists.txt
+++ b/src/bin/CMakeLists.txt
@@ -1,7 +1,7 @@
add_executable(lfortran lfortran.cpp)
target_include_directories(lfortran PRIVATE "tpl")
target_link_libraries(lfortran lfortran_lib)
-if (UNIX AND NOT APPLE)
+if (FALSE)
# This is sometimes needed to fix link errors for CLI11
target_link_libraries(lfortran stdc++fs)
endif()
@@ -42,7 +42,7 @@
macro(LFORTRAN_COMPILE_RUNTIME name dir)
add_custom_command(
OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/../runtime/${name}.mod
- COMMAND ${CMAKE_CURRENT_BINARY_DIR}/lfortran
+ COMMAND lfortran
ARGS --backend=cpp -c ${CMAKE_CURRENT_SOURCE_DIR}/../runtime/${dir}/${name}.f90 -o ${name}.o
WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/../runtime
DEPENDS lfortran ${CMAKE_CURRENT_SOURCE_DIR}/../runtime/${dir}/${name}.f90 ${ARGN}
@@ -134,7 +134,7 @@
add_executable(cpptranslate cpptranslate.cpp)
target_link_libraries(cpptranslate lfortran_lib)
-if (UNIX AND NOT APPLE)
+if (FALSE)
# This is sometimes needed to fix link errors for CLI11
target_link_libraries(cpptranslate stdc++fs)
endif()

@@ -0,0 +1,70 @@
--- a/src/bin/lfortran.cpp
+++ b/src/bin/lfortran.cpp
@@ -81,10 +81,15 @@
{
char *env_p = std::getenv("LFORTRAN_KOKKOS_DIR");
if (env_p) return env_p;
+#ifdef __ANDROID__
+ static std::string lfortran_kokkos_dir_default = "@TERMUX_PREFIX@";
+ return lfortran_kokkos_dir_default;
+#else
std::cerr << "The code C++ generated by the C++ LFortran backend uses the Kokkos library" << std::endl;
std::cerr << "(https://github.com/kokkos/kokkos). Please define the LFORTRAN_KOKKOS_DIR" << std::endl;
std::cerr << "environment variable to point to the Kokkos installation." << std::endl;
throw LFortran::LFortranException("LFORTRAN_KOKKOS_DIR is not defined");
+#endif
}
#ifdef HAVE_LFORTRAN_LLVM
@@ -873,7 +878,11 @@
out << " ";
}
char *env_CC = std::getenv("LFORTRAN_CC");
+#ifdef __ANDROID__
+ std::string CC="clang";
+#else
std::string CC="gcc";
+#endif
if (env_CC) CC = env_CC;
std::string cmd = CC + " -c " + outfile_empty + " -o " + outfile;
int err = system(cmd.c_str());
@@ -905,7 +914,11 @@
out << src;
}
+#ifdef __ANDROID__
+ std::string CXX = "clang++";
+#else
std::string CXX = "g++";
+#endif
std::string options;
if (openmp) {
options += "-fopenmp ";
@@ -1037,7 +1050,11 @@
}
return 0;
} else if (backend == Backend::cpp) {
+#ifdef __ANDROID__
+ std::string CXX = "clang++";
+#else
std::string CXX = "g++";
+#endif
std::string options, post_options;
if (static_executable) {
options += " -static ";
@@ -1047,8 +1064,13 @@
}
if (kokkos) {
std::string kokkos_dir = get_kokkos_dir();
- post_options += kokkos_dir + "/lib/libkokkoscontainers.a "
- + kokkos_dir + "/lib/libkokkoscore.a -ldl";
+ if (static_executable) {
+ post_options += kokkos_dir + "/lib/libkokkoscontainers.a "
+ + kokkos_dir + "/lib/libkokkoscore.a -ldl";
+ } else {
+ post_options += kokkos_dir + "/lib/libkokkoscontainers.so "
+ + kokkos_dir + "/lib/libkokkoscore.so -ldl";
+ }
}
std::string cmd = CXX + options + " -o " + outfile + " ";
for (auto &s : infiles) {

@@ -0,0 +1,18 @@
--- a/src/lfortran/CMakeLists.txt
+++ b/src/lfortran/CMakeLists.txt
@@ -99,10 +99,10 @@
if (WITH_LLVM)
target_link_libraries(lfortran_lib p::llvm)
endif()
-#install(TARGETS lfortran_lib
-# RUNTIME DESTINATION bin
-# ARCHIVE DESTINATION lib
-# LIBRARY DESTINATION lib
-#)
+install(TARGETS lfortran_lib
+ RUNTIME DESTINATION bin
+ ARCHIVE DESTINATION lib
+ LIBRARY DESTINATION lib
+)
add_subdirectory(tests)

@@ -0,0 +1,18 @@
--- a/src/runtime/impure/lfortran_intrinsics.c
+++ b/src/runtime/impure/lfortran_intrinsics.c
@@ -5,6 +5,15 @@
#include <complex.h>
#include <string.h>
+#if defined __ANDROID__ && __ANDROID_API__ < 26
+#include "s_clog.c"
+#undef MANT_DIG
+#undef MAX_EXP
+#undef MIN_EXP
+#include "s_clogf.c"
+#include "s_cpowf.c"
+#endif
+
struct _lfortran_complex {
float re, im;
};