author | garga <garga@FreeBSD.org> | 2014-01-08 06:34:30 +0800
committer | garga <garga@FreeBSD.org> | 2014-01-08 06:34:30 +0800
commit | 22b148800696b52afb99ba8a1ef5039fc74eed4c (patch)
tree | d650fd5471ee1179602e31706064863ff1b4987e /security/clamav-devel/files
parent | 37dbc8a89ca580e01c7c02d824e83f1f6d83dd96 (diff)
download | freebsd-ports-gnome-22b148800696b52afb99ba8a1ef5039fc74eed4c.tar.gz, freebsd-ports-gnome-22b148800696b52afb99ba8a1ef5039fc74eed4c.tar.zst, freebsd-ports-gnome-22b148800696b52afb99ba8a1ef5039fc74eed4c.zip
- Update to 20140107
- Remove USE_GCC=any and fix build with clang
Diffstat (limited to 'security/clamav-devel/files')
4 files changed, 506 insertions, 0 deletions
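Most of the clang-related churn in the patches below is mechanical: the three tomsfastmath patches only replace the GCC-tolerated `"%cc"` spelling in inline-asm clobber lists with the plain `"cc"` that clang also accepts. A minimal standalone sketch of that pattern (hypothetical code, x86/x86-64 only, not taken from the port):

```cpp
/*
 * Minimal sketch (hypothetical, x86/x86-64 only; not code from the port).
 * GCC tolerates "%cc" in an inline-asm clobber list, but clang rejects it
 * as an unknown register name; the portable spelling is plain "cc".
 */
#include <cstdio>

static unsigned add_with_carry(unsigned a, unsigned b, unsigned *carry)
{
#if defined(__x86_64__) || defined(__i386__)
    unsigned cy;
    __asm__("addl %3,%0 \n\t"
            "movl $0,%1 \n\t"
            "adcl $0,%1 \n\t"             /* capture the carry flag */
            : "=r"(a), "=&r"(cy)
            : "0"(a), "r"(b)
            : "cc");                      /* was "%cc" in the unpatched macros */
    *carry = cy;
    return a;
#else
    unsigned long long s = (unsigned long long)a + b;   /* portable fallback */
    *carry = (unsigned)(s >> 32);
    return (unsigned)s;
#endif
}

int main()
{
    unsigned cy = 0;
    unsigned r = add_with_carry(0xFFFFFFFFu, 2u, &cy);
    std::printf("sum=%u carry=%u\n", r, cy);   /* expect: sum=1 carry=1 */
    return 0;
}
```

The hunks in fp_montgomery_reduce.c, fp_mul_comba.c and fp_sqr_comba.c below make exactly this clobber-list change and nothing else.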
diff --git a/security/clamav-devel/files/patch-libclamav-c++-llvm b/security/clamav-devel/files/patch-libclamav-c++-llvm
new file mode 100644
index 000000000000..91c436b65f8b
--- /dev/null
+++ b/security/clamav-devel/files/patch-libclamav-c++-llvm
@@ -0,0 +1,72 @@
+--- libclamav/c++/llvm/lib/Target/SubtargetFeature.cpp.orig
++++ libclamav/c++/llvm/lib/Target/SubtargetFeature.cpp
+@@ -18,6 +18,7 @@
+ #include <algorithm>
+ #include <cassert>
+ #include <cctype>
++#include <cstdlib>
+ using namespace llvm;
+
+ //===----------------------------------------------------------------------===//
+--- libclamav/c++/llvm/include/llvm/Support/CFG.h.orig
++++ libclamav/c++/llvm/include/llvm/Support/CFG.h
+@@ -27,8 +27,9 @@
+
+ template <class Ptr, class USE_iterator> // Predecessor Iterator
+ class PredIterator : public std::iterator<std::forward_iterator_tag,
+- Ptr, ptrdiff_t> {
+- typedef std::iterator<std::forward_iterator_tag, Ptr, ptrdiff_t> super;
++ Ptr, ptrdiff_t, Ptr*, Ptr*> {
++ typedef std::iterator<std::forward_iterator_tag, Ptr, ptrdiff_t, Ptr*,
++ Ptr*> super;
+ typedef PredIterator<Ptr, USE_iterator> Self;
+ USE_iterator It;
+
+@@ -40,6 +41,7 @@
+
+ public:
+ typedef typename super::pointer pointer;
++ typedef typename super::reference reference;
+
+ PredIterator() {}
+ explicit inline PredIterator(Ptr *bb) : It(bb->use_begin()) {
+@@ -50,7 +52,7 @@
+ inline bool operator==(const Self& x) const { return It == x.It; }
+ inline bool operator!=(const Self& x) const { return !operator==(x); }
+
+- inline pointer operator*() const {
++ inline reference operator*() const {
+ assert(!It.atEnd() && "pred_iterator out of range!");
+ return cast<TerminatorInst>(*It)->getParent();
+ }
+@@ -100,10 +102,11 @@
+
+ template <class Term_, class BB_> // Successor Iterator
+ class SuccIterator : public std::iterator<std::bidirectional_iterator_tag,
+- BB_, ptrdiff_t> {
++ BB_, ptrdiff_t, BB_*, BB_*> {
+ const Term_ Term;
+ unsigned idx;
+- typedef std::iterator<std::bidirectional_iterator_tag, BB_, ptrdiff_t> super;
++ typedef std::iterator<std::bidirectional_iterator_tag, BB_, ptrdiff_t, BB_*,
++ BB_*> super;
+ typedef SuccIterator<Term_, BB_> Self;
+
+ inline bool index_is_valid(int idx) {
+@@ -112,6 +115,7 @@
+
+ public:
+ typedef typename super::pointer pointer;
++ typedef typename super::reference reference;
+ // TODO: This can be random access iterator, only operator[] missing.
+
+ explicit inline SuccIterator(Term_ T) : Term(T), idx(0) {// begin iterator
+@@ -142,7 +146,7 @@
+ inline bool operator==(const Self& x) const { return idx == x.idx; }
+ inline bool operator!=(const Self& x) const { return !operator==(x); }
+
+- inline pointer operator*() const { return Term->getSuccessor(idx); }
++ inline reference operator*() const { return Term->getSuccessor(idx); }
+ inline pointer operator->() const { return operator*(); }
+
+ inline Self& operator++() { ++idx; return *this; } // Preincrement
diff --git a/security/clamav-devel/files/patch-libclamav__tomsfastmath__mont__fp_montgomery_reduce.c b/security/clamav-devel/files/patch-libclamav__tomsfastmath__mont__fp_montgomery_reduce.c
new file mode 100644
index 000000000000..42770679a072
--- /dev/null
+++ b/security/clamav-devel/files/patch-libclamav__tomsfastmath__mont__fp_montgomery_reduce.c
@@ -0,0 +1,119 @@
+--- libclamav/tomsfastmath/mont/fp_montgomery_reduce.c.orig 2014-01-07 17:20:37.000000000 -0200
++++ libclamav/tomsfastmath/mont/fp_montgomery_reduce.c 2014-01-07 17:21:17.000000000 -0200
+@@ -30,7 +30,7 @@
+ "movl %%edx,%1 \n\t" \
+ :"=g"(_c[LO]), "=r"(cy) \
+ :"0"(_c[LO]), "1"(cy), "g"(mu), "g"(*tmpm++) \
+-: "%eax", "%edx", "%cc")
++: "%eax", "%edx", "cc")
+
+ #define PROPCARRY \
+ asm( \
+@@ -39,7 +39,7 @@
+ "movzbl %%al,%1 \n\t" \
+ :"=g"(_c[LO]), "=r"(cy) \
+ :"0"(_c[LO]), "1"(cy) \
+-: "%eax", "%cc")
++: "%eax", "cc")
+
+ /******************************************************************/
+ #elif defined(TFM_X86_64)
+@@ -62,7 +62,7 @@
+ "movq %%rdx,%1 \n\t" \
+ :"=g"(_c[LO]), "=r"(cy) \
+ :"0"(_c[LO]), "1"(cy), "r"(mu), "r"(*tmpm++) \
+-: "%rax", "%rdx", "%cc")
++: "%rax", "%rdx", "cc")
+
+ #define INNERMUL8 \
+ asm( \
+@@ -155,7 +155,7 @@
+ \
+ :"=r"(_c), "=r"(cy) \
+ : "0"(_c), "1"(cy), "g"(mu), "r"(tmpm)\
+-: "%rax", "%rdx", "%r10", "%r11", "%cc")
++: "%rax", "%rdx", "%r10", "%r11", "cc")
+
+
+ #define PROPCARRY \
+@@ -165,7 +165,7 @@
+ "movzbq %%al,%1 \n\t" \
+ :"=g"(_c[LO]), "=r"(cy) \
+ :"0"(_c[LO]), "1"(cy) \
+-: "%rax", "%cc")
++: "%rax", "cc")
+
+ /******************************************************************/
+ #elif defined(TFM_SSE2)
+@@ -280,7 +280,7 @@
+ "movzbl %%al,%1 \n\t" \
+ :"=g"(_c[LO]), "=r"(cy) \
+ :"0"(_c[LO]), "1"(cy) \
+-: "%eax", "%cc")
++: "%eax", "cc")
+
+ /******************************************************************/
+ #elif defined(TFM_ARM)
+@@ -300,7 +300,7 @@
+ " MOVCC %0,#0 \n\t" \
+ " UMLAL r0,%0,%3,%4 \n\t" \
+ " STR r0,%1 \n\t" \
+-:"=r"(cy),"=m"(_c[0]):"0"(cy),"r"(mu),"r"(*tmpm++),"1"(_c[0]):"r0","%cc");
++:"=r"(cy),"=m"(_c[0]):"0"(cy),"r"(mu),"r"(*tmpm++),"1"(_c[0]):"r0","cc");
+
+ #define PROPCARRY \
+ asm( \
+@@ -309,7 +309,7 @@
+ " STR r0,%1 \n\t" \
+ " MOVCS %0,#1 \n\t" \
+ " MOVCC %0,#0 \n\t" \
+-:"=r"(cy),"=m"(_c[0]):"0"(cy),"1"(_c[0]):"r0","%cc");
++:"=r"(cy),"=m"(_c[0]):"0"(cy),"1"(_c[0]):"r0","cc");
+
+ /******************************************************************/
+ #elif defined(TFM_PPC32)
+@@ -331,7 +331,7 @@
+ " addc 16,16,18 \n\t" \
+ " addze %0,17 \n\t" \
+ " stw 16,%1 \n\t" \
+-:"=r"(cy),"=m"(_c[0]):"0"(cy),"r"(mu),"r"(tmpm[0]),"1"(_c[0]):"16", "17", "18","%cc"); ++tmpm;
++:"=r"(cy),"=m"(_c[0]):"0"(cy),"r"(mu),"r"(tmpm[0]),"1"(_c[0]):"16", "17", "18","cc"); ++tmpm;
+
+ #define PROPCARRY \
+ asm( \
+@@ -340,7 +340,7 @@
+ " stw 16,%1 \n\t" \
+ " xor %0,%0,%0 \n\t" \
+ " addze %0,%0 \n\t" \
+-:"=r"(cy),"=m"(_c[0]):"0"(cy),"1"(_c[0]):"16","%cc");
++:"=r"(cy),"=m"(_c[0]):"0"(cy),"1"(_c[0]):"16","cc");
+
+ /******************************************************************/
+ #elif defined(TFM_PPC64)
+@@ -362,7 +362,7 @@
+ " addc r16,r16,r18 \n\t" \
+ " addze %0,r17 \n\t" \
+ " sdx r16,0,%1 \n\t" \
+-:"=r"(cy),"=m"(_c[0]):"0"(cy),"r"(mu),"r"(tmpm[0]),"1"(_c[0]):"r16", "r17", "r18","%cc"); ++tmpm;
++:"=r"(cy),"=m"(_c[0]):"0"(cy),"r"(mu),"r"(tmpm[0]),"1"(_c[0]):"r16", "r17", "r18","cc"); ++tmpm;
+
+ #define PROPCARRY \
+ asm( \
+@@ -371,7 +371,7 @@
+ " sdx r16,0,%1 \n\t" \
+ " xor %0,%0,%0 \n\t" \
+ " addze %0,%0 \n\t" \
+-:"=r"(cy),"=m"(_c[0]):"0"(cy),"1"(_c[0]):"r16","%cc");
++:"=r"(cy),"=m"(_c[0]):"0"(cy),"1"(_c[0]):"r16","cc");
+
+ /******************************************************************/
+ #elif defined(TFM_AVR32)
+@@ -401,7 +401,7 @@
+ " st.w %1,r2 \n\t" \
+ " eor %0,%0 \n\t" \
+ " acr %0 \n\t" \
+-:"=r"(cy),"=r"(&_c[0]):"0"(cy),"1"(&_c[0]):"r2","%cc");
++:"=r"(cy),"=r"(&_c[0]):"0"(cy),"1"(&_c[0]):"r2","cc");
+
+ /******************************************************************/
+ #elif defined(TFM_MIPS)
diff --git a/security/clamav-devel/files/patch-libclamav__tomsfastmath__mul__fp_mul_comba.c b/security/clamav-devel/files/patch-libclamav__tomsfastmath__mul__fp_mul_comba.c
new file mode 100644
index 000000000000..7a6301af8222
--- /dev/null
+++ b/security/clamav-devel/files/patch-libclamav__tomsfastmath__mul__fp_mul_comba.c
@@ -0,0 +1,38 @@
+--- libclamav/tomsfastmath/mul/fp_mul_comba.c.orig 2014-01-07 17:20:42.000000000 -0200
++++ libclamav/tomsfastmath/mul/fp_mul_comba.c 2014-01-07 17:21:24.000000000 -0200
+@@ -53,7 +53,7 @@
+ "addl %%eax,%0 \n\t" \
+ "adcl %%edx,%1 \n\t" \
+ "adcl $0,%2 \n\t" \
+- :"=r"(c0), "=r"(c1), "=r"(c2): "0"(c0), "1"(c1), "2"(c2), "m"(i), "m"(j) :"%eax","%edx","%cc");
++ :"=r"(c0), "=r"(c1), "=r"(c2): "0"(c0), "1"(c1), "2"(c2), "m"(i), "m"(j) :"%eax","%edx","cc");
+
+ #elif defined(TFM_X86_64)
+ /* x86-64 optimized */
+@@ -88,7 +88,7 @@
+ "addq %%rax,%0 \n\t" \
+ "adcq %%rdx,%1 \n\t" \
+ "adcq $0,%2 \n\t" \
+- :"=r"(c0), "=r"(c1), "=r"(c2): "0"(c0), "1"(c1), "2"(c2), "g"(i), "g"(j) :"%rax","%rdx","%cc");
++ :"=r"(c0), "=r"(c1), "=r"(c2): "0"(c0), "1"(c1), "2"(c2), "g"(i), "g"(j) :"%rax","%rdx","cc");
+
+ #elif defined(TFM_SSE2)
+ /* use SSE2 optimizations */
+@@ -128,7 +128,7 @@
+ "movd %%mm0,%%eax \n\t" \
+ "adcl %%eax,%1 \n\t" \
+ "adcl $0,%2 \n\t" \
+- :"=r"(c0), "=r"(c1), "=r"(c2): "0"(c0), "1"(c1), "2"(c2), "m"(i), "m"(j) :"%eax","%cc");
++ :"=r"(c0), "=r"(c1), "=r"(c2): "0"(c0), "1"(c1), "2"(c2), "m"(i), "m"(j) :"%eax","cc");
+
+ #elif defined(TFM_ARM)
+ /* ARM code */
+@@ -155,7 +155,7 @@
+ " ADDS %0,%0,r0 \n\t" \
+ " ADCS %1,%1,r1 \n\t" \
+ " ADC %2,%2,#0 \n\t" \
+-:"=r"(c0), "=r"(c1), "=r"(c2) : "0"(c0), "1"(c1), "2"(c2), "r"(i), "r"(j) : "r0", "r1", "%cc");
++:"=r"(c0), "=r"(c1), "=r"(c2) : "0"(c0), "1"(c1), "2"(c2), "r"(i), "r"(j) : "r0", "r1", "cc");
+
+ #elif defined(TFM_PPC32)
+ /* For 32-bit PPC */
diff --git a/security/clamav-devel/files/patch-libclamav__tomsfastmath__sqr__fp_sqr_comba.c b/security/clamav-devel/files/patch-libclamav__tomsfastmath__sqr__fp_sqr_comba.c
new file mode 100644
index 000000000000..0394f86b89e1
--- /dev/null
+++ b/security/clamav-devel/files/patch-libclamav__tomsfastmath__sqr__fp_sqr_comba.c
@@ -0,0 +1,277 @@
+--- libclamav/tomsfastmath/sqr/fp_sqr_comba.c.orig 2014-01-07 17:25:18.000000000 -0200
++++ libclamav/tomsfastmath/sqr/fp_sqr_comba.c 2014-01-07 17:25:40.000000000 -0200
+@@ -41,7 +41,7 @@
+ "addl %%eax,%0 \n\t" \
+ "adcl %%edx,%1 \n\t" \
+ "adcl $0,%2 \n\t" \
+- :"=r"(c0), "=r"(c1), "=r"(c2): "0"(c0), "1"(c1), "2"(c2), "m"(i) :"%eax","%edx","%cc");
++ :"=r"(c0), "=r"(c1), "=r"(c2): "0"(c0), "1"(c1), "2"(c2), "m"(i) :"%eax","%edx","cc");
+
+ #define SQRADD2(i, j) \
+ asm( \
+@@ -53,7 +53,7 @@
+ "addl %%eax,%0 \n\t" \
+ "adcl %%edx,%1 \n\t" \
+ "adcl $0,%2 \n\t" \
+- :"=r"(c0), "=r"(c1), "=r"(c2): "0"(c0), "1"(c1), "2"(c2), "m"(i), "m"(j) :"%eax","%edx","%cc");
++ :"=r"(c0), "=r"(c1), "=r"(c2): "0"(c0), "1"(c1), "2"(c2), "m"(i), "m"(j) :"%eax","%edx","cc");
+
+ #define SQRADDSC(i, j) \
+ asm( \
+@@ -62,7 +62,7 @@
+ "movl %%eax,%0 \n\t" \
+ "movl %%edx,%1 \n\t" \
+ "xorl %2,%2 \n\t" \
+- :"=r"(sc0), "=r"(sc1), "=r"(sc2): "0"(sc0), "1"(sc1), "2"(sc2), "g"(i), "g"(j) :"%eax","%edx","%cc");
++ :"=r"(sc0), "=r"(sc1), "=r"(sc2): "0"(sc0), "1"(sc1), "2"(sc2), "g"(i), "g"(j) :"%eax","%edx","cc");
+
+ #define SQRADDAC(i, j) \
+ asm( \
+@@ -71,7 +71,7 @@
+ "addl %%eax,%0 \n\t" \
+ "adcl %%edx,%1 \n\t" \
+ "adcl $0,%2 \n\t" \
+- :"=r"(sc0), "=r"(sc1), "=r"(sc2): "0"(sc0), "1"(sc1), "2"(sc2), "g"(i), "g"(j) :"%eax","%edx","%cc");
++ :"=r"(sc0), "=r"(sc1), "=r"(sc2): "0"(sc0), "1"(sc1), "2"(sc2), "g"(i), "g"(j) :"%eax","%edx","cc");
+
+ #define SQRADDDB \
+ asm( \
+@@ -81,7 +81,7 @@
+ "addl %6,%0 \n\t" \
+ "adcl %7,%1 \n\t" \
+ "adcl %8,%2 \n\t" \
+- :"=r"(c0), "=r"(c1), "=r"(c2) : "0"(c0), "1"(c1), "2"(c2), "r"(sc0), "r"(sc1), "r"(sc2) : "%cc");
++ :"=r"(c0), "=r"(c1), "=r"(c2) : "0"(c0), "1"(c1), "2"(c2), "r"(sc0), "r"(sc1), "r"(sc2) : "cc");
+
+ #elif defined(TFM_X86_64)
+ /* x86-64 optimized */
+@@ -109,7 +109,7 @@
+ "addq %%rax,%0 \n\t" \
+ "adcq %%rdx,%1 \n\t" \
+ "adcq $0,%2 \n\t" \
+- :"=r"(c0), "=r"(c1), "=r"(c2): "0"(c0), "1"(c1), "2"(c2), "g"(i) :"%rax","%rdx","%cc");
++ :"=r"(c0), "=r"(c1), "=r"(c2): "0"(c0), "1"(c1), "2"(c2), "g"(i) :"%rax","%rdx","cc");
+
+ #define SQRADD2(i, j) \
+ asm( \
+@@ -121,7 +121,7 @@
+ "addq %%rax,%0 \n\t" \
+ "adcq %%rdx,%1 \n\t" \
+ "adcq $0,%2 \n\t" \
+- :"=r"(c0), "=r"(c1), "=r"(c2): "0"(c0), "1"(c1), "2"(c2), "g"(i), "g"(j) :"%rax","%rdx","%cc");
++ :"=r"(c0), "=r"(c1), "=r"(c2): "0"(c0), "1"(c1), "2"(c2), "g"(i), "g"(j) :"%rax","%rdx","cc");
+
+ #define SQRADDSC(i, j) \
+ asm( \
+@@ -130,7 +130,7 @@
+ "movq %%rax,%0 \n\t" \
+ "movq %%rdx,%1 \n\t" \
+ "xorq %2,%2 \n\t" \
+- :"=r"(sc0), "=r"(sc1), "=r"(sc2): "0"(sc0), "1"(sc1), "2"(sc2), "g"(i), "g"(j) :"%rax","%rdx","%cc");
++ :"=r"(sc0), "=r"(sc1), "=r"(sc2): "0"(sc0), "1"(sc1), "2"(sc2), "g"(i), "g"(j) :"%rax","%rdx","cc");
+
+ #define SQRADDAC(i, j) \
+ asm( \
+@@ -139,7 +139,7 @@
+ "addq %%rax,%0 \n\t" \
+ "adcq %%rdx,%1 \n\t" \
+ "adcq $0,%2 \n\t" \
+- :"=r"(sc0), "=r"(sc1), "=r"(sc2): "0"(sc0), "1"(sc1), "2"(sc2), "g"(i), "g"(j) :"%rax","%rdx","%cc");
++ :"=r"(sc0), "=r"(sc1), "=r"(sc2): "0"(sc0), "1"(sc1), "2"(sc2), "g"(i), "g"(j) :"%rax","%rdx","cc");
+
+ #define SQRADDDB \
+ asm( \
+@@ -149,7 +149,7 @@
+ "addq %6,%0 \n\t" \
+ "adcq %7,%1 \n\t" \
+ "adcq %8,%2 \n\t" \
+- :"=r"(c0), "=r"(c1), "=r"(c2) : "0"(c0), "1"(c1), "2"(c2), "r"(sc0), "r"(sc1), "r"(sc2) : "%cc");
++ :"=r"(c0), "=r"(c1), "=r"(c2) : "0"(c0), "1"(c1), "2"(c2), "r"(sc0), "r"(sc1), "r"(sc2) : "cc");
+
+ #elif defined(TFM_SSE2)
+
+@@ -181,7 +181,7 @@
+ "movd %%mm0,%%eax \n\t" \
+ "adcl %%eax,%1 \n\t" \
+ "adcl $0,%2 \n\t" \
+- :"=r"(c0), "=r"(c1), "=r"(c2): "0"(c0), "1"(c1), "2"(c2), "m"(i) :"%eax","%cc");
++ :"=r"(c0), "=r"(c1), "=r"(c2): "0"(c0), "1"(c1), "2"(c2), "m"(i) :"%eax","cc");
+
+ #define SQRADD2(i, j) \
+ asm( \
+@@ -197,7 +197,7 @@
+ "movd %%mm0,%%eax \n\t" \
+ "addl %%eax,%0 \n\t" \
+ "adcl %%edx,%1 \n\t" \
+ "adcl $0,%2 \n\t" \
+- :"=r"(c0), "=r"(c1), "=r"(c2): "0"(c0), "1"(c1), "2"(c2), "m"(i), "m"(j) :"%eax","%edx","%cc");
++ :"=r"(c0), "=r"(c1), "=r"(c2): "0"(c0), "1"(c1), "2"(c2), "m"(i), "m"(j) :"%eax","%edx","cc");
+
+ #define SQRADDSC(i, j) \
+ asm( \
+@@ -221,7 +221,7 @@
+ "addl %%eax,%0 \n\t" \
+ "adcl %%edx,%1 \n\t" \
+ "adcl $0,%2 \n\t" \
+- :"=r"(sc0), "=r"(sc1), "=r"(sc2): "0"(sc0), "1"(sc1), "2"(sc2), "m"(i), "m"(j) :"%eax","%edx","%cc");
++ :"=r"(sc0), "=r"(sc1), "=r"(sc2): "0"(sc0), "1"(sc1), "2"(sc2), "m"(i), "m"(j) :"%eax","%edx","cc");
+
+ #define SQRADDDB \
+ asm( \
+@@ -231,7 +231,7 @@
+ "addl %6,%0 \n\t" \
+ "adcl %7,%1 \n\t" \
+ "adcl %8,%2 \n\t" \
+- :"=r"(c0), "=r"(c1), "=r"(c2) : "0"(c0), "1"(c1), "2"(c2), "r"(sc0), "r"(sc1), "r"(sc2) : "%cc");
++ :"=r"(c0), "=r"(c1), "=r"(c2) : "0"(c0), "1"(c1), "2"(c2), "r"(sc0), "r"(sc1), "r"(sc2) : "cc");
+
+ #elif defined(TFM_ARM)
+
+@@ -260,7 +260,7 @@
+ " ADDS %0,%0,r0 \n\t" \
+ " ADCS %1,%1,r1 \n\t" \
+ " ADC %2,%2,#0 \n\t" \
+-:"=r"(c0), "=r"(c1), "=r"(c2) : "0"(c0), "1"(c1), "2"(c2), "r"(i) : "r0", "r1", "%cc");
++:"=r"(c0), "=r"(c1), "=r"(c2) : "0"(c0), "1"(c1), "2"(c2), "r"(i) : "r0", "r1", "cc");
+
+ /* for squaring some of the terms are doubled... */
+ #define SQRADD2(i, j) \
+@@ -272,13 +272,13 @@
+ " ADDS %0,%0,r0 \n\t" \
+ " ADCS %1,%1,r1 \n\t" \
+ " ADC %2,%2,#0 \n\t" \
+-:"=r"(c0), "=r"(c1), "=r"(c2) : "0"(c0), "1"(c1), "2"(c2), "r"(i), "r"(j) : "r0", "r1", "%cc");
++:"=r"(c0), "=r"(c1), "=r"(c2) : "0"(c0), "1"(c1), "2"(c2), "r"(i), "r"(j) : "r0", "r1", "cc");
+
+ #define SQRADDSC(i, j) \
+ asm( \
+ " UMULL %0,%1,%6,%7 \n\t" \
+ " SUB %2,%2,%2 \n\t" \
+-:"=r"(sc0), "=r"(sc1), "=r"(sc2) : "0"(sc0), "1"(sc1), "2"(sc2), "r"(i), "r"(j) : "%cc");
++:"=r"(sc0), "=r"(sc1), "=r"(sc2) : "0"(sc0), "1"(sc1), "2"(sc2), "r"(i), "r"(j) : "cc");
+
+ #define SQRADDAC(i, j) \
+ asm( \
+@@ -286,7 +286,7 @@
+ " ADDS %0,%0,r0 \n\t" \
+ " ADCS %1,%1,r1 \n\t" \
+ " ADC %2,%2,#0 \n\t" \
+-:"=r"(sc0), "=r"(sc1), "=r"(sc2) : "0"(sc0), "1"(sc1), "2"(sc2), "r"(i), "r"(j) : "r0", "r1", "%cc");
++:"=r"(sc0), "=r"(sc1), "=r"(sc2) : "0"(sc0), "1"(sc1), "2"(sc2), "r"(i), "r"(j) : "r0", "r1", "cc");
+
+ #define SQRADDDB \
+ asm( \
+@@ -296,7 +296,7 @@
+ " ADDS %0,%0,%3 \n\t" \
+ " ADCS %1,%1,%4 \n\t" \
+ " ADC %2,%2,%5 \n\t" \
+-:"=r"(c0), "=r"(c1), "=r"(c2) : "r"(sc0), "r"(sc1), "r"(sc2), "0"(c0), "1"(c1), "2"(c2) : "%cc");
++:"=r"(c0), "=r"(c1), "=r"(c2) : "r"(sc0), "r"(sc1), "r"(sc2), "0"(c0), "1"(c1), "2"(c2) : "cc");
+
+ #elif defined(TFM_PPC32)
+
+@@ -326,7 +326,7 @@
+ " mulhwu 16,%6,%6 \n\t" \
+ " adde %1,%1,16 \n\t" \
+ " addze %2,%2 \n\t" \
+-:"=r"(c0), "=r"(c1), "=r"(c2):"0"(c0), "1"(c1), "2"(c2), "r"(i):"16","%cc");
++:"=r"(c0), "=r"(c1), "=r"(c2):"0"(c0), "1"(c1), "2"(c2), "r"(i):"16","cc");
+
+ /* for squaring some of the terms are doubled... */
+ #define SQRADD2(i, j) \
+@@ -339,14 +339,14 @@
+ " addc %0,%0,16 \n\t" \
+ " adde %1,%1,17 \n\t" \
+ " addze %2,%2 \n\t" \
+-:"=r"(c0), "=r"(c1), "=r"(c2):"0"(c0), "1"(c1), "2"(c2), "r"(i), "r"(j):"16", "17","%cc");
++:"=r"(c0), "=r"(c1), "=r"(c2):"0"(c0), "1"(c1), "2"(c2), "r"(i), "r"(j):"16", "17","cc");
+
+ #define SQRADDSC(i, j) \
+ asm( \
+ " mullw %0,%6,%7 \n\t" \
+ " mulhwu %1,%6,%7 \n\t" \
+ " xor %2,%2,%2 \n\t" \
+-:"=r"(sc0), "=r"(sc1), "=r"(sc2):"0"(sc0), "1"(sc1), "2"(sc2), "r"(i),"r"(j) : "%cc");
++:"=r"(sc0), "=r"(sc1), "=r"(sc2):"0"(sc0), "1"(sc1), "2"(sc2), "r"(i),"r"(j) : "cc");
+
+ #define SQRADDAC(i, j) \
+ asm( \
+@@ -355,7 +355,7 @@
+ " mulhwu 16,%6,%7 \n\t" \
+ " adde %1,%1,16 \n\t" \
+ " addze %2,%2 \n\t" \
+-:"=r"(sc0), "=r"(sc1), "=r"(sc2):"0"(sc0), "1"(sc1), "2"(sc2), "r"(i), "r"(j):"16", "%cc");
++:"=r"(sc0), "=r"(sc1), "=r"(sc2):"0"(sc0), "1"(sc1), "2"(sc2), "r"(i), "r"(j):"16", "cc");
+
+ #define SQRADDDB \
+ asm( \
+@@ -365,7 +365,7 @@
+ " addc %0,%0,%3 \n\t" \
+ " adde %1,%1,%4 \n\t" \
+ " adde %2,%2,%5 \n\t" \
+-:"=r"(c0), "=r"(c1), "=r"(c2) : "r"(sc0), "r"(sc1), "r"(sc2), "0"(c0), "1"(c1), "2"(c2) : "%cc");
++:"=r"(c0), "=r"(c1), "=r"(c2) : "r"(sc0), "r"(sc1), "r"(sc2), "0"(c0), "1"(c1), "2"(c2) : "cc");
+
+ #elif defined(TFM_PPC64)
+ /* PPC64 */
+@@ -394,7 +394,7 @@
+ " mulhdu r16,%6,%6 \n\t" \
+ " adde %1,%1,r16 \n\t" \
+ " addze %2,%2 \n\t" \
+-:"=r"(c0), "=r"(c1), "=r"(c2):"0"(c0), "1"(c1), "2"(c2), "r"(i):"r16","%cc");
++:"=r"(c0), "=r"(c1), "=r"(c2):"0"(c0), "1"(c1), "2"(c2), "r"(i):"r16","cc");
+
+ /* for squaring some of the terms are doubled... */
+ #define SQRADD2(i, j) \
+@@ -407,14 +407,14 @@
+ " addc %0,%0,r16 \n\t" \
+ " adde %1,%1,r17 \n\t" \
+ " addze %2,%2 \n\t" \
+-:"=r"(c0), "=r"(c1), "=r"(c2):"0"(c0), "1"(c1), "2"(c2), "r"(i), "r"(j):"r16", "r17","%cc");
++:"=r"(c0), "=r"(c1), "=r"(c2):"0"(c0), "1"(c1), "2"(c2), "r"(i), "r"(j):"r16", "r17","cc");
+
+ #define SQRADDSC(i, j) \
+ asm( \
+ " mulld %0,%6,%7 \n\t" \
+ " mulhdu %1,%6,%7 \n\t" \
+ " xor %2,%2,%2 \n\t" \
+-:"=r"(sc0), "=r"(sc1), "=r"(sc2):"0"(sc0), "1"(sc1), "2"(sc2), "r"(i),"r"(j) : "%cc");
++:"=r"(sc0), "=r"(sc1), "=r"(sc2):"0"(sc0), "1"(sc1), "2"(sc2), "r"(i),"r"(j) : "cc");
+
+ #define SQRADDAC(i, j) \
+ asm( \
+@@ -423,7 +423,7 @@
+ " mulhdu r16,%6,%7 \n\t" \
+ " adde %1,%1,r16 \n\t" \
+ " addze %2,%2 \n\t" \
+-:"=r"(sc0), "=r"(sc1), "=r"(sc2):"0"(sc0), "1"(sc1), "2"(sc2), "r"(i), "r"(j):"r16", "%cc");
++:"=r"(sc0), "=r"(sc1), "=r"(sc2):"0"(sc0), "1"(sc1), "2"(sc2), "r"(i), "r"(j):"r16", "cc");
+
+ #define SQRADDDB \
+ asm( \
+@@ -433,7 +433,7 @@
+ " addc %0,%0,%3 \n\t" \
+ " adde %1,%1,%4 \n\t" \
+ " adde %2,%2,%5 \n\t" \
+-:"=r"(c0), "=r"(c1), "=r"(c2) : "r"(sc0), "r"(sc1), "r"(sc2), "0"(c0), "1"(c1), "2"(c2) : "%cc");
++:"=r"(c0), "=r"(c1), "=r"(c2) : "r"(sc0), "r"(sc1), "r"(sc2), "0"(c0), "1"(c1), "2"(c2) : "cc");
+
+
+ #elif defined(TFM_AVR32)
+@@ -501,7 +501,7 @@
+ " add %0,%0,%3 \n\t" \
+ " adc %1,%1,%4 \n\t" \
+ " adc %2,%2,%5 \n\t" \
+-:"=r"(c0), "=r"(c1), "=r"(c2) : "r"(sc0), "r"(sc1), "r"(sc2), "0"(c0), "1"(c1), "2"(c2) : "%cc");
++:"=r"(c0), "=r"(c1), "=r"(c2) : "r"(sc0), "r"(sc1), "r"(sc2), "0"(c0), "1"(c1), "2"(c2) : "cc");
+
+ #elif defined(TFM_MIPS)
+
+@@ -571,7 +571,7 @@
+ " mflo %0 \n\t" \
+ " mfhi %1 \n\t" \
+ " xor %2,%2,%2 \n\t" \
+-:"=r"(sc0), "=r"(sc1), "=r"(sc2):"0"(sc0), "1"(sc1), "2"(sc2), "r"(i),"r"(j) : "%cc");
++:"=r"(sc0), "=r"(sc1), "=r"(sc2):"0"(sc0), "1"(sc1), "2"(sc2), "r"(i),"r"(j) : "cc");
+
+ #define SQRADDAC(i, j) \
+ asm( \
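The one non-mechanical piece is patch-libclamav-c++-llvm above: the bundled LLVM CFG iterators hand out `BasicBlock*` values from `operator*()`, so the patch passes `Ptr*`/`BB_*` for the `pointer` and `reference` parameters of the `std::iterator` base and returns `reference` from `operator*()`, apparently so that the declared reference type matches what the operator actually returns, which the clang build tripped over. A self-contained sketch of the same pattern (hypothetical `Node` type, not LLVM code; assumes a pre-C++17 toolchain where `std::iterator` is still available):

```cpp
/*
 * Sketch only (hypothetical Node type, not LLVM code).  The iterator walks an
 * array of Node* and yields the pointers themselves, so the std::iterator
 * base is given Node* for both its pointer and reference parameters and
 * operator*() returns `reference` (a Node* by value).
 */
#include <cstddef>
#include <iostream>
#include <iterator>

struct Node { int id; };

class NodePtrIterator
    : public std::iterator<std::forward_iterator_tag, Node, std::ptrdiff_t,
                           Node *, Node *> {   // pointer and reference are both Node*
    Node **cur_;

public:
    explicit NodePtrIterator(Node **cur) : cur_(cur) {}

    // As in the patched CFG.h: operator*() returns `reference`, a plain
    // Node* handed back by value, and operator->() returns the same thing.
    reference operator*() const { return *cur_; }
    pointer operator->() const { return operator*(); }

    NodePtrIterator &operator++() { ++cur_; return *this; }
    bool operator==(const NodePtrIterator &o) const { return cur_ == o.cur_; }
    bool operator!=(const NodePtrIterator &o) const { return !(*this == o); }
};

int main()
{
    Node a = {1}, b = {2};
    Node *succs[] = {&a, &b};
    for (NodePtrIterator it(succs), end(succs + 2); it != end; ++it)
        std::cout << (*it)->id << '\n';        // prints 1 then 2
    return 0;
}
```

With the default template arguments, `reference` would be `Node&`, and a by-value `Node*` returned from `operator*()` could not serve as that reference type; supplying `Node*` explicitly, as the patch does for `Ptr*`/`BB_*`, keeps the traits consistent with the operator's actual return type.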