diff --git a/thirdparty/assimp/code/res/resource.h b/thirdparty/assimp/code/res/resource.h
deleted file mode 100644
index 37d39284fe7..00000000000
--- a/thirdparty/assimp/code/res/resource.h
+++ /dev/null
@@ -1,14 +0,0 @@
-//{{NO_DEPENDENCIES}}
-// Microsoft Visual C++ generated include file.
-// Used by assimp.rc
-
-// Nächste Standardwerte für neue Objekte
-//
-#ifdef APSTUDIO_INVOKED
-#ifndef APSTUDIO_READONLY_SYMBOLS
-#define _APS_NEXT_RESOURCE_VALUE 101
-#define _APS_NEXT_COMMAND_VALUE 40001
-#define _APS_NEXT_CONTROL_VALUE 1001
-#define _APS_NEXT_SYMED_VALUE 101
-#endif
-#endif
diff --git a/thirdparty/misc/clipper.cpp b/thirdparty/misc/clipper.cpp
index d3143fe5ab6..8c3a59c4cad 100644
--- a/thirdparty/misc/clipper.cpp
+++ b/thirdparty/misc/clipper.cpp
@@ -4329,10 +4329,10 @@ double DistanceFromLineSqrd(
   const IntPoint& pt, const IntPoint& ln1, const IntPoint& ln2)
 {
   //The equation of a line in general form (Ax + By + C = 0)
-  //given 2 points (x¹,y¹) & (x²,y²) is ...
-  //(y¹ - y²)x + (x² - x¹)y + (y² - y¹)x¹ - (x² - x¹)y¹ = 0
-  //A = (y¹ - y²); B = (x² - x¹); C = (y² - y¹)x¹ - (x² - x¹)y¹
-  //perpendicular distance of point (x³,y³) = (Ax³ + By³ + C)/Sqrt(A² + B²)
+  //given 2 points (x¹,y¹) & (x²,y²) is ...
+  //(y¹ - y²)x + (x² - x¹)y + (y² - y¹)x¹ - (x² - x¹)y¹ = 0
+  //A = (y¹ - y²); B = (x² - x¹); C = (y² - y¹)x¹ - (x² - x¹)y¹
+  //perpendicular distance of point (x³,y³) = (Ax³ + By³ + C)/Sqrt(A² + B²)
   //see http://en.wikipedia.org/wiki/Perpendicular_distance
   double A = double(ln1.Y - ln2.Y);
   double B = double(ln2.X - ln1.X);
diff --git a/thirdparty/xatlas/xatlas.cpp b/thirdparty/xatlas/xatlas.cpp
index eb0824a5176..2cc2905eee7 100644
--- a/thirdparty/xatlas/xatlas.cpp
+++ b/thirdparty/xatlas/xatlas.cpp
@@ -4388,7 +4388,7 @@ private:
 class Solver
 {
 public:
-	// Solve the symmetric system: At·A·x = At·b
+	// Solve the symmetric system: At·A·x = At·b
 	static bool LeastSquaresSolver(const sparse::Matrix &A, const FullVector &b, FullVector &x, float epsilon = 1e-5f)
 	{
 		xaDebugAssert(A.width() == x.dimension());
@@ -4477,22 +4477,22 @@ private:
 	* Gradient method.
 	*
 	* Solving sparse linear systems:
-	* (1)	A·x = b
+	* (1)	A·x = b
 	*
 	* The conjugate gradient algorithm solves (1) only in the case that A is
 	* symmetric and positive definite. It is based on the idea of minimizing the
 	* function
 	*
-	* (2)	f(x) = 1/2·x·A·x - b·x
+	* (2)	f(x) = 1/2·x·A·x - b·x
 	*
 	* This function is minimized when its gradient
 	*
-	* (3)	df = A·x - b
+	* (3)	df = A·x - b
 	*
 	* is zero, which is equivalent to (1). The minimization is carried out by
 	* generating a succession of search directions p.k and improved minimizers x.k.
-	* At each stage a quantity alfa.k is found that minimizes f(x.k + alfa.k·p.k),
-	* and x.k+1 is set equal to the new point x.k + alfa.k·p.k. The p.k and x.k are
+	* At each stage a quantity alfa.k is found that minimizes f(x.k + alfa.k·p.k),
+	* and x.k+1 is set equal to the new point x.k + alfa.k·p.k. The p.k and x.k are
 	* built up in such a way that x.k+1 is also the minimizer of f over the whole
 	* vector space of directions already taken, {p.1, p.2, . . . , p.k}. After N
 	* iterations you arrive at the minimizer over the entire vector space, i.e., the
@@ -4520,7 +4520,7 @@ private:
 		float delta_new;
 		float alpha;
 		float beta;
-		// r = b - A·x;
+		// r = b - A·x;
 		sparse::copy(b, r);
 		sparse::sgemv(-1, A, x, 1, r);
 		// p = r;
@@ -4529,24 +4529,24 @@
 		delta_0 = delta_new;
 		while (i < i_max && delta_new > epsilon * epsilon * delta_0) {
 			i++;
-			// q = A·p
+			// q = A·p
 			mult(A, p, q);
-			// alpha = delta_new / p·q
+			// alpha = delta_new / p·q
 			alpha = delta_new / sparse::dot( p, q );
-			// x = alfa·p + x
+			// x = alfa·p + x
 			sparse::saxpy(alpha, p, x);
 			if ((i & 31) == 0) { // recompute r after 32 steps
-				// r = b - A·x
+				// r = b - A·x
 				sparse::copy(b, r);
 				sparse::sgemv(-1, A, x, 1, r);
 			} else {
-				// r = r - alpha·q
+				// r = r - alpha·q
 				sparse::saxpy(-alpha, q, r);
 			}
 			delta_old = delta_new;
 			delta_new = sparse::dot( r, r );
 			beta = delta_new / delta_old;
-			// p = beta·p + r
+			// p = beta·p + r
 			sparse::scal(beta, p);
 			sparse::saxpy(1, r, p);
 		}
@@ -4572,35 +4572,35 @@
 		float delta_new;
 		float alpha;
 		float beta;
-		// r = b - A·x
+		// r = b - A·x
 		sparse::copy(b, r);
 		sparse::sgemv(-1, A, x, 1, r);
-		// p = M^-1 · r
+		// p = M^-1 · r
 		preconditioner.apply(r, p);
 		delta_new = sparse::dot(r, p);
 		delta_0 = delta_new;
 		while (i < i_max && delta_new > epsilon * epsilon * delta_0) {
 			i++;
-			// q = A·p
+			// q = A·p
 			mult(A, p, q);
-			// alpha = delta_new / p·q
+			// alpha = delta_new / p·q
 			alpha = delta_new / sparse::dot(p, q);
-			// x = alfa·p + x
+			// x = alfa·p + x
 			sparse::saxpy(alpha, p, x);
 			if ((i & 31) == 0) { // recompute r after 32 steps
-				// r = b - A·x
+				// r = b - A·x
 				sparse::copy(b, r);
 				sparse::sgemv(-1, A, x, 1, r);
 			} else {
-				// r = r - alfa·q
+				// r = r - alfa·q
 				sparse::saxpy(-alpha, q, r);
 			}
-			// s = M^-1 · r
+			// s = M^-1 · r
 			preconditioner.apply(r, s);
 			delta_old = delta_new;
 			delta_new = sparse::dot( r, s );
 			beta = delta_new / delta_old;
-			// p = s + beta·p
+			// p = s + beta·p
 			sparse::scal(beta, p);
 			sparse::saxpy(1, s, p);
 		}
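Note on the clipper.cpp hunk above: the comment it touches derives the general-form line equation Ax + By + C = 0 from two points and the resulting perpendicular-distance formula. A minimal, self-contained sketch of that formula follows, assuming a hypothetical Point struct and function name rather than Clipper's actual IntPoint-based code, with a small numeric check.

// Illustrative sketch only: squared perpendicular distance of a point from the
// line through ln1 and ln2, per the formula in the clipper.cpp comment.
// Assumes ln1 != ln2 so the denominator A*A + B*B is non-zero.
#include <cstdio>

struct Point { double x, y; };

static double DistanceFromLineSqrdSketch(const Point &pt, const Point &ln1, const Point &ln2) {
	const double A = ln1.y - ln2.y;
	const double B = ln2.x - ln1.x;
	// The general-form constant is -(A*x1 + B*y1); fold it straight into the numerator.
	const double numerator = A * pt.x + B * pt.y - (A * ln1.x + B * ln1.y);
	return (numerator * numerator) / (A * A + B * B);
}

int main() {
	// Distance from (0,1) to the x-axis through (0,0) and (1,0) is 1, so squared distance is 1.
	const Point p{0.0, 1.0}, a{0.0, 0.0}, b{1.0, 0.0};
	std::printf("%f\n", DistanceFromLineSqrdSketch(p, a, b));
	return 0;
}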
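Note on the xatlas.cpp hunks above: the comment block they touch describes the conjugate gradient iteration (q = A·p, alpha = delta_new / p·q, x = x + alpha·p, r = r - alpha·q, beta = delta_new / delta_old, p = r + beta·p). A minimal dense-matrix sketch of that loop follows, solving a small symmetric positive definite system; the Matrix/vector types and function names are illustrative stand-ins, not xatlas's sparse::Matrix API, and the periodic residual recomputation and the preconditioner step (p = M^-1·r) of the second xatlas routine are omitted.

// Illustrative sketch only: plain conjugate gradient for A·x = b with A symmetric
// positive definite, following the update steps named in the xatlas comments.
#include <cstdio>
#include <vector>

using Vec = std::vector<double>;
using Mat = std::vector<Vec>; // dense, row-major stand-in for a sparse matrix

static double dot(const Vec &a, const Vec &b) {
	double s = 0.0;
	for (size_t i = 0; i < a.size(); i++) s += a[i] * b[i];
	return s;
}

static Vec mult(const Mat &A, const Vec &v) { // q = A·v
	Vec q(A.size(), 0.0);
	for (size_t i = 0; i < A.size(); i++)
		for (size_t j = 0; j < v.size(); j++) q[i] += A[i][j] * v[j];
	return q;
}

// Returns the number of iterations used; x holds the solution on exit.
static int ConjugateGradient(const Mat &A, const Vec &b, Vec &x, double epsilon = 1e-10, int i_max = 1000) {
	const size_t n = b.size();
	Vec r(n), p(n), q(n);
	// r = b - A·x
	q = mult(A, x);
	for (size_t j = 0; j < n; j++) r[j] = b[j] - q[j];
	p = r; // first search direction
	double delta_new = dot(r, r);
	const double delta_0 = delta_new;
	int i = 0;
	while (i < i_max && delta_new > epsilon * epsilon * delta_0) {
		i++;
		q = mult(A, p);                              // q = A·p
		const double alpha = delta_new / dot(p, q);  // step length minimizing f along p
		for (size_t j = 0; j < n; j++) x[j] += alpha * p[j]; // x = x + alpha·p
		for (size_t j = 0; j < n; j++) r[j] -= alpha * q[j]; // r = r - alpha·q
		const double delta_old = delta_new;
		delta_new = dot(r, r);
		const double beta = delta_new / delta_old;
		for (size_t j = 0; j < n; j++) p[j] = r[j] + beta * p[j]; // p = r + beta·p
	}
	return i;
}

int main() {
	// 2x2 SPD system: [[4,1],[1,3]]·x = [1,2]; exact solution is x = (1/11, 7/11).
	Mat A = { {4.0, 1.0}, {1.0, 3.0} };
	Vec b = {1.0, 2.0};
	Vec x = {0.0, 0.0};
	const int iters = ConjugateGradient(A, b, x);
	std::printf("x = (%f, %f) after %d iterations\n", x[0], x[1], iters);
	return 0;
}

The xatlas version recomputes r = b - A·x every 32 iterations to limit floating-point drift in the residual recurrence; the sketch keeps the simple recurrence since it only runs a handful of iterations on a tiny system.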