
Optimization

import numpy as np
from scipy.optimize import minimize

def func(x, m, c):
  return m*x + c

def jacobian(x, m, c):
  return ## first derivative of func w.r.t. x

def hessian(x, m, c):
  return ## second derivative of func w.r.t. x

constraints = (
  ## equality constraint
  ## fun(x) = 0
  dict(
    type = 'eq',
    fun = lambda x: x.sum() - 1.0,
    jac = lambda x: np.ones_like(x) ## optional, but recommended
  ),
  ## inequality constraint
  ## fun(x) >= 0
  dict(
    type = 'ineq',
    fun = lambda x: x.sum() + 1.0,
    jac = lambda x: np.ones_like(x) ## optional, but recommended
  ),
)

bounds = (
  (0, None),
  (0, None)
)

res = minimize(
  fun = func,
  x0 = [0, 0], ## initial guess
  args = (m, c), ## extra arguments forwarded to func, jacobian, hessian
  method = 'SLSQP',
  jac = jacobian,
  ## hess = hessian, ## used by methods such as Newton-CG / trust-constr; ignored by SLSQP
  bounds = bounds,
  constraints = constraints
)
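
A concrete, runnable instance of the same template (the quadratic objective, target values, and constraint are illustrative assumptions):

import numpy as np
from scipy.optimize import minimize

def objective(x, target):
  return np.sum((x - target)**2) ## scalar objective

def objective_jac(x, target):
  return 2 * (x - target) ## gradient w.r.t. x

target = np.array([0.8, 0.4]) ## assumed data

res = minimize(
  objective,
  x0 = [0.5, 0.5],
  args = (target,),
  method = 'SLSQP',
  jac = objective_jac,
  bounds = [(0, None), (0, None)],
  constraints = [dict(type='eq', fun=lambda x: x.sum() - 1.0, jac=lambda x: np.ones_like(x))]
)
print(res.x) ## ≈ [0.7, 0.3], the closest point to target on the simplex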

Interpolation

from scipy.interpolate import interp1d

f = interp1d(
  x,
  y,
  kind="linear" ## or "cubic"
)
interpolated_values = f(x_dash)
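
A quick end-to-end sketch with made-up sample points (x, y, and x_dash below are assumptions):

import numpy as np
from scipy.interpolate import interp1d

x = np.linspace(0, 10, 11) ## known sample locations (assumed)
y = np.sin(x) ## known sample values (assumed)
x_dash = np.linspace(0, 10, 101) ## points to interpolate at

f = interp1d(x, y, kind="cubic", fill_value="extrapolate") ## fill_value lets f handle points outside [x.min(), x.max()]
interpolated_values = f(x_dash)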

Curve Fitting

from scipy.optimize import curve_fit

def func(x, a, b):
  return (a * x**2) + b

popt, pcov = curve_fit(
  func,
  x_data,
  y_data,
  p0 = (1, 1) ## initial guess for (a, b)
)
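
The one-sigma uncertainties on popt are conventionally read off the diagonal of pcov:

import numpy as np

perr = np.sqrt(np.diag(pcov)) ## one-standard-deviation errors on the fitted parameters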

PCov

## rough one-sigma parameter uncertainty from a minimize() result
## (assumes a least-squares style objective and a method that returns
## hess_inv, e.g. BFGS / L-BFGS-B)
pcov = (
  np.sqrt(res.fun) * np.sqrt(np.diag(
    res
    .hess_inv
    .todense() ## needed when hess_inv is a LinearOperator (L-BFGS-B); BFGS already returns a dense array
  ))
)

Custom Curve Fit with Regularization
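
One way to fit a curve with regularization is to minimize a least-squares loss plus an L2 penalty directly with minimize; a minimal sketch (the model, the synthetic data, and the lam value are illustrative assumptions):

import numpy as np
from scipy.optimize import minimize

def model(x, a, b):
  return a * x**2 + b

def loss(params, x, y, lam):
  residuals = y - model(x, *params)
  return np.sum(residuals**2) + lam * np.sum(params**2) ## least squares + L2 penalty

## synthetic data (assumed)
x_data = np.linspace(0, 1, 50)
y_data = 2 * x_data**2 + 1 + 0.05 * np.random.randn(50)

res = minimize(loss, x0=np.ones(2), args=(x_data, y_data, 0.01), method='BFGS')
a_fit, b_fit = res.x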

Custom Solvers

Solver

  import numpy as np
  from scipy.optimize import OptimizeResult

The implementations below always return success=True. Strictly, success should only be reported after checking a convergence criterion (e.g. that the gradient norm has fallen below a tolerance); a sketch of such a check follows.
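
A minimal convergence check that could replace the hard-coded success=True (the tolerance value is an assumption):

  import numpy as np

  def check_converged(g, tol=1e-6):
      ## g: the last gradient computed in the solver loop
      ## tol: assumed gradient-norm tolerance
      return np.linalg.norm(g) < tol

  ## inside a solver, instead of success=True:
  ## success = check_converged(g)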

BGD

    def bgd(
        fun,
        x0,
        jac,
        args=(),
        learning_rate=0.001,
        mass=0.9,
        startiter=0,
        maxiter=1000,
        callback=None,
        **kwargs
    ):
        """``scipy.optimize.minimize`` compatible implementation of batch
        gradient descent with momentum.

        Adapted from ``autograd/misc/optimizers.py``.
        """
        x = x0
        velocity = np.zeros_like(x)

        for i in range(startiter, startiter + maxiter):
            g = jac(x, *args)

            if callback and callback(x):
                break

            velocity = mass * velocity - (1.0 - mass) * g
            x = x + learning_rate * velocity

        i += 1
        return OptimizeResult(x=x, fun=fun(x, *args), jac=g, nit=i, nfev=i, success=True)

RMSProp

    def rmsprop(
        fun,
        x0,
        jac,
        args=(),
        learning_rate=0.1,
        gamma=0.9,
        eps=1e-8,
        startiter=0,
        maxiter=1000,
        callback=None,
        **kwargs
    ):
        """``scipy.optimize.minimize`` compatible implementation of root mean
        squared prop: See Adagrad paper for details.

        Adapted from ``autograd/misc/optimizers.py``.
        """
        x = x0
        avg_sq_grad = np.ones_like(x)

        for i in range(startiter, startiter + maxiter):
            g = jac(x, *args)

            if callback and callback(x):
                break

            avg_sq_grad = avg_sq_grad * gamma + g**2 * (1 - gamma)
            x = x - learning_rate * g / (np.sqrt(avg_sq_grad) + eps)

        i += 1
        return OptimizeResult(x=x, fun=fun(x, *args), jac=g, nit=i, nfev=i, success=True)

Adam

    def adam(
        fun,
        x0,
        jac,
        args=(),
        learning_rate=0.001,
        beta1=0.9,
        beta2=0.999,
        eps=1e-8,
        startiter=0,
        maxiter=1000,
        callback=None,
        **kwargs
    ):
        """``scipy.optimize.minimize`` compatible implementation of ADAM -
        [http://arxiv.org/pdf/1412.6980.pdf].

        Adapted from ``autograd/misc/optimizers.py``.
        """
        x = x0
        m = np.zeros_like(x)
        v = np.zeros_like(x)

        for i in range(startiter, startiter + maxiter):
            g = jac(x, *args)

            if callback and callback(x):
                break

            m = (1 - beta1) * g + beta1 * m  ## first  moment estimate.
            v = (1 - beta2) * (g**2) + beta2 * v  ## second moment estimate.
            mhat = m / (1 - beta1**(i + 1))  ## bias correction.
            vhat = v / (1 - beta2**(i + 1))
            x = x - learning_rate * mhat / (np.sqrt(vhat) + eps)

        i += 1
        return OptimizeResult(x=x, fun=fun(x, *args), jac=g, nit=i, nfev=i, success=True)

Usage

  from scipy.optimize import minimize

  res = minimize(..., method = func) ## func = bgd, rmsprop, or adam
  print(res.x)
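
For example, applying the adam implementation above to a simple quadratic (the objective, starting point, and option values are illustrative assumptions):

  import numpy as np
  from scipy.optimize import minimize

  def f(x):
    return np.sum((x - 3.0)**2) ## minimum at x = [3, 3]

  def f_jac(x):
    return 2 * (x - 3.0)

  res = minimize(f, x0=np.zeros(2), jac=f_jac, method=adam, options=dict(maxiter=5000, learning_rate=0.1))
  print(res.x) ## ≈ [3, 3]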

Calculus

Differentiation

Numerical

  from scipy.misc import derivative ## note: scipy.misc.derivative is deprecated and removed in newer SciPy versions

  def f(x):
    return x**2

  derivative(f, 2.0, dx=1e-6) ## ≈ 4.0, numerical derivative of f at x = 2

  def pd(func, var=0, point=[]):
      ## numerical partial derivative of func w.r.t. argument `var`, evaluated at `point`
      args = point[:]
      def wraps(x):
          args[var] = x
          return func(*args)
      return derivative(wraps, point[var], dx = 1e-6)

  def foo(x, y):
    return x**2 + y**3 ## example function consistent with the outputs below

  pd(foo, 0, [3,1]) ## 6.0000000008386678
  pd(foo, 1, [3,1]) ## 2.9999999995311555

Integration

from scipy.integrate import quad

def func(x):
  return x**2

integral, integral_error = quad(func, 0, 1)

from scipy.integrate import dblquad

def func(y, x): ## dblquad expects func(y, x)
  return x**2 + y**2

integral, integral_error = dblquad(func, 0, 1, 0, 1)

from scipy.integrate import nquad
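
nquad generalizes to N dimensions; a minimal sketch of the same double integral (the ranges list is the only assumption):

def func(x, y):
  return x**2 + y**2

integral, integral_error = nquad(func, [(0, 1), (0, 1)]) ## one (lower, upper) range per variable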

Differential Equations

import numpy as np
from scipy.integrate import odeint

def dvdt(v, t):
  return 3 * v**2 - 5

v0 = 0

t = np.linspace(0, 1, 100)
sol = odeint(dvdt, v0, t)

## coupled ODEs

def dSdx(S, x):
  y1, y2 = S
  return [
    y1 + y2**2 + 3*x,
    3*y1 + y2**3 - np.cos(x)
  ]

y1_0 = 0
y2_0 = 0
S_0 = (y1_0, y2_0)

x = np.linspace(0, 1, 100)
sol = odeint(dSdx, S_0, x)

y1 = sol.T[0]
y2 = sol.T[1]
## a second-order ODE must be rewritten as 2 coupled first-order ODEs; see the sketch below
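
As a sketch of that transformation, the harmonic oscillator x'' = -x becomes the coupled system x' = v, v' = -x (the equation and initial conditions are illustrative):

import numpy as np
from scipy.integrate import odeint

def dSdt(S, t):
  x, v = S ## S = (position, velocity)
  return [v, -x] ## x' = v, v' = x'' = -x

S_0 = (1, 0) ## x(0) = 1, x'(0) = 0
t = np.linspace(0, 10, 200)

sol = odeint(dSdt, S_0, t)
x = sol.T[0] ## ≈ cos(t)
v = sol.T[1] ## ≈ -sin(t)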

Fourier Transforms

Numeric

Continuous Time & Frequency

import numpy as np
from scipy.integrate import quad

def x(t, k):
  return np.exp(-k * t**2) * np.sin(k*t) * t**4

def get_x_FT(x, f, k):
  x_FT_integrand_real = lambda t: np.real(x(t, k) * np.exp( -2*np.pi*1j*f*t) )
  x_FT_integrand_imag = lambda t: np.imag(x(t, k) * np.exp( -2*np.pi*1j*f*t) )

  x_FT_real = quad(x_FT_integrand_real, -np.inf, np.inf)[0]
  x_FT_imag = quad(x_FT_integrand_imag, -np.inf, np.inf)[0]

  return x_FT_real + 1j*x_FT_imag

f = np.linspace(-4, 4, 100)
x_FT = np.vectorize(get_x_FT)(x, f, k=2)

Continuous Time & Discrete Frequency

import numpy as np
from scipy.integrate import quad

def x(t, k):
  return np.exp(-k * t**2) * np.sin(k*t) / t

def get_x_FT(x, n, k, T):
  x_FT_integrand_real = lambda t: np.real(x(t, k) * np.exp( -2*np.pi*1j*(n/T)*t) )
  x_FT_integrand_imag = lambda t: np.imag(x(t, k) * np.exp( -2*np.pi*1j*(n/T)*t) )

  x_FT_real = quad(x_FT_integrand_real, 0, T)[0]
  x_FT_imag = quad(x_FT_integrand_imag, 0, T)[0]

  return x_FT_real + 1j*x_FT_imag

ns = np.arange(0, 20, 1)
x_FT = np.vectorize(get_x_FT)(x, ns, k=2, T=4)

FFT

Discrete Time & Frequency

## 1D
from scipy.fft import fft, fftfreq

y = fft(x)
f = fftfreq(len(x), np.diff(t)[0]) ## sample spacing taken from the time array t

## 2D
from scipy.fft import fft2, fftfreq

img_FT = fft2(img)
fy = fftfreq(img.shape[0], d=10) ## suppose the spacing between pixels is 10 mm
fx = fftfreq(img.shape[1], d=10)
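
For plotting it is often convenient to centre the zero-frequency bin with fftshift; a sketch with an assumed test signal:

import numpy as np
from scipy.fft import fft, fftfreq, fftshift

t = np.linspace(0, 1, 500, endpoint=False) ## assumed time axis (1 s, 500 samples)
x = np.sin(2 * np.pi * 50 * t) ## assumed 50 Hz test signal

y = fftshift(fft(x)) ## spectrum with zero frequency centred
f = fftshift(fftfreq(len(x), np.diff(t)[0])) ## matching frequency axis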

Linear Algebra

Not very applicable to me

Stats

dist.pdf() ## probability density function (continuous distributions)
dist.pmf() ## probability mass function (discrete distributions)
dist.cdf() ## cumulative distribution function
dist.ppf() ## percent-point function, i.e. inverse of the cdf
dist.rvs() ## generate a random sample
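
A quick usage sketch with a frozen normal distribution (the parameter values are illustrative):

from scipy.stats import norm

dist = norm(loc=0, scale=1) ## "frozen" distribution with fixed parameters
dist.pdf(0.0) ## density at 0, ≈ 0.399
dist.cdf(1.96) ## ≈ 0.975
dist.ppf(0.975) ## ≈ 1.96
dist.rvs(size=5) ## 5 random draws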

beta

from scipy.stats import beta
a, b = 2.5, 3.1

mean, var, skew, kurt = beta.stats(a, b, moments="mvsk")

normal

from scipy.stats import norm

norm.ppf(0.025, mu, sigma) ## lower bound of the central 95% interval
norm.ppf(0.975, mu, sigma) ## upper bound

skewnorm

from scipy.stats import skewnorm

## skewness_factor
a = 5 ## +ve skew
a = -5 ## -ve skew

mean, var, skew, kurt = skewnorm.stats(a, moments='mvsk')
generated_values = skewnorm.rvs(a, size=1000)
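
The parameters can also be recovered from data with fit (here reusing the sample generated above):

a_fit, loc_fit, scale_fit = skewnorm.fit(generated_values) ## MLE estimates of shape, loc, scale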

custom

import numpy as np
import scipy.stats as st

class my_dist(st.rv_continuous):
  ## _pdf must be non-negative and integrate to 1 over the support [a, b]
  def _pdf(self, x, a1, a2, b1, b2): ## a1, a2, b1, b2 are optional shape parameters
    return np.sin(x) ## placeholder; replace with a properly normalized density

  ## def _cdf(self, )

  ## def _rvs(self, )

my_rv = my_dist(a = 0, b = np.inf) ## a, b set the lower/upper bounds of the support

Jac

Need not be the final
