數值計算(迭代法解方程組)
阿新 • 發佈:2019-02-10
1.主要思想:
2.雅可比迭代法及改進
# -*- coding: utf-8 -*-
import numpy as np # a package to calculate
from scipy.sparse import identity # to get a identity matrix
import time # calculate time of diffient method
np.random.seed(2) # set seed to make x0 unchanged
def Create_Tridiagonal_Matrices(a, b, c, n):
    """Build an n-by-n tridiagonal matrix.

    The main diagonal is filled with ``b``, the sub-diagonal (one below)
    with ``a`` and the super-diagonal (one above) with ``c``.

    Parameters:
        a, c: off-diagonal values (below / above the main diagonal).
        b: main-diagonal value.
        n: matrix order; must be at least 2 (a 1x1 "tridiagonal" is meaningless).

    Returns:
        An (n, n) float ndarray.

    Raises:
        ValueError: if n < 2 (the original code only printed a warning and
            then crashed with IndexError when indexing A[0][1]).
    """
    if n < 2:
        raise ValueError("n must be at least 2 for a tridiagonal matrix")
    # NOTE(review): this heuristic dominance check is kept from the original;
    # it warns (does not fail) when the diagonal does not dominate.
    if b <= c or b <= a or b < a + c:
        print("this is not a Create_Tridiagonal_Matrices,please check a,b,c ")
    A = np.zeros((n, n))
    np.fill_diagonal(A, b)
    # Vectorized fill of both off-diagonals instead of a Python loop.
    idx = np.arange(n - 1)
    A[idx + 1, idx] = a   # sub-diagonal
    A[idx, idx + 1] = c   # super-diagonal
    return A
def Jacobi(A, b, tol=1e-6, max_iter=100000):
    """Solve A x = b with the Jacobi iteration.

    Splits A = D - L - U (D diagonal, L strictly lower, U strictly upper),
    then iterates x <- D^-1 (L + U) x + D^-1 b until the relative residual
    ||A x - b|| / ||b|| drops to ``tol``.

    Parameters:
        A: (n, n) coefficient matrix with nonzero diagonal.
        b: (n, 1) right-hand side.
        tol: relative-residual stopping tolerance (default matches original 1e-6).
        max_iter: safety cap — the original looped forever on divergent systems.

    Returns:
        (n, 1) approximate solution (zeros if b is the zero vector).
    """
    n = A.shape[0]
    # Vectorized D/L/U split instead of the original element-by-element loops.
    D = np.diag(np.diag(A))
    L = -np.tril(A, -1)
    U = -np.triu(A, 1)
    x = np.zeros((n, 1))
    D_inv = np.linalg.inv(D)
    B = np.dot(D_inv, L + U)   # iteration matrix
    f = np.dot(D_inv, b)
    b_norm = np.linalg.norm(b)
    if b_norm == 0.0:          # b == 0 => x == 0 exactly; avoid 0/0 in the test
        return x
    for _ in range(max_iter):
        if np.linalg.norm(np.dot(A, x) - b) / b_norm <= tol:
            break
        x = np.dot(B, x) + f
    return x
def Gauss_Seidel(A, b, tol=1e-6, max_iter=100000):
    """Solve A x = b with the Gauss-Seidel iteration.

    Splits A = D - L - U and iterates x <- (D - L)^-1 U x + (D - L)^-1 b,
    i.e. each sweep immediately reuses the freshly updated components.
    Stops when the relative residual ||A x - b|| / ||b|| reaches ``tol``.

    Parameters:
        A: (n, n) coefficient matrix with nonzero diagonal.
        b: (n, 1) right-hand side.
        tol: relative-residual stopping tolerance (default matches original 1e-6).
        max_iter: safety cap — the original looped forever on divergent systems.

    Returns:
        (n, 1) approximate solution (zeros if b is the zero vector).
    """
    n = A.shape[0]
    # Vectorized D/L/U split instead of the original element-by-element loops.
    D = np.diag(np.diag(A))
    L = -np.tril(A, -1)
    U = -np.triu(A, 1)
    x = np.zeros((n, 1))
    M_inv = np.linalg.inv(D - L)
    B_G = np.dot(M_inv, U)     # iteration matrix
    f_G = np.dot(M_inv, b)
    b_norm = np.linalg.norm(b)
    if b_norm == 0.0:          # b == 0 => x == 0 exactly; avoid 0/0 in the test
        return x
    for _ in range(max_iter):
        if np.linalg.norm(np.dot(A, x) - b) / b_norm <= tol:
            break
        x = np.dot(B_G, x) + f_G
    return x
def S_O_R(A, b, w, tol=1e-6, max_iter=100000):
    """Solve A x = b with Successive Over-Relaxation (SOR).

    Splits A = D - L - U and iterates
        x <- (D - wL)^-1 [ (1-w) D + w U ] x + w (D - wL)^-1 b,
    where ``w`` is the relaxation factor (w = 1 reduces to Gauss-Seidel).
    Stops when the relative residual ||A x - b|| / ||b|| reaches ``tol``.

    Parameters:
        A: (n, n) coefficient matrix with nonzero diagonal.
        b: (n, 1) right-hand side.
        w: relaxation factor (convergence typically requires 0 < w < 2).
        tol: relative-residual stopping tolerance (default matches original 1e-6).
        max_iter: safety cap — the original looped forever on divergent systems.

    Returns:
        (n, 1) approximate solution (zeros if b is the zero vector).
    """
    n = A.shape[0]
    # Vectorized D/L/U split instead of the original element-by-element loops.
    D = np.diag(np.diag(A))
    L = -np.tril(A, -1)
    U = -np.triu(A, 1)
    x = np.zeros((n, 1))
    M_inv = np.linalg.inv(D - w * L)
    B_w = np.dot(M_inv, (1 - w) * D + w * U)   # iteration matrix
    f_w = w * np.dot(M_inv, b)
    b_norm = np.linalg.norm(b)
    if b_norm == 0.0:          # b == 0 => x == 0 exactly; avoid 0/0 in the test
        return x
    for _ in range(max_iter):
        if np.linalg.norm(np.dot(A, x) - b) / b_norm <= tol:
            break
        x = np.dot(B_w, x) + f_w
    return x
def timecmp(A, b, fun1, fun2, fun3):
    """Compare the three solvers' average wall-clock time over 10 runs each.

    Fixes from the original:
      * fun1/fun2/fun3 were accepted but ignored — the body hard-coded
        Jacobi/Gauss_Seidel/S_O_R; the passed-in callables are now used.
      * time.clock was removed in Python 3.8; time.perf_counter is the
        recommended replacement for interval timing.

    Parameters:
        A: (n, n) coefficient matrix handed to each solver.
        b: (n, 1) right-hand side handed to each solver.
        fun1, fun2: solvers called as fun(A, b).
        fun3: solver called as fun3(A, b, 1.5) (SOR-style, fixed w = 1.5
              to match the original).

    Returns:
        Tuple of three floats: mean elapsed seconds for fun1, fun2, fun3.
    """
    runs = 10
    times = np.zeros((3, runs))
    for i in range(runs):
        start = time.perf_counter()
        fun1(A, b)
        times[0, i] = time.perf_counter() - start
        start = time.perf_counter()
        fun2(A, b)
        times[1, i] = time.perf_counter() - start
        start = time.perf_counter()
        fun3(A, b, 1.5)
        times[2, i] = time.perf_counter() - start
    return np.mean(times[0]), np.mean(times[1]), np.mean(times[2])
# Demo / driver: build a 100x100 tridiagonal system, solve it with a direct
# solver and with each iterative method, and compare timings.
# Fixes: Python 3 print(); time.perf_counter() (time.clock was removed in
# Python 3.8); dropped a duplicated `print X` after the Gauss-Seidel result.
n = 100
A = Create_Tridiagonal_Matrices(-1, 2, -1, n)
print("\n 生成的係數行列式是", A)
b = np.ones((n, 1))
print("\n 生成的b是", b)
# Reference solution from the direct (LAPACK) solver.
print("\n 呼叫函式算出來解為", np.linalg.solve(A, b))
t1 = time.perf_counter()
X = Jacobi(A, b)
print("計算花費時間", time.perf_counter() - t1)
print('Jacobi計算出來的解:', X)
t1 = time.perf_counter()
X = Gauss_Seidel(A, b)
print("\n計算花費時間", time.perf_counter() - t1)
print('Gauss_Seidel計算出來的解:', X)
t1 = time.perf_counter()
X = S_O_R(A, b, 0.5)  # w = 0.5: under-relaxation, as in the original demo
print("\n計算花費時間", time.perf_counter() - t1)
print('S_O_R計算出來的解:', X)
print("over")
print("Jacobi,Gauss_Seidel,S_O_R 在10次執行的平均時間", timecmp(A, b, Jacobi, Gauss_Seidel, S_O_R))