This commit is contained in:
Laureηt 2021-12-04 18:43:26 +01:00
parent ddf71b4a3f
commit a9fa860b74
No known key found for this signature in database
GPG key ID: D88C6B294FD40994
6 changed files with 888 additions and 118 deletions

View file

@ -19,7 +19,7 @@ xk,f_min,flag,nb_iters = Algorithme_de_Newton(f,gradf,hessf,x0,option)
# Sorties:
* **xmin** : (Array{Float,1}) une approximation de la solution du problème : ``\min_{x \in \mathbb{R}^{n}} f(x)``
* **f_min** : (Float) ``f(x_{min})``
* **flag** : (Integer) indique le critère sur lequel le programme s'est arrêté
* **flag** : (Integer) indique le critère sur lequel le programme s'est arrêté
* **0** : Convergence
* **1** : stagnation du xk
* **2** : stagnation du f
@ -37,7 +37,7 @@ options = []
xmin,f_min,flag,nb_iters = Algorithme_De_Newton(f,gradf,hessf,x0,options)
```
"""
function Algorithme_De_Newton(f::Function,gradf::Function,hessf::Function,x0,options)
function Algorithme_De_Newton(f::Function, gradf::Function, hessf::Function, x0, options)
"# Si options == [] on prends les paramètres par défaut"
if options == []
@ -49,9 +49,42 @@ function Algorithme_De_Newton(f::Function,gradf::Function,hessf::Function,x0,opt
Tol_abs = options[2]
Tol_rel = options[3]
end
xmin = x0
f_min = f(x0)
k = 0
flag = -1
xkp1 = x0
xk = x0
if norm(gradf(xkp1)) <= max(Tol_rel * norm(gradf(x0)), Tol_abs)
flag = 0
nb_iters = 0
return xmin,f_min,flag,nb_iters
else
while true
k += 1
dk = -hessf(xk) \ gradf(xk)
xkp1 = xk + dk
if norm(gradf(xkp1)) <= max(Tol_rel * norm(gradf(x0)), Tol_abs)
flag = 0
break
elseif norm(xkp1 - xk) <= max(Tol_rel * norm(xk), Tol_abs)
flag = 1
break
elseif abs(f(xkp1) - f(xk)) <= max(Tol_rel * abs(f(xk)), Tol_abs)
flag = 2
break
elseif k >= max_iter
flag = 3
break
end
xk = xkp1
end
end
xmin = xkp1
f_min = f(xmin)
return xmin, f_min, flag, k
end
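
As a quick illustration of the iteration above (one linear solve per step), here is a small standalone sketch, not part of the commit, reproducing the recorded one-step convergence on the quadratic test function f1; `f1`, `grad_f1` and `hess_f1` are copied from the notebook further down.

```julia
# f1 and its derivatives, copied from the notebook (quadratic test function).
f1(x)      = 2 * (x[1] + x[2] + x[3] - 3)^2 + (x[1] - x[2])^2 + (x[2] - x[3])^2
grad_f1(x) = [4 * (x[1] + x[2] + x[3] - 3) + 2 * (x[1] - x[2]);
              4 * (x[1] + x[2] + x[3] - 3) - 2 * (x[1] - x[2]) + 2 * (x[2] - x[3]);
              4 * (x[1] + x[2] + x[3] - 3) - 2 * (x[2] - x[3])]
hess_f1(x) = [6 2 4; 2 8 2; 4 2 6]

x0 = [1, 0, 0]
dk = -hess_f1(x0) \ grad_f1(x0)   # same linear solve as dk in the loop above
println(x0 + dk)                  # ≈ [1.0, 1.0, 1.0] : one Newton step reaches the minimizer
```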

View file

@ -28,20 +28,116 @@ options = []
s = Gradient_Conjugue_Tronque(gradf(xk),hessf(xk),options)
```
"""
function Gradient_Conjugue_Tronque(gradfk,hessfk,options)
function Gradient_Conjugue_Tronque(gradfk, hessfk, options)
"# Si option est vide on initialise les 3 paramètres par défaut"
if options == []
deltak = 2
delta_k = 2
max_iter = 100
tol = 1e-6
else
deltak = options[1]
delta_k = options[1]
max_iter = options[2]
tol = options[3]
end
n = length(gradfk)
s = zeros(n)
return s
n = length(gradfk)
g_0 = gradfk
g_j = g_0
g_j1 = -1
p_j = -g_0
p_j1 = -1
s_j = zeros(n)
s_j1 = -1
kappa_j = -1
sigma_j = -1
alpha_j = -1
beta_j = -1
s = zeros(n)
for j = 0:max_iter
# a
kappa_j = p_j' * hessfk * p_j
# b
if kappa_j <= 0
a = sum(p_j .^ 2)
b = 2 * sum(p_j .* s_j)
c = sum(s_j .^ 2) - delta_k^2   # racines de ‖s_j + σ p_j‖ = Δ
discriminant = b^2 - 4 * a * c
x1 = (-b + sqrt(discriminant)) / (2 * a)
x2 = (-b - sqrt(discriminant)) / (2 * a)
q(w) = g_0' * w + 0.5 * w' * hessfk * w
v1 = q(s_j + x1 * p_j)
v2 = q(s_j + x2 * p_j)
if v1 < v2
sigma_j = x1
else
sigma_j = x2
end
s = s_j + sigma_j * p_j
break
end
# c
alpha_j = g_j' * g_j / kappa_j
# d
if norm(s_j + alpha_j * p_j) >= delta_k
a = sum(p_j .^ 2)
b = 2 * sum(p_j .* s_j)
c = sum(s_j .^ 2) - delta_k^2   # racines de ‖s_j + σ p_j‖ = Δ
discriminant = b^2 - 4 * a * c
x1 = (-b + sqrt(discriminant)) / (2 * a)
x2 = (-b - sqrt(discriminant)) / (2 * a)
if x1 > 0
sigma_j = x1
else
sigma_j = x2
end
s = s_j + sigma_j * p_j
break
end
# e
s_j1 = s_j + alpha_j * p_j
# f
g_j1 = g_j + alpha_j * hessfk * p_j
# g
beta_j = (g_j1' * g_j1) / (g_j' * g_j)
# h
p_j1 = -g_j1 + beta_j * p_j
# i
if norm(g_j1) <= tol * norm(g_0)
s = s_j1
break
end
s_j = s_j1
g_j = g_j1
p_j = p_j1
end
return s
end
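
When a step leaves the trust region, the two truncation branches above solve ‖s_j + σ p_j‖ = Δ, i.e. a σ² + b σ + c = 0 with c = ‖s_j‖² − Δ². A minimal standalone check, not part of the commit; `racines_bord` is only an illustrative name.

```julia
using LinearAlgebra

# Roots of ‖s + σ p‖ = Δ, the quadratic solved in the truncation branches above.
function racines_bord(s, p, delta)
    a = sum(p .^ 2)
    b = 2 * sum(p .* s)
    c = sum(s .^ 2) - delta^2          # the -Δ² term puts the step exactly on the boundary
    disc = b^2 - 4 * a * c
    return ((-b + sqrt(disc)) / (2a), (-b - sqrt(disc)) / (2a))
end

s, p, delta = zeros(2), [-6.0, -2.0], 0.5   # first iteration for gradfk = [6; 2], Δ = 0.5
sig1, sig2 = racines_bord(s, p, delta)
println(s + sig1 * p)                       # ≈ [-0.474, -0.158], i.e. -delta*g/norm(g)
println(norm(s + sig1 * p))                 # ≈ 0.5 : the positive root saturates the region
```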

View file

@ -32,11 +32,32 @@ delta1 = 1
s1, e1 = Pas_De_Cauchy(g1,H1,delta1)
```
"""
function Pas_De_Cauchy(g,H,delta)
function Pas_De_Cauchy(g, H, delta)
flag = undef
sol = undef
coeff = g' * H * g
if norm(g) == 0
sol = g
flag = 0
else
if coeff > 0
t = norm(g)^2 / coeff
if t * norm(g) < delta
sol = -t * g
flag = 1
else
sol = -delta / norm(g) * g
flag = -1
end
else
sol = -delta / norm(g) * g
flag = -1
end
end
return sol, flag
e = 0
n = length(g)
s = zeros(n)
return s, e
end
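
A standalone recomputation of the interior case (flag = 1), not part of the commit; it matches the "Cauchy 2" output recorded further down in the notebook.

```julia
using LinearAlgebra

g = [6.0, 2.0]
H = [7.0 0.0; 0.0 2.0]
delta = 1.0

t = norm(g)^2 / (g' * H * g)   # 40 / 260 ≈ 0.154 : unconstrained minimizer along -g
println(t * norm(g))           # ≈ 0.97 < delta, so the step stays inside the region (flag = 1)
println(-t * g)                # ≈ [-0.923, -0.308], the value printed for "Cauchy 2" below
```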

View file

@ -50,7 +50,7 @@ options = []
xmin, fxmin, flag,nb_iters = Regions_De_Confiance(algo,f,gradf,hessf,x0,options)
```
"""
function Regions_De_Confiance(algo,f::Function,gradf::Function,hessf::Function,x0,options)
function Regions_De_Confiance(algo, f::Function, gradf::Function, hessf::Function, x0, options)
if options == []
deltaMax = 10
@ -77,8 +77,73 @@ function Regions_De_Confiance(algo,f::Function,gradf::Function,hessf::Function,x
n = length(x0)
xmin = zeros(n)
fxmin = f(xmin)
flag = 0
nb_iters = 0
return xmin, fxmin, flag, nb_iters
flag = -1
k = 0
x_k = x0
x_k1 = x0
delta_k = delta0
delta_k1 = delta0
m_k(x, s) = f(x) + gradf(x)' * s + 0.5 * s' * hessf(x) * s
if norm(gradf(x_k1)) <= max(Tol_rel * norm(gradf(x0)), Tol_abs)
flag = 0
else
while true
k += 1
# a. Calculer approximativement s_k, solution du sous-problème
if algo == "gct"
s_k = Gradient_Conjugue_Tronque(gradf(x_k), hessf(x_k), [delta_k; max_iter; Tol_rel])
elseif algo == "cauchy"
s_k, e_k = Pas_De_Cauchy(gradf(x_k), hessf(x_k), delta_k)
end
# b. Evaluer f(x_k + s_k)
rho_k = (f(x_k) - f(x_k + s_k)) / (m_k(x_k, zeros(n)) - (m_k(x_k, s_k)))
# c. Mettre à jour l'itéré courant :
if rho_k >= eta1
x_k1 = x_k + s_k
else
x_k1 = x_k
end
# d. Mettre à jour la région de confiance
if rho_k >= eta2
delta_k1 = min(gamma2 * delta_k, deltaMax)
elseif rho_k >= eta1
delta_k1 = delta_k
else
delta_k1 = gamma1 * delta_k
end
# Tests de convergence
if norm(gradf(x_k1)) <= max(Tol_rel * norm(gradf(x0)), Tol_abs)
flag = 0
break
elseif norm(x_k1 - x_k) <= max(Tol_rel * norm(x_k), Tol_abs)
flag = 1
break
elseif abs(f(x_k1) - f(x_k)) <= max(Tol_rel * abs(f(x_k)), Tol_abs)
flag = 2
break
elseif k >= max_iter
flag = 3
break
end
x_k = x_k1
delta_k = delta_k1
end
end
xmin = x_k1
fxmin = f(xmin)
return xmin, fxmin, flag, k
end
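
Question 1 of the notebook below hinges on the ratio rho_k computed above: for the quadratic f1 the model m_k is exact, so rho_k = 1 and every trial step is accepted. A minimal standalone check, not part of the commit; f1 and its derivatives are copied from the notebook.

```julia
using LinearAlgebra

f1(x)      = 2 * (x[1] + x[2] + x[3] - 3)^2 + (x[1] - x[2])^2 + (x[2] - x[3])^2
grad_f1(x) = [4 * (x[1] + x[2] + x[3] - 3) + 2 * (x[1] - x[2]);
              4 * (x[1] + x[2] + x[3] - 3) - 2 * (x[1] - x[2]) + 2 * (x[2] - x[3]);
              4 * (x[1] + x[2] + x[3] - 3) - 2 * (x[2] - x[3])]
hess_f1(x) = [6 2 4; 2 8 2; 4 2 6]

m(x, s) = f1(x) + grad_f1(x)' * s + 0.5 * s' * hess_f1(x) * s   # same model as m_k above
x, s = [10.0, 3.0, -2.2], [0.1, -0.2, 0.3]                      # arbitrary point and step
rho = (f1(x) - f1(x + s)) / (m(x, zeros(3)) - m(x, s))
println(rho)                                                    # ≈ 1.0 up to rounding
```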

View file

@ -7,8 +7,8 @@
"<center>\n",
"<h1> TP-Projet d'optimisation numérique </h1>\n",
"<h1> Année 2020-2021 - 2e année département Sciences du Numérique </h1>\n",
"<h1> Noms: </h1>\n",
"<h1> Prénoms: </h1> \n",
"<h1> Nom: FAINSIN </h1>\n",
"<h1> Prénom: Laurent </h1> \n",
"</center>"
]
},
@ -26,7 +26,7 @@
},
{
"cell_type": "code",
"execution_count": 1,
"execution_count": 78,
"metadata": {},
"outputs": [
{
@ -34,22 +34,135 @@
"output_type": "stream",
"text": [
"\u001b[32m\u001b[1m Status\u001b[22m\u001b[39m `~/.julia/environments/v1.6/Project.toml`\n",
" \u001b[90m [336ed68f] \u001b[39mCSV v0.9.10\n",
" \u001b[90m [a93c6f00] \u001b[39mDataFrames v1.2.2\n",
" \u001b[90m [0c46a032] \u001b[39mDifferentialEquations v6.20.0\n",
" \u001b[90m [7073ff75] \u001b[39mIJulia v1.23.2\n",
" \u001b[90m [91a5bcdd] \u001b[39mPlots v1.23.6\n"
" \u001b[90m [a6016688] \u001b[39mTestOptinum v0.1.0 `https://github.com/mathn7/TestOptinum.git#master`\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\u001b[32m\u001b[1m Resolving\u001b[22m\u001b[39m package versions...\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\u001b[32m\u001b[1m No Changes\u001b[22m\u001b[39m to `~/.julia/environments/v1.6/Project.toml`\n",
"\u001b[32m\u001b[1m No Changes\u001b[22m\u001b[39m to `~/.julia/environments/v1.6/Manifest.toml`\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\u001b[32m\u001b[1mPrecompiling\u001b[22m\u001b[39m "
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"project...\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\u001b[33m ✓ \u001b[39mTestOptinum\n",
" 1 dependency successfully precompiled in 4 seconds (170 already precompiled)\n",
" \u001b[33m1\u001b[39m dependency precompiled but a different version is currently loaded. Restart julia to access the new version\n"
]
},
{
"data": {
"text/plain": [
"Gradient_Conjugue_Tronque"
]
},
"metadata": {},
"output_type": "display_data"
}
],
"source": [
"using Pkg\n",
"Pkg.status()"
"Pkg.status()\n",
"Pkg.add(\"DifferentialEquations\")\n",
"# Pkg.add(\"LinearAlgebra\");\n",
"# Pkg.add(\"Markdown\")\n",
"# using Documenter\n",
"using LinearAlgebra\n",
"using Markdown\n",
"using TestOptinum\n",
"using Test\n",
"\n",
"include(\"Algorithme_De_Newton.jl\")\n",
"include(\"Pas_De_Cauchy.jl\")\n",
"include(\"Regions_De_Confiance.jl\")\n",
"include(\"Gradient_Conjugue_Tronque.jl\")"
]
},
{
"cell_type": "code",
"execution_count": 4,
"execution_count": 101,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"2-element Vector{Float64}:\n",
" 1.0\n",
" 0.01"
]
},
"metadata": {},
"output_type": "display_data"
}
],
"source": [
"# Fonction f0\n",
"# -----------\n",
"f0(x) = sin.(x)\n",
"# la gradient de la fonction f0\n",
"grad_f0(x) = cos.(x)\n",
"# la hessienne de la fonction f0\n",
"hess_f0(x) = -sin.(x)\n",
"sol_exacte_f0 = -pi / 2\n",
"\n",
"# Fonction f1\n",
"# -----------\n",
"f1(x) = 2 * (x[1] + x[2] + x[3] - 3)^2 + (x[1] - x[2])^2 + (x[2] - x[3])^2\n",
"# la gradient de la fonction f1\n",
"grad_f1(x) = [\n",
" 4 * (x[1] + x[2] + x[3] - 3) + 2 * (x[1] - x[2])\n",
" 4 * (x[1] + x[2] + x[3] - 3) - 2 * (x[1] - x[2]) + 2 * (x[2] - x[3])\n",
" 4 * (x[1] + x[2] + x[3] - 3) - 2 * (x[2] - x[3])\n",
"]\n",
"# la hessienne de la fonction f1\n",
"hess_f1(x) = [6 2 4; 2 8 2; 4 2 6]\n",
"sol_exacte_f1 = [1, 1, 1]\n",
"\n",
"# Fonction f2\n",
"# -----------\n",
"f2(x) = (100 * x[2] - x[1]^2)^2 + (1 - x[1])^2\n",
"# la gradient de la fonction f2\n",
"grad_f2(x) = [\n",
" (2 * (200 * x[1]^3 - 200 * x[1] * x[2] + x[1] - 1))\n",
" (200 * (x[2] - x[1]^2))\n",
"]\n",
"# la hessienne de la fonction f2\n",
"hess_f2(x) = [\n",
" (-400*(x[2]-x[1]^2)+800*x[1]^2+2) (-400*x[1])\n",
" (-400*x[1]) (200)\n",
"]\n",
"sol_exacte_f2 = [1, 1 / 100]"
]
},
{
"cell_type": "code",
"execution_count": 102,
"metadata": {},
"outputs": [
{
@ -58,67 +171,168 @@
"text": [
"-------------------------------------------------------------------------\n",
"\u001b[34m\u001b[1mRésultats de : Newton appliqué à f0 au point initial -1.5707963267948966:\u001b[22m\u001b[39m\n",
" * xsol = [0.0]\n",
" * f(xsol) = 0\n",
" * xsol = -1.5707963267948966\n",
" * f(xsol) = -1.0\n",
" * nb_iters = 0\n",
" * flag = 0\n",
" * sol_exacte : -1.5707963267948966\n",
"-------------------------------------------------------------------------\n",
"\u001b[34m\u001b[1mRésultats de : Newton appliqué à f0 au point initial -1.0707963267948966:\u001b[22m\u001b[39m\n",
" * xsol = [0.0]\n",
" * f(xsol) = 0\n",
" * nb_iters = 0\n",
" * xsol = -1.5707963267949088\n",
" * f(xsol) = -1.0\n",
" * nb_iters = 3\n",
" * flag = 0\n",
" * sol_exacte : -1.5707963267948966\n",
"-------------------------------------------------------------------------\n",
"\u001b[34m\u001b[1mRésultats de : Newton appliqué à f0 au point initial 1.5707963267948966:\u001b[22m\u001b[39m\n",
" * xsol = [0.0]\n",
" * f(xsol) = 0\n",
" * xsol = 1.5707963267948966\n",
" * f(xsol) = 1.0\n",
" * nb_iters = 0\n",
" * flag = 0\n",
" * sol_exacte : -1.5707963267948966\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"-------------------------------------------------------------------------\n",
"\u001b[34m\u001b[1mRésultats de : Newton appliqué à f1 au point initial [1, 1, 1]:\u001b[22m\u001b[39m\n",
" * xsol = [1, 1, 1]\n",
" * f(xsol) = 0\n",
" * nb_iters = 0\n",
" * flag = 0\n",
" * sol_exacte : [1, 1, 1]\n",
"-------------------------------------------------------------------------\n",
"\u001b[34m\u001b[1mRésultats de : Newton appliqué à f1 au point initial [1, 0, 0]:\u001b[22m\u001b[39m\n",
" * xsol = [1.0000000000000002, 1.0, 0.9999999999999998]\n",
" * f(xsol) = 9.860761315262648e-32\n",
" * nb_iters = 1\n",
" * flag = 0\n",
" * sol_exacte : [1, 1, 1]\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"-------------------------------------------------------------------------\n",
"\u001b[34m\u001b[1mRésultats de : Newton appliqué à f1 au point initial [10.0, 3.0, -2.2]:\u001b[22m\u001b[39m\n",
" * xsol = [1.0, 0.9999999999999996, 0.9999999999999982]\n",
" * f(xsol) = 1.1832913578315177e-29\n",
" * nb_iters = 1\n",
" * flag = 0\n",
" * sol_exacte : [1, 1, 1]\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"-------------------------------------------------------------------------\n",
"\u001b[34m\u001b[1mRésultats de : Newton appliqué à f2 au point initial [1.0, 0.01]:\u001b[22m\u001b[39m\n",
" * xsol = [1.0, 1.0]\n",
" * f(xsol) = 9801.0\n",
" * nb_iters = 1\n",
" * flag = 0\n",
" * sol_exacte : [1.0, 0.01]\n",
"-------------------------------------------------------------------------\n",
"\u001b[34m\u001b[1mRésultats de : Newton appliqué à f2 au point initial [-1.2, 1.0]:\u001b[22m\u001b[39m\n",
" * xsol = [1.0000000000000238, 0.9999999999815201]\n",
" * f(xsol) = 9800.999999634088\n",
" * nb_iters = 6\n",
" * flag = 0\n",
" * sol_exacte : [1.0, 0.01]\n",
"-------------------------------------------------------------------------"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"\u001b[34m\u001b[1mRésultats de : Newton appliqué à f2 au point initial [10, 0]:\u001b[22m\u001b[39m\n",
" * xsol = [1.000000000000007, 1.0000000000000142]\n",
" * f(xsol) = 9801.000000000278\n",
" * nb_iters = 5\n",
" * flag = 0\n",
" * sol_exacte : [1.0, 0.01]\n"
]
}
],
"source": [
"#using Pkg; Pkg.add(\"LinearAlgebra\"); Pkg.add(\"Markdown\")\n",
"# using Documenter\n",
"using LinearAlgebra\n",
"using Markdown # Pour que les docstrings en début des fonctions ne posent\n",
" # pas de soucis. Ces docstrings sont utiles pour générer \n",
" # la documentation sous GitHub\n",
"include(\"Algorithme_De_Newton.jl\")\n",
"\n",
"# Affichage les sorties de l'algorithme des Régions de confiance\n",
"function my_afficher_resultats(algo,nom_fct,point_init,xmin,fxmin,flag,sol_exacte,nbiters)\n",
"\tprintln(\"-------------------------------------------------------------------------\")\n",
"\tprintstyled(\"Résultats de : \",algo, \" appliqué à \",nom_fct, \" au point initial \", point_init, \":\\n\",bold=true,color=:blue)\n",
"\tprintln(\" * xsol = \",xmin)\n",
"\tprintln(\" * f(xsol) = \",fxmin)\n",
"\tprintln(\" * nb_iters = \",nbiters)\n",
"\tprintln(\" * flag = \",flag)\n",
"\tprintln(\" * sol_exacte : \", sol_exacte)\n",
"# Affichage les sorties de l'algorithme de Newton\n",
"function my_afficher_resultats(algo, nom_fct, point_init, xmin, fxmin, flag, sol_exacte, nbiters)\n",
" println(\"-------------------------------------------------------------------------\")\n",
" printstyled(\"Résultats de : \", algo, \" appliqué à \", nom_fct, \" au point initial \", point_init, \":\\n\", bold = true, color = :blue)\n",
" println(\" * xsol = \", xmin)\n",
" println(\" * f(xsol) = \", fxmin)\n",
" println(\" * nb_iters = \", nbiters)\n",
" println(\" * flag = \", flag)\n",
" println(\" * sol_exacte : \", sol_exacte)\n",
"end\n",
"\n",
"# Fonction f0\n",
"# -----------\n",
"f0(x) = sin(x)\n",
"# la gradient de la fonction f0\n",
"grad_f0(x) = cos(x)\n",
"# la hessienne de la fonction f0\n",
"hess_f0(x) = -sin(x)\n",
"sol_exacte = -pi/2\n",
"options = []\n",
"\n",
"x0 = sol_exacte\n",
"xmin,f_min,flag,nb_iters = Algorithme_De_Newton(f0,grad_f0,hess_f0,x0,options)\n",
"my_afficher_resultats(\"Newton\",\"f0\",x0,xmin,f_min,flag,sol_exacte,nb_iters)\n",
"x0 = -pi/2+0.5\n",
"xmin,f_min,flag,nb_iters = Algorithme_De_Newton(f0,grad_f0,hess_f0,x0,options)\n",
"my_afficher_resultats(\"Newton\",\"f0\",x0,xmin,f_min,flag,sol_exacte,nb_iters)\n",
"x0 = pi/2\n",
"xmin,f_min,flag,nb_iters = Algorithme_De_Newton(f0,grad_f0,hess_f0,x0,options)\n",
"my_afficher_resultats(\"Newton\",\"f0\",x0,xmin,f_min,flag,sol_exacte,nb_iters)"
"x0 = sol_exacte_f0\n",
"xmin, f_min, flag, nb_iters = Algorithme_De_Newton(f0, grad_f0, hess_f0, x0, options)\n",
"my_afficher_resultats(\"Newton\", \"f0\", x0, xmin, f_min, flag, sol_exacte_f0, nb_iters)\n",
"x0 = -pi / 2 + 0.5\n",
"xmin, f_min, flag, nb_iters = Algorithme_De_Newton(f0, grad_f0, hess_f0, x0, options)\n",
"my_afficher_resultats(\"Newton\", \"f0\", x0, xmin, f_min, flag, sol_exacte_f0, nb_iters)\n",
"x0 = pi / 2\n",
"xmin, f_min, flag, nb_iters = Algorithme_De_Newton(f0, grad_f0, hess_f0, x0, options)\n",
"my_afficher_resultats(\"Newton\", \"f0\", x0, xmin, f_min, flag, sol_exacte_f0, nb_iters)\n",
"\n",
"x0 = sol_exacte_f1\n",
"xmin, f_min, flag, nb_iters = Algorithme_De_Newton(f1, grad_f1, hess_f1, x0, options)\n",
"my_afficher_resultats(\"Newton\", \"f1\", x0, xmin, f_min, flag, sol_exacte_f1, nb_iters)\n",
"x0 = [1, 0, 0]\n",
"xmin, f_min, flag, nb_iters = Algorithme_De_Newton(f1, grad_f1, hess_f1, x0, options)\n",
"my_afficher_resultats(\"Newton\", \"f1\", x0, xmin, f_min, flag, sol_exacte_f1, nb_iters)\n",
"x0 = [10, 3, -2.2]\n",
"xmin, f_min, flag, nb_iters = Algorithme_De_Newton(f1, grad_f1, hess_f1, x0, options)\n",
"my_afficher_resultats(\"Newton\", \"f1\", x0, xmin, f_min, flag, sol_exacte_f1, nb_iters)\n",
"\n",
"x0 = sol_exacte_f2\n",
"xmin, f_min, flag, nb_iters = Algorithme_De_Newton(f2, grad_f2, hess_f2, x0, options)\n",
"my_afficher_resultats(\"Newton\", \"f2\", x0, xmin, f_min, flag, sol_exacte_f2, nb_iters)\n",
"x0 = [-1.2, 1]\n",
"xmin, f_min, flag, nb_iters = Algorithme_De_Newton(f2, grad_f2, hess_f2, x0, options)\n",
"my_afficher_resultats(\"Newton\", \"f2\", x0, xmin, f_min, flag, sol_exacte_f2, nb_iters)\n",
"x0 = [10, 0]\n",
"xmin, f_min, flag, nb_iters = Algorithme_De_Newton(f2, grad_f2, hess_f2, x0, options)\n",
"my_afficher_resultats(\"Newton\", \"f2\", x0, xmin, f_min, flag, sol_exacte_f2, nb_iters)\n",
"# x0 = [0, 1 / 200 + 1 / 10^12] # explose !\n",
"# xmin, f_min, flag, nb_iters = Algorithme_De_Newton(f2, grad_f2, hess_f2, x0, options)\n",
"# my_afficher_resultats(\"Newton\", \"f2\", x0, xmin, f_min, flag, sol_exacte_f2, nb_iters)"
]
},
{
"cell_type": "code",
"execution_count": 123,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"\u001b[37m\u001b[1mTest Summary: | \u001b[22m\u001b[39m\u001b[32m\u001b[1mPass \u001b[22m\u001b[39m\u001b[36m\u001b[1mTotal\u001b[22m\u001b[39m\n",
"L'algo de Newton | \u001b[32m 14 \u001b[39m\u001b[36m 14\u001b[39m\n"
]
},
{
"data": {
"text/plain": [
"Test.DefaultTestSet(\"L'algo de Newton\", Any[Test.DefaultTestSet(\"Cas test 1 x0 = solution\", Any[Test.DefaultTestSet(\"solution\", Any[], 1, false, false), Test.DefaultTestSet(\"itération\", Any[], 1, false, false)], 0, false, false), Test.DefaultTestSet(\"Cas test 1 x0 = x011\", Any[Test.DefaultTestSet(\"solution\", Any[], 1, false, false), Test.DefaultTestSet(\"itération\", Any[], 1, false, false)], 0, false, false), Test.DefaultTestSet(\"Cas test 1 x0 = x012\", Any[Test.DefaultTestSet(\"solution\", Any[], 1, false, false), Test.DefaultTestSet(\"itération\", Any[], 1, false, false)], 0, false, false), Test.DefaultTestSet(\"Cas test 2 x0 = solution\", Any[Test.DefaultTestSet(\"solution\", Any[], 1, false, false), Test.DefaultTestSet(\"itération\", Any[], 1, false, false)], 0, false, false), Test.DefaultTestSet(\"Cas test 2 x0 = x021\", Any[Test.DefaultTestSet(\"solution\", Any[], 1, false, false), Test.DefaultTestSet(\"itération\", Any[], 1, false, false)], 0, false, false), Test.DefaultTestSet(\"Cas test 2 x0 = x022\", Any[Test.DefaultTestSet(\"solution\", Any[], 1, false, false), Test.DefaultTestSet(\"itération\", Any[], 1, false, false)], 0, false, false), Test.DefaultTestSet(\"Cas test 2 x0 = x023\", Any[Test.DefaultTestSet(\"solution\", Any[], 1, false, false), Test.DefaultTestSet(\"exception\", Any[], 1, false, false)], 0, false, false)], 0, false, false)"
]
},
"metadata": {},
"output_type": "display_data"
}
],
"source": [
"tester_algo_newton(false, Algorithme_De_Newton)"
]
},
{
@ -140,7 +354,20 @@
"cell_type": "markdown",
"metadata": {},
"source": [
"## Vos réponses?\n"
"## Vos réponses?\n",
"La méthode de Newton repose sur la recherche des solutions de $\\nabla f(x) = 0$, cet algorithme recherche donc les extremums de $f$ et non uniquement ses minimas.\n",
"\n",
"1. \n",
" 1. Dans le cas où $x_0 = x_{sol}$, les tests de convergence arretent l'algorithme (et rien ne se passe)\n",
" 2. Dans le cas où $x_0 = \\frac{1-\\pi}{2}$, l'algorithme trouve une racine de $cos(x)$ en seulement 2 itérations.\n",
" 3. Dans le cas où $x_0 = \\frac\\pi2$, l'algorithme tombe immédiatement sur une racine de $cos(x)$, il s'arrête mais la solution est un maximum et non un minimum.\n",
"\n",
"2. \n",
" 1. $x_0 = x_{sol}$, donc la convergence en une itération est cohérent.\n",
" 2. $x_0 = [1, 0, 0]$, $\\nabla^2f(x_0) \\setminus \\nabla f (x_0) ~= [0, -1, -1]$. Ainsi après une itération, on retombe sur $[1, 1, 1]$, qui est un minimum de f1.\n",
" 3. $x_0 = [10, 3, -2.2]$, on a le même scénario que précédemment : $\\nabla^2f(x_0) \\setminus \\nabla f (x_0) ~= [9, 2, -3.2]$.\n",
"\n",
"3. $f_2$ ne peut pas converger dans le cas où $x_0= [0, \\frac1{200} + 10^{-12}]$ puisque à partir d'une certaine itération, la hessienne possède des coefficients trop grand et devient numériquement non inversible."
]
},
{
@ -159,11 +386,140 @@
},
{
"cell_type": "code",
"execution_count": 29,
"execution_count": 105,
"metadata": {},
"outputs": [],
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"-------------------------------------------------------------------------\n",
"\u001b[34m\u001b[1mRésultats de : Cauchy 1\u001b[22m\u001b[39m\n",
" * s = [0, 0]\n",
" * e = 0\n",
"-------------------------------------------------------------------------\n",
"\u001b[34m\u001b[1mRésultats de : Cauchy 2\u001b[22m\u001b[39m\n",
" * s = [-0.9230769230769234, -0.30769230769230776]\n",
" * e = 1\n",
"-------------------------------------------------------------------------\n",
"\u001b[34m\u001b[1mRésultats de : Cauchy 3\u001b[22m\u001b[39m\n",
" * s = [0.8944271909999159, -0.4472135954999579]\n",
" * e = -1\n"
]
}
],
"source": [
"# Vos tests"
"function my_afficher_resultats_cauchy(algo, s, e)\n",
" println(\"-------------------------------------------------------------------------\")\n",
" printstyled(\"Résultats de : \", algo, \"\\n\", bold = true, color = :blue)\n",
" println(\" * s = \", s)\n",
" println(\" * e = \", e)\n",
"end\n",
"\n",
"g1 = [0; 0]\n",
"H1 = [7 0; 0 2]\n",
"delta1 = 1\n",
"s1, e1 = Pas_De_Cauchy(g1, H1, delta1)\n",
"my_afficher_resultats_cauchy(\"Cauchy 1\", s1, e1)\n",
"\n",
"g2 = [6; 2]\n",
"H2 = [7 0; 0 2]\n",
"delta2 = 1\n",
"s2, e2 = Pas_De_Cauchy(g2, H2, delta2)\n",
"my_afficher_resultats_cauchy(\"Cauchy 2\", s2, e2)\n",
"\n",
"g3 = [-2; 1]\n",
"H3 = [-2 0; 0 10]\n",
"delta3 = 1\n",
"s3, e3 = Pas_De_Cauchy(g3, H3, delta3)\n",
"my_afficher_resultats_cauchy(\"Cauchy 3\", s3, e3)"
]
},
{
"cell_type": "code",
"execution_count": 122,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Cauchy 4 = [5.000000000000001, -2.5000000000000004]\n",
"Cauchy 5= [4.47213595499958, -2.23606797749979]\n",
"Cauchy 6= [-4.743416490252569, -1.5811388300841895]\n",
"Cauchy 6= [-2.23606797749979, -4.47213595499958]\n",
"\u001b[37m\u001b[1mTest Summary: | \u001b[22m\u001b[39m\u001b[32m\u001b[1mPass \u001b[22m\u001b[39m\u001b[36m\u001b[1mTotal\u001b[22m\u001b[39m\n",
"Pas de Cauchy | \u001b[32m 7 \u001b[39m\u001b[36m 7\u001b[39m\n"
]
},
{
"data": {
"text/plain": [
"Test.DefaultTestSet(\"Pas de Cauchy\", Any[Test.DefaultTestSet(\"g = 0\", Any[], 1, false, false), Test.DefaultTestSet(\"quad 2, non saturé\", Any[], 1, false, false), Test.DefaultTestSet(\"quad 2, saturé\", Any[], 1, false, false), Test.DefaultTestSet(\"quad 3, non saturé\", Any[], 1, false, false), Test.DefaultTestSet(\"quad 3, saturé\", Any[], 1, false, false), Test.DefaultTestSet(\"quad 3, g'*H*g <0 saturé\", Any[], 1, false, false), Test.DefaultTestSet(\"quad 3, g'*H*g = 0 saturé\", Any[], 1, false, false)], 0, false, false)"
]
},
"metadata": {},
"output_type": "display_data"
}
],
"source": [
"tester_pas_de_cauchy(false, Pas_De_Cauchy)"
]
},
{
"cell_type": "code",
"execution_count": 108,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"-------------------------------------------------------------------------\n",
"\u001b[34m\u001b[1mRésultats de : RC-Cauchy appliqué à f0 au point initial -1.5707963267948966:\u001b[22m\u001b[39m\n",
" * xsol = -1.5707963267948966\n",
" * f(xsol) = -1.0\n",
" * nb_iters = 0\n",
" * flag = 0\n",
" * sol_exacte : -1.5707963267948966\n",
"-------------------------------------------------------------------------\n",
"\u001b[34m\u001b[1mRésultats de : RC-Cauchy appliqué à f1 au point initial [1, 1, 1]:\u001b[22m\u001b[39m\n",
" * xsol = [1, 1, 1]\n",
" * f(xsol) = 0\n",
" * nb_iters = 0\n",
" * flag = 0\n",
" * sol_exacte : [1, 1, 1]\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"-------------------------------------------------------------------------\n",
"\u001b[34m\u001b[1mRésultats de : RC-Cauchy appliqué à f2 au point initial [1.0, 0.01]:\u001b[22m\u001b[39m\n",
" * xsol = [1.0, 0.01]\n",
" * f(xsol) = 0.0\n",
" * nb_iters = 1\n",
" * flag = 1\n",
" * sol_exacte : [1.0, 0.01]\n"
]
}
],
"source": [
"options = [10, 0.5, 2.00, 0.25, 0.75, 2, 10000, sqrt(eps()), 1e-15]\n",
"algo = \"cauchy\"\n",
"\n",
"x0 = sol_exacte_f0\n",
"xmin, f_min, flag, nb_iters = Regions_De_Confiance(algo, f0, grad_f0, hess_f0, x0, options)\n",
"my_afficher_resultats(\"RC-Cauchy\", \"f0\", x0, xmin, f_min, flag, sol_exacte_f0, nb_iters)\n",
"\n",
"x0 = sol_exacte_f1\n",
"xmin, f_min, flag, nb_iters = Regions_De_Confiance(algo, f1, grad_f1, hess_f1, x0, options)\n",
"my_afficher_resultats(\"RC-Cauchy\", \"f1\", x0, xmin, f_min, flag, sol_exacte_f1, nb_iters)\n",
"\n",
"x0 = sol_exacte_f2\n",
"xmin, f_min, flag, nb_iters = Regions_De_Confiance(algo, f2, grad_f2, hess_f2, x0, options)\n",
"my_afficher_resultats(\"RC-Cauchy\", \"f2\", x0, xmin, f_min, flag, sol_exacte_f2, nb_iters)"
]
},
{
@ -174,17 +530,17 @@
"\n",
"1. Quelle relation lie la fonction test $f_1$ et son modèle de Taylor à lordre 2 ? Comparer alors les performances de Newton et RC-Pas de Cauchy sur cette fonction.\n",
"\n",
"2. Le rayon initial de la région de confiance est un paramètre important dans lanalyse\n",
"de la performance de lalgorithme. Sur quel(s) autre(s) paramètre(s) peut-on jouer\n",
"pour essayer daméliorer cette performance ? Étudier linfluence dau moins deux de\n",
"ces paramètres."
"2. Le rayon initial de la région de confiance est un paramètre important dans lanalyse de la performance de lalgorithme. Sur quel(s) autre(s) paramètre(s) peut-on jouer pour essayer daméliorer cette performance ? Étudier linfluence dau moins deux de ces paramètres."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Vos réponses?\n"
"## Vos réponses?\n",
"1. Si on note $q_1$ le modèle de Taylor à l'ordre 2 de $f_1$, alors $m^1_k(x_k+s) = q_1(s) = f_1(x_k) + g_1^T s + \\frac12 s^T H_k s$.\n",
"\n",
"2. Pour essayer d'améliorer les performances de l'algorithme des régions de confiance avec Cauchy, on peut aussi essayer d'agrandir $\\Delta_{max}$ pour que l'algorithme converge plus rapidement, dans le cas où on observe un grand nombre d'itérations lors de la résolution."
]
},
{
@ -205,11 +561,210 @@
},
{
"cell_type": "code",
"execution_count": 30,
"execution_count": 119,
"metadata": {},
"outputs": [],
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"-------------------------------------------------------------------------\n",
"\u001b[34m\u001b[1mRésultats de : Cauchy 3\u001b[22m\u001b[39m\n",
" * s = [2.220446049250313e-16, 1.0]\n",
"-------------------------------------------------------------------------\n",
"\u001b[34m\u001b[1mRésultats de : Cauchy 3\u001b[22m\u001b[39m\n",
" * s = [NaN, NaN]\n"
]
}
],
"source": [
"# Vos tests"
"function my_afficher_resultats_gct(algo, s)\n",
" println(\"-------------------------------------------------------------------------\")\n",
" printstyled(\"Résultats de : \", algo, \"\\n\", bold = true, color = :blue)\n",
" println(\" * s = \", s)\n",
"end\n",
"\n",
"options = []\n",
"\n",
"gradf(x) = [-400 * x[1] * (x[2] - x[1]^2) - 2 * (1 - x[1]); 200 * (x[2] - x[1]^2)]\n",
"hessf(x) = [-400*(x[2]-3*x[1]^2)+2 -400*x[1]; -400*x[1] 200]\n",
"xk = [1; 0]\n",
"s = Gradient_Conjugue_Tronque(gradf(xk), hessf(xk), options)\n",
"my_afficher_resultats_gct(\"Cauchy 3\", s)\n",
"\n",
"grad = [0; 0]\n",
"Hess = [7 0; 0 2]\n",
"s = Gradient_Conjugue_Tronque(grad, Hess, options)\n",
"my_afficher_resultats_gct(\"Cauchy 3\", s)"
]
},
{
"cell_type": "code",
"execution_count": 121,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"\u001b[37mGradient-CT: \u001b[39m\u001b[91m\u001b[1mTest Failed\u001b[22m\u001b[39m at \u001b[39m\u001b[1m/home/laurent/.julia/packages/TestOptinum/OYMgn/src/tester_gct.jl:28\u001b[22m\n",
" Expression: ≈(s, [0.0; 0.0], atol = tol_test)\n",
" Evaluated: [NaN, NaN] ≈ [0.0, 0.0] (atol=0.001)\n",
"Stacktrace:\n",
" [1] \u001b[0m\u001b[1mmacro expansion\u001b[22m\n",
"\u001b[90m @ \u001b[39m\u001b[90m~/.julia/packages/TestOptinum/OYMgn/src/\u001b[39m\u001b[90;4mtester_gct.jl:28\u001b[0m\u001b[90m [inlined]\u001b[39m\n",
" [2] \u001b[0m\u001b[1mmacro expansion\u001b[22m\n",
"\u001b[90m @ \u001b[39m\u001b[90m/build/julia/src/julia-1.6.3/usr/share/julia/stdlib/v1.6/Test/src/\u001b[39m\u001b[90;4mTest.jl:1151\u001b[0m\u001b[90m [inlined]\u001b[39m\n",
" [3] \u001b[0m\u001b[1mtester_gct\u001b[22m\u001b[0m\u001b[1m(\u001b[22m\u001b[90mafficher\u001b[39m::\u001b[0mBool, \u001b[90mGradient_Conjugue_Tronque\u001b[39m::\u001b[0mtypeof(Gradient_Conjugue_Tronque)\u001b[0m\u001b[1m)\u001b[22m\n",
"\u001b[90m @ \u001b[39m\u001b[35mTestOptinum\u001b[39m \u001b[90m~/.julia/packages/TestOptinum/OYMgn/src/\u001b[39m\u001b[90;4mtester_gct.jl:24\u001b[0m\n",
"\u001b[37mGradient-CT: \u001b[39m\u001b[91m\u001b[1mTest Failed\u001b[22m\u001b[39m at \u001b[39m\u001b[1m/home/laurent/.julia/packages/TestOptinum/OYMgn/src/tester_gct.jl:35\u001b[22m\n",
" Expression: ≈(s, (-delta * grad) / norm(grad), atol = tol_test)\n",
" Evaluated: [0.0, 0.0] ≈ [-0.4743416490252569, -0.15811388300841897] (atol=0.001)\n",
"Stacktrace:\n",
" [1] \u001b[0m\u001b[1mmacro expansion\u001b[22m\n",
"\u001b[90m @ \u001b[39m\u001b[90m~/.julia/packages/TestOptinum/OYMgn/src/\u001b[39m\u001b[90;4mtester_gct.jl:35\u001b[0m\u001b[90m [inlined]\u001b[39m\n",
" [2] \u001b[0m\u001b[1mmacro expansion\u001b[22m\n",
"\u001b[90m @ \u001b[39m\u001b[90m/build/julia/src/julia-1.6.3/usr/share/julia/stdlib/v1.6/Test/src/\u001b[39m\u001b[90;4mTest.jl:1151\u001b[0m\u001b[90m [inlined]\u001b[39m\n",
" [3] \u001b[0m\u001b[1mtester_gct\u001b[22m\u001b[0m\u001b[1m(\u001b[22m\u001b[90mafficher\u001b[39m::\u001b[0mBool, \u001b[90mGradient_Conjugue_Tronque\u001b[39m::\u001b[0mtypeof(Gradient_Conjugue_Tronque)\u001b[0m\u001b[1m)\u001b[22m\n",
"\u001b[90m @ \u001b[39m\u001b[35mTestOptinum\u001b[39m \u001b[90m~/.julia/packages/TestOptinum/OYMgn/src/\u001b[39m\u001b[90;4mtester_gct.jl:24\u001b[0m\n",
"\u001b[37mGradient-CT: \u001b[39m"
]
},
{
"ename": "TestSetException",
"evalue": "Some tests did not pass: 0 passed, 2 failed, 1 errored, 0 broken.",
"output_type": "error",
"traceback": [
"Some tests did not pass: 0 passed, 2 failed, 1 errored, 0 broken.\n",
"\n",
"Stacktrace:\n",
" [1] macro expansion\n",
" @ /build/julia/src/julia-1.6.3/usr/share/julia/stdlib/v1.6/Test/src/Test.jl:1161 [inlined]\n",
" [2] tester_gct(afficher::Bool, Gradient_Conjugue_Tronque::typeof(Gradient_Conjugue_Tronque))\n",
" @ TestOptinum ~/.julia/packages/TestOptinum/OYMgn/src/tester_gct.jl:24\n",
" [3] top-level scope\n",
" @ ~/Documents/Cours/ENSEEIHT/S7 - Optimisation numérique/optinum/src/TP-Projet-Optinum.ipynb:1\n",
" [4] eval\n",
" @ ./boot.jl:360 [inlined]\n",
" [5] include_string(mapexpr::typeof(REPL.softscope), mod::Module, code::String, filename::String)\n",
" @ Base ./loading.jl:1116\n",
" [6] #invokelatest#2\n",
" @ ./essentials.jl:708 [inlined]\n",
" [7] invokelatest\n",
" @ ./essentials.jl:706 [inlined]\n",
" [8] (::VSCodeServer.var\"#146#147\"{VSCodeServer.NotebookRunCellArguments, String})()\n",
" @ VSCodeServer ~/.vscode/extensions/julialang.language-julia-1.5.6/scripts/packages/VSCodeServer/src/serve_notebook.jl:18\n",
" [9] withpath(f::VSCodeServer.var\"#146#147\"{VSCodeServer.NotebookRunCellArguments, String}, path::String)\n",
" @ VSCodeServer ~/.vscode/extensions/julialang.language-julia-1.5.6/scripts/packages/VSCodeServer/src/repl.jl:185\n",
" [10] notebook_runcell_request(conn::VSCodeServer.JSONRPC.JSONRPCEndpoint{Base.PipeEndpoint, Base.PipeEndpoint}, params::VSCodeServer.NotebookRunCellArguments)\n",
" @ VSCodeServer ~/.vscode/extensions/julialang.language-julia-1.5.6/scripts/packages/VSCodeServer/src/serve_notebook.jl:14\n",
" [11] dispatch_msg(x::VSCodeServer.JSONRPC.JSONRPCEndpoint{Base.PipeEndpoint, Base.PipeEndpoint}, dispatcher::VSCodeServer.JSONRPC.MsgDispatcher, msg::Dict{String, Any})\n",
" @ VSCodeServer.JSONRPC ~/.vscode/extensions/julialang.language-julia-1.5.6/scripts/packages/JSONRPC/src/typed.jl:67\n",
" [12] serve_notebook(pipename::String; crashreporting_pipename::String)\n",
" @ VSCodeServer ~/.vscode/extensions/julialang.language-julia-1.5.6/scripts/packages/VSCodeServer/src/serve_notebook.jl:94\n",
" [13] top-level scope\n",
" @ ~/.vscode/extensions/julialang.language-julia-1.5.6/scripts/notebook/notebook.jl:12"
]
}
],
"source": [
"tester_gct(false, Gradient_Conjugue_Tronque)"
]
},
{
"cell_type": "code",
"execution_count": 124,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"-------------------------------------------------------------------------\n",
"\u001b[34m\u001b[1mRésultats de : RC-GCT appliqué à f0 au point initial -1.5707963267948966:\u001b[22m\u001b[39m\n",
" * xsol = -1.5707963267948966\n",
" * f(xsol) = -1.0\n",
" * nb_iters = 0\n",
" * flag = 0\n",
" * sol_exacte : -1.5707963267948966\n",
"-------------------------------------------------------------------------\n",
"\u001b[34m\u001b[1mRésultats de : RC-GCT appliqué à f1 au point initial [1, 1, 1]:\u001b[22m\u001b[39m\n",
" * xsol = [1, 1, 1]\n",
" * f(xsol) = 0\n",
" * nb_iters = 0\n",
" * flag = 0\n",
" * sol_exacte : [1, 1, 1]\n",
"-------------------------------------------------------------------------\n",
"\u001b[34m\u001b[1mRésultats de : RC-GCT appliqué à f2 au point initial [1.0, 0.01]:\u001b[22m\u001b[39m\n",
" * xsol = [1.0, 0.01]\n",
" * f(xsol) = 0.0\n",
" * nb_iters = 1\n",
" * flag = 1\n",
" * sol_exacte : [1.0, 0.01]\n"
]
}
],
"source": [
"options = [10, 0.5, 2.00, 0.25, 0.75, 2, 10000, sqrt(eps()), 1e-15]\n",
"algo = \"gct\"\n",
"\n",
"x0 = sol_exacte_f0\n",
"xmin, f_min, flag, nb_iters = Regions_De_Confiance(algo, f0, grad_f0, hess_f0, x0, options)\n",
"my_afficher_resultats(\"RC-GCT\", \"f0\", x0, xmin, f_min, flag, sol_exacte_f0, nb_iters)\n",
"\n",
"x0 = sol_exacte_f1\n",
"xmin, f_min, flag, nb_iters = Regions_De_Confiance(algo, f1, grad_f1, hess_f1, x0, options)\n",
"my_afficher_resultats(\"RC-GCT\", \"f1\", x0, xmin, f_min, flag, sol_exacte_f1, nb_iters)\n",
"\n",
"x0 = sol_exacte_f2\n",
"xmin, f_min, flag, nb_iters = Regions_De_Confiance(algo, f2, grad_f2, hess_f2, x0, options)\n",
"my_afficher_resultats(\"RC-GCT\", \"f2\", x0, xmin, f_min, flag, sol_exacte_f2, nb_iters)"
]
},
{
"cell_type": "code",
"execution_count": 126,
"metadata": {},
"outputs": [
{
"ename": "TestSetException",
"evalue": "Some tests did not pass: 4 passed, 3 failed, 1 errored, 0 broken.",
"output_type": "error",
"traceback": [
"Some tests did not pass: 4 passed, 3 failed, 1 errored, 0 broken.\n",
"\n",
"Stacktrace:\n",
" [1] macro expansion\n",
" @ /build/julia/src/julia-1.6.3/usr/share/julia/stdlib/v1.6/Test/src/Test.jl:1161 [inlined]\n",
" [2] tester_regions_de_confiance(afficher::Bool, Regions_De_Confiance::typeof(Regions_De_Confiance))\n",
" @ TestOptinum ~/.julia/packages/TestOptinum/OYMgn/src/tester_regions_de_confiance.jl:39\n",
" [3] top-level scope\n",
" @ ~/Documents/Cours/ENSEEIHT/S7 - Optimisation numérique/optinum/src/TP-Projet-Optinum.ipynb:1\n",
" [4] eval\n",
" @ ./boot.jl:360 [inlined]\n",
" [5] include_string(mapexpr::typeof(REPL.softscope), mod::Module, code::String, filename::String)\n",
" @ Base ./loading.jl:1116\n",
" [6] #invokelatest#2\n",
" @ ./essentials.jl:708 [inlined]\n",
" [7] invokelatest\n",
" @ ./essentials.jl:706 [inlined]\n",
" [8] (::VSCodeServer.var\"#146#147\"{VSCodeServer.NotebookRunCellArguments, String})()\n",
" @ VSCodeServer ~/.vscode/extensions/julialang.language-julia-1.5.6/scripts/packages/VSCodeServer/src/serve_notebook.jl:18\n",
" [9] withpath(f::VSCodeServer.var\"#146#147\"{VSCodeServer.NotebookRunCellArguments, String}, path::String)\n",
" @ VSCodeServer ~/.vscode/extensions/julialang.language-julia-1.5.6/scripts/packages/VSCodeServer/src/repl.jl:185\n",
" [10] notebook_runcell_request(conn::VSCodeServer.JSONRPC.JSONRPCEndpoint{Base.PipeEndpoint, Base.PipeEndpoint}, params::VSCodeServer.NotebookRunCellArguments)\n",
" @ VSCodeServer ~/.vscode/extensions/julialang.language-julia-1.5.6/scripts/packages/VSCodeServer/src/serve_notebook.jl:14\n",
" [11] dispatch_msg(x::VSCodeServer.JSONRPC.JSONRPCEndpoint{Base.PipeEndpoint, Base.PipeEndpoint}, dispatcher::VSCodeServer.JSONRPC.MsgDispatcher, msg::Dict{String, Any})\n",
" @ VSCodeServer.JSONRPC ~/.vscode/extensions/julialang.language-julia-1.5.6/scripts/packages/JSONRPC/src/typed.jl:67\n",
" [12] serve_notebook(pipename::String; crashreporting_pipename::String)\n",
" @ VSCodeServer ~/.vscode/extensions/julialang.language-julia-1.5.6/scripts/packages/VSCodeServer/src/serve_notebook.jl:94\n",
" [13] top-level scope\n",
" @ ~/.vscode/extensions/julialang.language-julia-1.5.6/scripts/notebook/notebook.jl:12"
]
}
],
"source": [
"tester_regions_de_confiance(true, Regions_De_Confiance)"
]
},
{
@ -257,7 +812,7 @@
},
{
"cell_type": "code",
"execution_count": 31,
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [

View file

@ -12,7 +12,7 @@ Tester l'algorithme du gradient conjugué tronqué
* la quadratique 5
* la quadratique 6
"""
function tester_gct(afficher::Bool,Gradient_Conjugue_Tronque::Function)
function tester_gct(afficher::Bool, Gradient_Conjugue_Tronque::Function)
tol = 1e-7
max_iter = 100
@ -21,40 +21,40 @@ function tester_gct(afficher::Bool,Gradient_Conjugue_Tronque::Function)
@testset "Gradient-CT" begin
# le cas de test 1
grad = [0 ; 0]
Hess = [7 0 ; 0 2]
grad = [0; 0]
Hess = [7 0; 0 2]
delta = 1
s = Gradient_Conjugue_Tronque(grad,Hess,[delta;max_iter;tol])
@test s [0.0 ; 0.0] atol = tol_test
s = Gradient_Conjugue_Tronque(grad, Hess, [delta; max_iter; tol])
@test s [0.0; 0.0] atol = tol_test
# le cas de test 2 H definie positive
grad = [6 ; 2]
Hess = [7 0 ; 0 2]
grad = [6; 2]
Hess = [7 0; 0 2]
delta = 0.5 # sol = pas de Cauchy
s = Gradient_Conjugue_Tronque(grad,Hess,[delta;max_iter;tol])
@test s -delta*grad/norm(grad) atol = tol_test
s = Gradient_Conjugue_Tronque(grad, Hess, [delta; max_iter; tol])
@test s -delta * grad / norm(grad) atol = tol_test
delta = 1.2 # saturation à la 2ieme itération
s = Gradient_Conjugue_Tronque(grad,Hess,[delta;max_iter;tol])
@test s [-0.8740776099190263, -0.8221850958502244] atol = tol_test
s = Gradient_Conjugue_Tronque(grad, Hess, [delta; max_iter; tol])
@test s [-0.8740776099190263, -0.8221850958502244] atol = tol_test
delta = 3 # sol = min global
s = Gradient_Conjugue_Tronque(grad,Hess,[delta;max_iter;tol])
@test s -Hess\grad atol = tol_test
s = Gradient_Conjugue_Tronque(grad, Hess, [delta; max_iter; tol])
@test s -Hess \ grad atol = tol_test
# le cas test 2 bis matrice avec 1 vp < 0 et 1 vp > 0
grad = [1,2]
Hess = [1 0 ; 0 -1]
delta = 1. # g^T H g < 0 première direction concave
s = Gradient_Conjugue_Tronque(grad,Hess,[delta;max_iter;tol])
@test s -delta*grad/norm(grad) atol = tol_test
grad = [1,0]
grad = [1, 2]
Hess = [1 0; 0 -1]
delta = 1.0 # g^T H g < 0 première direction concave
s = Gradient_Conjugue_Tronque(grad, Hess, [delta; max_iter; tol])
@test s -delta * grad / norm(grad) atol = tol_test
grad = [1, 0]
delta = 0.5 # g^T H g > 0 sol pas de Cauchy
s = Gradient_Conjugue_Tronque(grad,Hess,[delta;max_iter;tol])
@test s -delta*grad/norm(grad) atol = tol_test
grad = [2,1] # g^T H g > 0 sol à l'itération 2, saturation
s = Gradient_Conjugue_Tronque(grad, Hess, [delta; max_iter; tol])
@test s -delta * grad / norm(grad) atol = tol_test
grad = [2, 1] # g^T H g > 0 sol à l'itération 2, saturation
delta = 6
s = Gradient_Conjugue_Tronque(grad,Hess,[delta;max_iter;tol])
@test s [0.48997991959774634, 5.979959839195494] atol = tol_test
s = Gradient_Conjugue_Tronque(grad, Hess, [delta; max_iter; tol])
@test s [0.48997991959774634, 5.979959839195494] atol = tol_test
# le cas de test 3
#grad = [-2 ; 1]
#Hess = [-2 0 ; 0 10]
@ -70,18 +70,18 @@ function tester_gct(afficher::Bool,Gradient_Conjugue_Tronque::Function)
#@test s ≈ [0.0 ; 0.0] atol = tol_test
# le cas de test 5
grad = [2 ; 3]
Hess = [4 6 ; 6 5]
grad = [2; 3]
Hess = [4 6; 6 5]
delta = 3
s = Gradient_Conjugue_Tronque(grad,Hess,[delta;max_iter;tol])
@test s [1.9059020876695578 ; -2.3167946029410595] atol = tol_test
s = Gradient_Conjugue_Tronque(grad, Hess, [delta; max_iter; tol])
@test s [1.9059020876695578; -2.3167946029410595] atol = tol_test
# le cas de test 6
# Le pas de Cauchy conduit à un gradient nul en 1 itération
grad = [2 ; 0]
Hess = [4 0 ; 0 -15]
grad = [2; 0]
Hess = [4 0; 0 -15]
delta = 2
s = Gradient_Conjugue_Tronque(grad,Hess,[delta;max_iter;tol])
@test s [-0.5 ; 0.0] atol = tol_test
s = Gradient_Conjugue_Tronque(grad, Hess, [delta; max_iter; tol])
@test s [-0.5; 0.0] atol = tol_test
end
end
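
The [NaN, NaN] result and the first failing "Gradient-CT" assertion recorded in the notebook both come from the gradfk = 0 case, where the loop in Gradient_Conjugue_Tronque divides by a = ‖p_j‖² = 0. One possible guard, sketched here and not part of the commit; `gct_avec_garde` is only an illustrative name.

```julia
using LinearAlgebra

include("Gradient_Conjugue_Tronque.jl")

# Thin wrapper adding the g = 0 early exit expected by "le cas de test 1".
function gct_avec_garde(gradfk, hessfk, options)
    norm(gradfk) == 0 && return zeros(length(gradfk))
    return Gradient_Conjugue_Tronque(gradfk, hessfk, options)
end

println(gct_avec_garde([0; 0], [7 0; 0 2], [1; 100; 1e-6]))   # [0.0, 0.0] instead of [NaN, NaN]
```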