Nicolas Nunez Barreto / artiq_experiments / Commits / 5d2ee8a5

Commit 5d2ee8a5, authored Jan 08, 2024 by Marcelo Luda
Commit message: fix
Parent: aa39512d

Showing 2 changed files, with 615 additions and 2 deletions (+615 −2):
  analisis/plots/20231123_CPTconmicromocion3/lolo_ajuste_3iones_A.py   +597 −0
  analisis/plots/20231123_CPTconmicromocion3/lolo_graficos_pub.py      +18 −2

analisis/plots/20231123_CPTconmicromocion3/lolo_ajuste_3iones_A.py (new file, mode 100644) @ 5d2ee8a5
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Here we do the 3-ion fit from the file
artiq_experiments/analisis/plots/20231123_CPTconmicromocion3/CPT_plotter_20231123.py
taken from the part that says:
    "fit the merged 3-ion data"

@author: lolo
"""

import h5py
import matplotlib.pyplot as plt
import numpy as np
from scipy.optimize import curve_fit
from Data.EITfit.lolo_modelo_full_8niveles import PerformExperiment_8levels_MM
import time
#%% Helper functions for statistical information
from scipy.stats.distributions import t, chi2
import inspect


def estadistica(datos_x, datos_y, modelo, pcov, parametros, nombres=None, alpha=0.05):
    if nombres is None:
        nombres = inspect.getfullargspec(modelo).args[1:]

    # Number of parameters
    P = len(parametros)
    # Number of data points
    N = len(datos_x)
    # Degrees Of Freedom
    dof = N - P - 1

    # Compute the model coordinates
    # modelo_x = datos_x if modelo_x_arr is None else modelo_x_arr
    # modelo_y = modelo( modelo_x, *parametros )

    # Model prediction at the measured datos_x
    prediccion_modelo = modelo(datos_x, *parametros)

    # Relevant statistical quantities
    COV = pcov                                   # covariance matrix
    SE = np.sqrt(np.diag(COV))                   # standard error of the parameters
    residuos = datos_y - prediccion_modelo       # difference between the model and the data

    SSE = sum((residuos)**2)                     # Residual Sum of Squares
    SST = sum((datos_y - np.mean(datos_y))**2)   # Total Sum of Squares

    # http://en.wikipedia.org/wiki/Coefficient_of_determination
    # Fraction of the variance explained by the proposed model
    Rsq = 1 - SSE/SST                                # coefficient of determination
    Rsq_adj = 1 - (1 - Rsq)*(N - 1)/(N - P - 1)      # adjusted coefficient of determination

    # https://en.wikipedia.org/wiki/Pearson_correlation_coefficient#In_least_squares_regression_analysis
    # Correlation between the data and the model prediction
    r_pearson = np.corrcoef(datos_y, prediccion_modelo)[0, 1]

    # Reduced chi squared
    # https://en.wikipedia.org/wiki/Reduced_chi-squared_statistic
    chi2_ = sum(residuos**2)/N
    chi2_red = sum(residuos**2)/(N - P)

    # Chi squared test
    chi2_test = sum(residuos**2/abs(prediccion_modelo))

    # p-value of the fit
    p_val = chi2(dof).cdf(chi2_test)

    sT = t.ppf(1.0 - alpha/2.0, N - P)   # Student t multiplier
    CI = sT*SE                           # Confidence Interval

    return dict(R2=Rsq, R2_adj=Rsq_adj, chi2=chi2_, chi2_red=chi2_red, chi2_test=chi2_test,
                r=r_pearson, pvalue=p_val, SE=SE, CI=CI,
                parametros=parametros, nombres=nombres, alpha=alpha, N=N)
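# Added usage sketch (editor's note, not part of the original script): estadistica() takes the
# optimal parameters and covariance returned by scipy's curve_fit, e.g.
#   popt, pcov = curve_fit(modelo, x, y)
#   info = estadistica(x, y, modelo, pcov, popt)
#   print_estadistica(info)
# where modelo, x and y are placeholder names for the model function and the measured data.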
def print_estadistica(dic):
    pars = 'N,pvalue,R2,R2_adj,chi2,chi2_red,chi2_test,r,SE,CI,parametros,nombres,alpha'.split(',')
    N, pvalue, R2, R2_adj, chi2, chi2_red, chi2_test, r, SE, CI, parametros, nombres, alpha = [dic[k] for k in pars]
    P = len(parametros)

    print('R-squared    ', R2)
    print('R-sq_adjusted', R2_adj)
    print('chi2         ', chi2)
    print('chi2_reduced ', chi2_red)
    print('chi2_test    ', chi2_test)
    print('r-pearson    ', r)
    print('p-value      ', pvalue)
    print('')
    print('Standard Error (SE):')
    for i in range(P):
        print(f'parametro[{nombres[i]:>5s}]: ', parametros[i], ' ± ', SE[i])
    print('')
    print('Confidence interval at ' + str((1 - alpha)*100) + ' %:')
    for i in range(P):
        print(f'parametro[{nombres[i]:>5s}]: ', parametros[i], ' ± ', CI[i])
def fit(model, x_data, y_data, fixed=None, plot=False, **kwargs):
    """Takes the same arguments as curve_fit but allows fixing parameters and
    visualizing the fit.

    fixed: names of the parameters to fix, separated by spaces
    plot:  whether or not to plot the fit
    """
    # Extract the model parameter names
    param_names = inspect.getfullargspec(model).args[1:]

    if not 'p0' in kwargs.keys():
        kwargs['p0'] = np.ones(len(param_names))
    params_initial = kwargs['p0']

    # Check whether any parameter has to be fixed
    if isinstance(fixed, str):
        fixed = fixed.split()
    if fixed is None:
        fixed = []

    # Build a True/False list marking the fixed parameters
    params_fixed = [True if p in fixed else False for p in param_names]

    # If there are fixed parameters, strip their entries from the bounds
    if True in params_fixed and 'bounds' in kwargs.keys():
        try:
            b0 = [bb for fi, bb in zip(params_fixed, kwargs['bounds'][0]) if fi == False]
        except:
            b0 = kwargs['bounds'][0]
        try:
            b1 = [bb for fi, bb in zip(params_fixed, kwargs['bounds'][1]) if fi == False]
        except:
            b1 = kwargs['bounds'][1]
        kwargs['bounds'] = (b0, b1)

    print("Parameters:", param_names)
    # print(locals())
    print('\n')

    if plot:
        plt.figure()
        plt.plot(x_data, y_data, '.')
        ylim = plt.gca().get_ylim()
        l1, = plt.plot(x_data, model(x_data, *params_initial), 'r-')
        plt.ylim(ylim)

    # Auxiliary function for the fit: only the free parameters are fitted,
    # the fixed ones are filled in from params_initial
    def aux_func(x_data, *args):
        all_params = np.ones(len(param_names))
        jj = 0
        for ii in range(len(all_params)):
            if params_fixed[ii]:
                all_params[ii] = params_initial[ii]
            else:
                all_params[ii] = args[jj]
                jj += 1
        # print(params_fixed,all_params)
        rta = model(x_data, *all_params)
        if plot:
            l1.set_ydata(rta)
            plt.draw()
            plt.pause(0.01)
        return rta

    # Run curve_fit
    t0 = time.time()
    print("Starting the fit")
    kwargs['p0'] = [p for p, ff in zip(params_initial, params_fixed) if ff is False]
    params, pcov = curve_fit(aux_func, x_data, y_data, **kwargs)
    print(f"time: {round(time.time()-t0,1)} seg")

    # Rebuild the parameter vector to include the fixed parameters
    params2 = np.ones(len(param_names))
    jj = 0
    for ii in range(len(params2)):
        if params_fixed[ii]:
            params2[ii] = params_initial[ii]
        else:
            params2[ii] = params[jj]
            jj += 1

    # Rebuild the covariance matrix (rows/columns of fixed parameters are zero)
    ii, a = 0, []
    for fixed in params_fixed:
        if fixed:
            a.append([0]*len(pcov[0]))
        else:
            a.append(pcov[ii].tolist())
            ii += 1
    a = np.array(a)

    ii, b = 0, []
    for fixed in params_fixed:
        if fixed:
            b.append([0]*len(a.T[0]))
        else:
            b.append(a.T[ii].tolist())
            ii += 1
    pcov2 = np.array(b)

    # Compute the statistical information
    info = estadistica(x_data, y_data, model, pcov2, params2, nombres=None, alpha=0.05)
    print_estadistica(info)

    return params2, pcov2, info
# fit(modelo, FreqsDR, CountsDR/CountsDR.sum(), p0=(1,1,1),bounds=(-1e6,1e6) )
# fit(modelo, FreqsDR, CountsDR/CountsDR.sum(), fixed="", plot=True, p0=(1,2,3),bounds=(-1e6,1e6) )
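# Added note (editor's comment): in fit(), `fixed` is a space-separated string of parameter
# names from the model signature; those parameters are pinned at their p0 values and only the
# remaining ones are passed on to curve_fit, e.g.
#   fit(modelo, FreqsDR, CountsDR/CountsDR.sum(), fixed="SG SP", plot=True, p0=par_ini, bounds=bounds)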
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
#%% Load the data %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

"""
First I have CPT spectra of a single ion, scanning the DC voltage dc_A
"""

#C:\Users\Usuario\Documents\artiq\artiq_experiments\analisis\plots\20220106_CPT_DosLaseres_v08_TISA_DR\Data

# os.chdir('../20231123_CPTconmicromocion3/Data/')
folder = '../20231123_CPTconmicromocion3/Data/'

CPT_FILES = f"""
{folder}/000016262-IR_Scan_withcal_optimized
{folder}/000016239-IR_Scan_withcal_optimized
{folder}/000016240-IR_Scan_withcal_optimized
{folder}/000016241-IR_Scan_withcal_optimized
{folder}/000016244-IR_Scan_withcal_optimized
{folder}/000016255-IR_Scan_withcal_optimized
{folder}/000016256-IR_Scan_withcal_optimized
{folder}/000016257-IR_Scan_withcal_optimized
"""


def SeeKeys(files):
    for i, fname in enumerate(files.split()):
        data = h5py.File(fname + '.h5', 'r')  # read the h5: remember that our data live in 'datasets'
        print(fname)
        print(list(data['datasets'].keys()))


print(SeeKeys(CPT_FILES))
# folder on Nico's lab desktop PC:
#C:\Users\Usuario\Documents\artiq\artiq_experiments\analisis\plots\20211101_CPT_DosLaseres_v03\Data

Counts = []
Freqs = []

AmpTisa = []
UVCPTAmp = []
No_measures = []
Voltages = []

for i, fname in enumerate(CPT_FILES.split()):
    print(str(i) + ' - ' + fname)
    #print(fname)
    data = h5py.File(fname + '.h5', 'r')  # read the h5: remember that our data live in 'datasets'
    # Here I do something ugly to load the strings we stored, which also had a typo
    # at the end. This should no longer be necessary once that saving bug is fixed.
    Freqs.append(np.array(data['datasets']['IR1_Frequencies']))
    Counts.append(np.array(data['datasets']['data_array']))
    #AmpTisa.append(np.array(data['datasets']['TISA_CPT_amp']))
    UVCPTAmp.append(np.array(data['datasets']['UV_CPT_amp']))
    No_measures.append(np.array(data['datasets']['no_measures']))
    Voltages.append(np.array(data['datasets']['scanning_voltages']))


def Split(array, n):
    length = len(array)/n
    splitlist = []
    jj = 0
    while jj < length:
        partial = []
        ii = 0
        while ii < n:
            partial.append(array[jj*n + ii])
            ii = ii + 1
        splitlist.append(partial)
        jj = jj + 1
    return splitlist
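# Added example (editor's comment): Split chops a flat array into consecutive blocks of length n,
# e.g. Split([1, 2, 3, 4, 5, 6], 3) -> [[1, 2, 3], [4, 5, 6]]. Below it is used to split the raw
# counts of a measurement into blocks of one frequency sweep each.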
CountsSplit = []
CountsSplit.append(Split(Counts[0], len(Freqs[0])))
CountsSplit_2ions = []
CountsSplit_2ions.append(Split(Counts[4], len(Freqs[4])))
###############################################################################

"""
NOW WE MOVE ON TO MEASUREMENTS WITH MORE THAN ONE ION!!!
The measurements are good; they would need to be fitted properly, which I did not manage to do.
"""

"""
Plotting the reference CPT
1: 2 ions, -100 mV dcA
2: 2 ions, -150 mV dcA
3: 2 ions, -50 mV dcA
4: 2 ions, 5 voltages (the ion escapes during the 4th measurement and is gone by the 5th)
5, 6 and 7: 3 ions, with the scan centered at different points
"""

"""
Merge measurements 5, 6 and 7
"""

Freqs5 = [2*f*1e-6 for f in Freqs[5]]
Freqs6 = [2*f*1e-6 for f in Freqs[6]]
Freqs7 = [2*f*1e-6 for f in Freqs[7]]

Counts5 = Counts[5]
Counts6 = Counts[6]
Counts7 = Counts[7]

i_1_ini = 0
i_1 = 36
i_2_ini = 0
i_2 = 24

f_1 = 18
f_2 = 30

scale_1 = 0.92
scale_2 = 0.98

#Merged_freqs_test = [f-f_2 for f in Freqs6[i_2_ini:i_2]]+[f-f_1 for f in Freqs5[i_1_ini:i_1]]+Freqs7
#plt.plot(Merged_freqs_test,'o')

Merged_freqs = [f - f_2 for f in Freqs6[0:i_2]] + [f - f_1 for f in Freqs5[0:i_1]] + Freqs7
Merged_counts = [scale_2*c for c in Counts6[0:i_2]] + [scale_1*c for c in Counts5[0:i_1]] + list(Counts7)

Merged_freqs_rescaled = np.linspace(np.min(Merged_freqs), np.max(Merged_freqs), len(Merged_freqs))

#drs = [391.5, 399.5, 405.5, 414]
drs = [370, 379, 385, 391.5]

if False:
    plt.figure()
    jvec = [2]
    i = 0
    drive = 22.1

    for j in jvec:
        plt.plot([f - f_1 for f in Freqs5[0:i_1]], [scale_1*c for c in Counts5[0:i_1]], 'o')
        plt.plot([f - f_2 for f in Freqs6[0:i_2]], [scale_2*c for c in Counts6[0:i_2]], 'o')
        plt.plot(Freqs7, Counts7, 'o')
        plt.errorbar(Merged_freqs, Merged_counts, yerr=np.sqrt(Merged_counts), fmt='o', capsize=2, markersize=2)
        i = i + 1

    plt.xlabel('Frequency (MHz)')
    plt.ylabel('counts')
    plt.grid()
    for dr in drs:
        plt.axvline(dr)
        plt.axvline(dr + drive, color='red', linestyle='dashed', alpha=0.3)
        plt.axvline(dr - drive, color='red', linestyle='dashed', alpha=0.3)
    plt.legend()
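# Added note (editor's comment): measurements 5, 6 and 7 scan different frequency windows; to
# merge them, Freqs6 and Freqs5 are shifted by f_2 and f_1 (MHz) and their counts rescaled by
# scale_2 and scale_1 so the three partial spectra line up into a single 3-ion spectrum.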
#%% Fitting functions
"""
Fit the merged 3-ion measurement
"""

import numba

phidoppler, titadoppler = 0, 90
phirepump, titarepump = 0, 0
phiprobe = 0
titaprobe = 90

Temp = 0.5e-3

sg = 0.544
sp = 4.5
sr = 0

DetRepump = 0

lw = 0.1
DopplerLaserLinewidth, RepumpLaserLinewidth, ProbeLaserLinewidth = lw, lw, lw  # laser linewidths

u = 32.5e6

#B = (u/(2*np.pi))/c

correccion = -20

offsetxpi = 438 + correccion
DetDoppler = -35 - correccion - 22

gPS, gPD = 2*np.pi*21.58e6, 2*np.pi*1.35e6

alpha = 0

drivefreq = 2*np.pi*22.135*1e6

FreqsDR = [f - offsetxpi for f in Merged_freqs]
CountsDR = Merged_counts

freqslong = np.arange(min(FreqsDR), max(FreqsDR) + FreqsDR[1] - FreqsDR[0], 0.1*(FreqsDR[1] - FreqsDR[0]))

CircPr = 1
alpha = 0
# @numba.jit
def FitEIT_MM1(freqs, SG, SP, SCALE1, OFFSET, BETA1):
    #def FitEIT_MM(freqs, SG, SP, SCALE1, OFFSET, BETA1):
    #BETA = 1.8
    # SG = 0.6
    # SP = 8.1
    TEMP = 0.1e-3

    #BETA1, BETA2, BETA3 = 0, 0, 2
    Detunings, Fluorescence1 = PerformExperiment_8levels_MM(SG, SP, gPS, gPD, DetDoppler, u,
                                                            DopplerLaserLinewidth, ProbeLaserLinewidth, TEMP,
                                                            alpha, phidoppler, titadoppler, phiprobe, titaprobe,
                                                            BETA1, drivefreq,
                                                            min(freqs), max(freqs) + (freqs[1] - freqs[0]), freqs[1] - freqs[0],
                                                            circularityprobe=CircPr, plot=False, solvemode=1, detpvec=None)

    ScaledFluo1 = np.array([f*SCALE1 for f in Fluorescence1])
    return ScaledFluo1 + OFFSET
    #return ScaledFluo1
# @numba.jit
def FitEIT_MM(freqs, SG, SP, SCALE1, SCALE2, SCALE3, OFFSET, BETA1, BETA2, BETA3):
    #def FitEIT_MM(freqs, SG, SP, SCALE1, OFFSET, BETA1):
    #BETA = 1.8
    # SG = 0.6
    # SP = 8.1
    TEMP = 0.1e-3
    freqs = np.array(freqs)

    #BETA1, BETA2, BETA3 = 0, 0, 2
    Detunings, Fluorescence1 = PerformExperiment_8levels_MM(SG, SP, gPS, gPD, DetDoppler, u,
                                                            DopplerLaserLinewidth, ProbeLaserLinewidth, TEMP,
                                                            alpha, phidoppler, titadoppler, phiprobe, titaprobe,
                                                            BETA1, drivefreq,
                                                            min(freqs), max(freqs) + (freqs[1] - freqs[0]), freqs[1] - freqs[0],
                                                            circularityprobe=CircPr, plot=False, solvemode=1, detpvec=None)
    Detunings, Fluorescence2 = PerformExperiment_8levels_MM(SG, SP, gPS, gPD, DetDoppler, u,
                                                            DopplerLaserLinewidth, ProbeLaserLinewidth, TEMP,
                                                            alpha, phidoppler, titadoppler, phiprobe, titaprobe,
                                                            BETA2, drivefreq,
                                                            min(freqs), max(freqs) + (freqs[1] - freqs[0]), freqs[1] - freqs[0],
                                                            circularityprobe=CircPr, plot=False, solvemode=1, detpvec=None)
    Detunings, Fluorescence3 = PerformExperiment_8levels_MM(SG, SP, gPS, gPD, DetDoppler, u,
                                                            DopplerLaserLinewidth, ProbeLaserLinewidth, TEMP,
                                                            alpha, phidoppler, titadoppler, phiprobe, titaprobe,
                                                            BETA3, drivefreq,
                                                            min(freqs), max(freqs) + (freqs[1] - freqs[0]), freqs[1] - freqs[0],
                                                            circularityprobe=CircPr, plot=False, solvemode=1, detpvec=None)

    # ScaledFluo1 = np.array([f*SCALE1 for f in Fluorescence1])
    # ScaledFluo2 = np.array([f*SCALE2 for f in Fluorescence2])
    # ScaledFluo3 = np.array([f*SCALE3 for f in Fluorescence3])
    # return ScaledFluo1+ScaledFluo2+ScaledFluo3+OFFSET

    Fluorescence1 = np.array(Fluorescence1)
    Fluorescence2 = np.array(Fluorescence2)
    Fluorescence3 = np.array(Fluorescence3)
    return SCALE1*Fluorescence1 + SCALE2*Fluorescence2 + SCALE3*Fluorescence3 + OFFSET
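# Added note (editor's comment): FitEIT_MM models the merged spectrum as a weighted sum of three
# single-ion 8-level EIT/micromotion spectra that share the saturation parameters SG and SP but
# carry individual modulation indices (BETA1, BETA2, BETA3) and weights (SCALE1, SCALE2, SCALE3),
# plus a common OFFSET.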
#%% Here there is a fit of the 3-ion data that gives reasonable results
# FreqsDR, CountsDR = np.array(FreqsDR) , np.array(CountsDR)
# freqs = np.array(FreqsDR)
# t0 = time.time()
# print("Starting FIT 1")
# par_ini = [0.65, 7.06, 86070, 3917, 1.64]
# popt_1ions, pcov_1ions = curve_fit(FitEIT_MM1, FreqsDR, CountsDR, p0=par_ini, bounds=((0, 0, 0, 0, 0), (2, 20, 5e8, 7e3, 10)))
# print(f"time: {round(time.time()-t0,1)} seg")
# par_ini = popt_1ions.tolist()[:2]+[1/3]*2 + [0.1] +popt_1ions.tolist()[-1:]*3
# bounds = ((0, 0, 0, 0, 0, 0, 0, 0),
#           (2, 20, 1, 1, 1, 10, 10, 10))
# # bounds = (-np.inf, np.inf)
# print("Starting FIT 3")
# def fun(freqs, SG, SP, SCALE1, SCALE2, OFFSET, BETA1, BETA2, BETA3):
#     SCALE3 = max(1-SCALE2-SCALE1-OFFSET,0)
#     Fluorescence3 = FitEIT_MM(freqs, SG, SP, SCALE1, SCALE2, SCALE3, OFFSET, BETA1, BETA2, BETA3)
#     try:
#         l1.set_ydata(Fluorescence3/sum(Fluorescence3)*sum(CountsDR))
#     except:
#         print("First run")
#     finally:
#         print(SG, SP, SCALE1, SCALE2, OFFSET, BETA1, BETA2, BETA3)
#     plt.pause(0.1)
#     return Fluorescence3/sum(Fluorescence3)
# plt.figure()
# plt.errorbar(freqs, CountsDR, yerr=2*np.sqrt(CountsDR), fmt='o', color='darkgreen', alpha=0.5, capsize=2, markersize=2)
# l1, =plt.plot(freqs, fun(freqs, *par_ini )*sum(CountsDR))
# plt.draw()
# popt_3ions, pcov_3ions = curve_fit(fun, FreqsDR, CountsDR/CountsDR.sum(), p0=par_ini,bounds=bounds )
# print(f"time: {round(time.time()-t0,1)} seg")
# SG, SP, SCALE1, SCALE2, OFFSET, BETA1, BETA2, BETA3 = popt_3ions
# SCALE3 = max(1-SCALE2-SCALE1-OFFSET,0)
# # Fluorescence= FitEIT_MM(freqs, SG, SP, SCALE1, SCALE2, SCALE3, OFFSET, BETA1, BETA2, BETA3)
# Fluorescence= fun(freqs, SG, SP, SCALE1, SCALE2, OFFSET, BETA1, BETA2, BETA3)
# plt.plot(freqs, Fluorescence*sum(CountsDR))
# if False:
#     popt_3ions = np.array([0.70790618, 7.41165226, 0.1707309 , 0.13974759, 0.02322333,
#                            3.69298275, 3.68847693, 1.36216489])
#%% Set something up to allow fixed parameters

FreqsDR, CountsDR = np.array(FreqsDR), np.array(CountsDR)

# Single-ion fit to get reasonable starting values
t0 = time.time()
print("Starting FIT 1")
par_ini = [0.65, 7.06, 86070, 3917, 1.64]
popt_1ions, pcov_1ions, info1 = fit(FitEIT_MM1, FreqsDR, CountsDR, p0=par_ini,
                                    bounds=((0, 0, 0, 0, 0), (2, 20, 5e8, 7e3, 10)))
print(f"time: {round(time.time()-t0,1)} seg")

par_ini = popt_1ions.tolist()[:2] + [1/3]*2 + [0.1] + popt_1ions.tolist()[-1:]*3
bounds = ((0, 0, 0, 0, 0, 0, 0, 0),
          (2, 20, 1, 1, 1, 10, 10, 10))


def fun(freqs, SG, SP, SCALE1, SCALE2, OFFSET, BETA1, BETA2, BETA3):
    # The third weight is constrained so the normalized spectrum sums to one
    SCALE3 = max(1 - SCALE2 - SCALE1 - OFFSET, 0)
    Fluorescence3 = FitEIT_MM(freqs, SG, SP, SCALE1, SCALE2, SCALE3, OFFSET, BETA1, BETA2, BETA3)
    return Fluorescence3/sum(Fluorescence3)


par_ini = np.array([0.64872536, 7.06495239, 0.9985873, 0.9985873, 0.09090475,
                    1.63941009, 1.63941006, 1.80228481])
SG, SP, SCALE1, SCALE2, OFFSET, BETA1, BETA2, BETA3 = par_ini
SCALE1, SCALE2 = 0.3, 0.3
par_ini = SG, SP, SCALE1, SCALE2, OFFSET, BETA1, BETA2, BETA3

# First pass of the 3-ion fit: SG and SP held fixed
t0 = time.time()
print("Starting FIT 3")
popt_3ions, pcov_3ions, info2 = fit(fun, FreqsDR, CountsDR/sum(CountsDR), fixed="SG SP", plot=True,
                                    p0=par_ini, bounds=bounds)
print(f"time: {round(time.time()-t0,1)} seg")

# Second pass: start from the previous result and let every parameter float
par_ini = popt_3ions

t0 = time.time()
print("Starting FIT 3")
popt_3ions, pcov_3ions, info2 = fit(fun, FreqsDR, CountsDR/sum(CountsDR), fixed="", plot=True,
                                    p0=par_ini, bounds=bounds)
print(f"time: {round(time.time()-t0,1)} seg")
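# Added summary (editor's comment): the fitting strategy above is staged. FitEIT_MM1 (a
# single-ion model) is fitted first; the 3-ion model is then seeded with previously obtained
# parameter values, fitted once with SG and SP held fixed, and finally refitted with all
# parameters free, each time through the fit() wrapper defined above.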
analisis/plots/20231123_CPTconmicromocion3/lolo_graficos_pub.py @ 5d2ee8a5 (+18 −2)

Hunk @@ -677,6 +677,22 @@  (context: arr = mpatches.FancyArrowPatch((x0, y0*1.1), (x0/2, y0*1.1), ...)

Unchanged context:
    ax_dib.add_patch(arr)
    ax_dib.annotate("ion position", (.5, .5), xycoords=arr, ha='center', va='bottom', color='C1')

Added lines (draw a small x/y coordinate indicator on the ax_dib panel):
    bbox = ax_dib.get_window_extent().transformed(fig.dpi_scale_trans.inverted())
    aspect_ratio = bbox.width / bbox.height
    ax_dib.plot(0.1, 0, ">k", transform=ax_dib.transAxes, clip_on=False, ms=3, lw=1)
    ax_dib.plot([0, 0.1], [0, 0], "-k", transform=ax_dib.transAxes, clip_on=False, lw=1)
    ax_dib.plot(0, 0.1*aspect_ratio, "^k", transform=ax_dib.transAxes, clip_on=False, ms=3, lw=1)
    ax_dib.plot([0, 0], [0, 0.1*aspect_ratio], "-k", transform=ax_dib.transAxes, clip_on=False, lw=1)
    ax_dib.text(0.1, 0.13, "x", transform=ax_dib.transAxes, fontsize=8, va='top')
    ax_dib.text(0.03, 0.13*aspect_ratio, "y", transform=ax_dib.transAxes, fontsize=8, va='top')

Unchanged context:
    # Leyenda ##############################
    # h1, l1 = ax_central.get_legend_handles_labels()
    # h2, l2 = axx[0,0].get_legend_handles_labels()

Hunk @@ -688,8 +704,8 @@  (context: fig.align_ylabels([ax_central,ax_res]))

Unchanged context:
    fig.tight_layout()

Changed lines (the two savefig calls are uncommented):
    - # fig.savefig('grafico_central_opcion_B.png', dpi=300)
    + fig.savefig('grafico_central_opcion_B.png', dpi=300)
    - # fig.savefig('grafico_central_opcion_B.pdf')
    + fig.savefig('grafico_central_opcion_B.pdf')