functions.py 26.6 KB
Newer Older
1
# -*- coding: utf-8 -*-
2
"""Basic DataModule functions.
3

4
5
This contains functions typically used for the datamodule, like loading and
saving.
6
"""
7

8
9
10
import glob
import re
import os
11
12
import pickle
import numpy as np
13
import pandas as pd
14
from .data_xy import data_xy
15
from .data_surface import data_surface
16
from .data_complex import data_complex
17
18
from .data_grid import data_grid
from .data_table import data_table
19
from .data_line import data_line
Raffi's avatar
Raffi committed
20
from .data_IQ import data_IQ
21
22
import matplotlib.pyplot as plt
import matplotlib
23
from .version import __version__
24
from bokeh.palettes import Category10_10
25
import bokeh.plotting as bp
26

27
# Datamodule classes considered "current"; upgrade_dm() converts any loaded
# object that is not an instance of one of these types.
current_types = (data_xy, data_grid, data_complex, data_table)
28
# Function Library ############################################################
29
def load_datamodule(filename, upgrade=True):
    """Load a datamodule from a pickle file on disk.

    Parameters
    ----------
    filename : str
        Filepath
    upgrade : bool
        Upgrade datamodule to newest version

    Returns
    -------
    DataModule
        Returns a datamodule
    """
    # NOTE(review): pickle.load executes arbitrary code during
    # deserialization -- only open trusted .dm files.
    with open(filename, 'rb') as f:
        a = pickle.load(f)

    if upgrade:
        return upgrade_dm(a)
    return a
52

53
def plot_multiple(dm_list, label_list, figure=None, colormap=None,
                  engine="bokeh", **kwargs):
    """Plot multiple Datamodules in a single bokeh/pyplot figure.

    Parameters
    -----------
    dm_list : list, array
        List of datamodules: [dm1, dm2, ...]
    label_list : list, array
        List of labels for the plot: ["Data of dm1", "Data of dm2"]
    figure : object
        Optional: Figure to plot in
    colormap : List of colors
        Optional: Bokeh colorpalette. You have to import it first from
        bokeh.palettes or specify itself. Format: ["#FFFFFF", "#01245", ...]
    engine : str
        Optional: Specify if Plot should be in bokeh or pyplot
    kwargs : keywords
        Optional: Keywords for bokeh or pyplot
    """
    # Default colormap: bokeh's 10-color categorical palette
    if colormap is None:
        colormap = Category10_10

    # Any engine string starting with "p" (e.g. "pyplot") selects matplotlib;
    # everything else falls through to bokeh.
    if engine.lower()[0] == "p":
        # Plot in pyplot
        fig = plt.figure() if figure is None else figure
        for d, l, c in zip(dm_list, label_list, colormap):
            d.plot(fig=fig, legend=l, color=c, engine="p", **kwargs)
        plt.legend(label_list)
    else:
        # Plot in bokeh
        if 'muted_alpha' not in kwargs:
            # Muted curves become fully invisible when not otherwise specified
            kwargs['muted_alpha'] = 0.0

        fig = bp.figure(width=800, height=500) if figure is None else figure
        for d, l, c in zip(dm_list, label_list, colormap):
            d.plot(fig=fig, legend=l, color=c, **kwargs)

        # Clicking a legend entry mutes the corresponding curve
        fig.legend.click_policy = "mute"
        fig.legend.location = "top_left"
        bp.show(fig)
101

Christian Schneider's avatar
Christian Schneider committed
102
def load_folder(folderpath, word_search=None, dm_pars=None,
                regexp=r'[\w+\-.@<>]+.dm$', sort=True, sort_par=None):
    r"""Loads all datamodules from a specified folder path.

    Accepts a keyword to filter files or a regular expression to obtain
    parameters from filename.

    Sorts the datamodules by the first given parameter. Either in dm_pars
    or in the regular expression. If you wish to not sort it, pass `sort`
    False as argument.

    Examples
    ---------
    >>> dm.load_folder('./fluxsweep/', word_search='NewRun')
    >>> dm.load_folder('./fluxsweep/',
    ...                regexp=r'[\w+\-.]+I_(?P<current>[\d\-]+)[\w+\-.]')

    Parameters
    ----------
    folderpath : str
        Path to the folder
    word_search : str, optional
        Only files containing this string will be added
    dm_pars : list(str), optional
        Extract this parameters out of the datamodules.
        The first parameter is the sorting parameter
    regexp : str, optional
        Parameter to specify how the file name should look like. If given a
        parameter with (?P<par>) it will be added to the dm_pars.
        https://docs.python.org/3/library/re.html
    sort : bool
        Sort the datamodules according to first parameter
    sort_par : str, optional
        Sort by this parameter instead of the first entry of dm_pars

    Returns
    --------
    tuple(list(DataModule), dict)
        Returns a tuple. First element is a sorted list of datamodules, the
        second argument is a dictionary with the sorted parameters
    """
    # User mistake handling
    if dm_pars and not isinstance(dm_pars, (list, tuple)):
        print('dm_pars must be a list or a tuple of strings')
        raise Exception('PARTYPE')
    # Work on a list copy so a caller-supplied sequence is never mutated
    # when parameters found via regexp groups are appended below.
    dm_pars = list(dm_pars) if dm_pars else []

    # Read files
    fpath = os.path.abspath(folderpath)
    filelist = [os.path.basename(x) for x in glob.glob(fpath + '/*.dm')]
    dm_list = []
    pars = {}
    # Keyword filter, if given
    if word_search:
        filelist = [f for f in filelist if word_search in f]
    # Compile once; match every remaining file against it
    pattern = re.compile(regexp)
    for f in filelist:
        m = pattern.match(f)
        if m:
            # Collect parameters encoded in the filename as named groups
            for p, p_val in m.groupdict().items():
                try:
                    pars[p].append(p_val)
                except KeyError:
                    dm_pars.append(p)
                    pars[p] = [p_val]

            # Append loaded datamodule
            dm_list.append(load_datamodule(fpath + '/' + f))
            # Append parameters from parameter list
            for p in dm_pars:
                # Skip parameters already extracted from the filename
                if p not in m.groupdict():
                    try:
                        par = dm_list[-1].par[p]
                    except KeyError:
                        print('File {} has no parameter {}'.format(f, p))
                        raise Exception('Parameter not found.')
                    # Append parameter to dict; create the key on first use
                    try:
                        pars[p].append(par)
                    except KeyError:
                        pars[p] = [par]

    # Sort if a parameter is given/found by the first parameter in the list
    dm_list = np.array(dm_list)
    if sort and len(dm_pars) > 0:
        order_par = dm_pars[0] if sort_par is None else sort_par
        pars[order_par] = np.array(pars[order_par], dtype=float)  # for sorting
        ordered_idxs = np.argsort(pars[order_par])
        # Order datamodules
        dm_list = dm_list[ordered_idxs]
        # Order parameters
        for p in dm_pars:
            pars[p] = np.array(pars[p])
            pars[p] = pars[p][ordered_idxs]

    return dm_list, pars
210

211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
def load_csv(filename, dmtype=None, delimiter=',', **kwargs):
    """Load data from a .csv file using pandas and create a datamodule.

    Parameters
    ----------
    filename : str
        Filename of csv
    dmtype : str
        Choose from 'data_line', 'data_complex', 'data_surface'
        Used data type for datamodule. If None, it will try
        to figure out the data type automatically from the csv
        structure.
    delimiter : str
        Delimiter for .csv file
    **kwargs
        Keywords for pandas for csv import

    Returns
    -------
     DataModule
        A datamodule best fitting to the csv data or specified by dmtype
    """
    df = pd.read_csv(filename, delimiter=delimiter, **kwargs)
    header = list(df.columns.values)
    n_cols = df.shape[1]
    values = df.values

    if not dmtype:
        # Guess the datamodule type from the table layout
        if n_cols == 2:
            dmtype = 'data_line'
        elif n_cols == 3:
            # Not so nice, better use regular expression in future
            dmtype = ('data_complex' if header[1][:2] == 're'
                      else 'data_surface')
        else:
            raise Exception('Could not determine csv datatype.')

    if dmtype == 'data_line':
        if n_cols != 2:
            print('Warning: More than two columns in csv file. Neglected them')
        dm_line = data_xy(values[:, 0], values[:, 1])
        # Header is preserved in the comments field
        dm_line.comments = header[0] + '||' + header[1]
        return dm_line

    elif dmtype == 'data_complex':
        if n_cols != 3:
            print('CSV has not three columns.')
            raise Exception('Wrong datamodule format')
        dm_cplx = data_complex(values[:, 0],
                               values[:, 1] + 1j * values[:, 2])
        # Header is preserved in the comments field
        dm_cplx.comments = header[0] + '||' + header[1] + '||' + header[2]
        return dm_cplx

    elif dmtype == 'data_surface':
        if n_cols != 3:
            print('Warning: More than 3 columns in csv file. Neglected them')
        dm_surf = data_surface(values[:, 0], values[:, 1], values[:, 2])
        # Header is preserved in the comments field
        dm_surf.comments = header[0] + '||' + header[1] + '||' + header[2]
        return dm_surf
276
277


278
def data_stack_x(*args):
    """Stack datamodules along the x axis.

    It returns a new data module that contains the same parameters of the
    first argument.

    Automatically detects data_line and data_surface.
    Automatically sorts data along x.

    Examples
    --------
        >>> total = data_stack_x((data1,data2,data3,...))
    """
    # Helper functions
    def average_duplicates(frequencies, values):
        """Average values whose frequencies coincide."""
        folded, indices, counts = np.unique(frequencies, return_inverse=True,
                                            return_counts=True)
        output = np.zeros(folded.shape[0], dtype=type(values[0]))
        # Sum all values that belong to the same frequency, then divide by
        # the multiplicity to obtain the mean.
        np.add.at(output, indices, values)
        output /= counts
        return folded, output

    data_type = args[0][0]
    datas = list(args[0])
    # Copy once, otherwise the caller's first datamodule would be modified
    # (the original code copied twice, redundantly).
    tmp = datas.pop(0).copy()
    for a in datas:
        # Go through the other modules and append their data
        if not isinstance(a, type(data_type)):
            print('Error, the data type are different')
            raise Exception('TYPE Mismatch')

        tmp.x = np.hstack((tmp.x, a.x))

        # Data2d
        if isinstance(data_type, data_xy):
            tmp.y = np.hstack((tmp.y, a.y))
        # Complex data
        elif isinstance(data_type, data_complex):
            tmp.value = np.hstack((tmp.value, a.value))
        # Data3d
        elif isinstance(data_type, data_surface):
            tmp.z = np.hstack((tmp.z, a.z))

    # Sort frequencies
    idxs_sorted = np.argsort(tmp.x)
    tmp.x = tmp.x[idxs_sorted]

    # Data2d
    if isinstance(data_type, data_xy):
        tmp.y = tmp.y[idxs_sorted]
        tmp.x, tmp.y = average_duplicates(tmp.x, tmp.y)
    # Complex data
    elif isinstance(data_type, data_complex):
        tmp.value = tmp.value[idxs_sorted]
        tmp.x, tmp.value = average_duplicates(tmp.x, tmp.value)
    # Data3d
    elif isinstance(data_type, data_surface):
        tmp.z = tmp.z[idxs_sorted]
        # TODO: implement averaging of duplicate x values for surfaces
    else:
        print('No routing for this kind of data')
        return None

    tmp.select()
    return tmp


def average_data(*args):
    """Perform weighted average of multiple datamodules.

    This function performs the weighted average of the data passed as an
    argument, the data must be given in a tuple/list together with the
    weight (1 = normal average).

    The function returns a new data module that contains the same
    parameters of the first argument, it automatically detects different
    data types.

    Parameters
    ----------
    *args
        Tuple of tuples. First element of each tuple is a datamodule, the
        second the weighting of this datamodule for averaging

    Returns
    --------
    DataModule
        DataModule of the same type for the averaged data

    Examples
    --------
        >>> average = average_data((data1,3),(data2,5))
        >>> average = average_data((data1,1),(data2,1))
    """
    if isinstance(args[0][0], data_xy):
        tmp = args[0][0].copy()
        tmp_ysel = tmp.return_ysel()
        tmp_xsel = tmp.return_xsel()
        acc = np.zeros(len(tmp_ysel))
        weight = 0
        for count, a in enumerate(args):
            if not isinstance(a[0], data_xy):
                print('Error, the data type are different')
                raise Exception('TYPE Mismatch')

            if not (a[0].return_xsel() == tmp_xsel).all():
                print('ERROR: x-axis must be the same: '+str(count))
                raise Exception('WRONGAXIS')

            # Accumulate weighted y values and the total weight
            acc += a[0].return_ysel()*a[1]
            weight += a[1]

        tmp.load_var(tmp_xsel, acc/weight)
        return tmp

    elif isinstance(args[0][0], data_complex):
        tmp = args[0][0].copy()
        # BUGFIX: np.complex was deprecated and removed in NumPy 1.24;
        # the builtin `complex` is the documented replacement.
        acc = np.zeros(len(tmp.return_vsel()), dtype=complex)
        weight = 0
        for count, a in enumerate(args):
            if not isinstance(a[0], data_complex):
                print('Error, the data type are different')
                raise Exception('TYPE Mismatch')

            if not (a[0].return_xsel() == tmp.return_xsel()).all():
                print('ERROR: x-axis must be the same: '+str(count))
                raise Exception('WRONGAXIS')

            # Accumulate weighted complex values and the total weight
            acc += a[0].return_vsel()*a[1]
            weight += a[1]

        tmp.load_cplx_var(tmp.return_xsel(), acc/weight)
        return tmp

    elif isinstance(args[0][0], data_surface):
        tmp = args[0][0].copy()
        acc = np.zeros((len(tmp.return_xsel()), len(tmp.return_ysel())))
        weight = 0
        for count, a in enumerate(args):
            if not isinstance(a[0], data_surface):
                print('Error, the data type are different')
                raise Exception('TYPE Mismatch')

            if not (a[0].return_xsel() == tmp.return_xsel()).all():
                print('ERROR: x-axis must be the same: '+str(count))
                raise Exception('WRONGAXIS')

            if not (a[0].return_ysel() == tmp.return_ysel()).all():
                print('ERROR: y-axis must be the same: '+str(count))
                raise Exception('WRONGAXIS')

            # Accumulate weighted z values and the total weight
            acc += a[0].return_zsel()*a[1]
            weight += a[1]

        tmp.load_var(tmp.return_xsel(), tmp.return_ysel(),  acc/weight)
        return tmp
    else:
        print('No routing for this kind of data')
        return None


def cleandat(x, y):
    """Autoclean data.

    Take a two row data matrix (x,y) and do the following:
    1. Sort data in ascending x order
    2. Find unique values of x and calculate average y
    3. Add a third row with standard deviation for multiple data
    (indication of statistical error)

    Parameters
    ----------
    x : list
        X values. Both arrays get sorted according to ascending x order
    y : list
        Y values.

    Returns
    --------
    tuple(list, list, list)
        x, y, y_errs
        Outputs the processed x and y array and gives also the std deviations
    """
    x = np.array(x)
    y = np.array(y)

    # Sort both arrays by ascending x
    x_sort_idx = np.argsort(x)
    x_srt = x[x_sort_idx]
    y_srt = y[x_sort_idx]

    # Find unique values and average for same frequency
    folded, indices, counts = np.unique(x_srt, return_inverse=True,
                                        return_counts=True)
    # Average if multiple y values exist: sum per unique x, divide by count
    output = np.zeros(folded.shape[0])
    np.add.at(output, indices, y_srt)
    output /= counts

    # Calculate std deviations if multiple values exist
    # (population std dev of the y values mapped onto the same x)
    y_err = np.zeros_like(output, dtype=np.float64)
    np.add.at(y_err, indices, (y_srt - output[indices])**2)
    y_err = np.sqrt(y_err/counts)

    return folded, output, y_err


def split(vector, range_vector):
    """Select the elements of ``vector`` that fall inside given ranges.

    ``range_vector`` is a flat sequence of (low, high) pairs, e.g.
    ``[l0, h0, l1, h1]``; it must therefore have an even number of
    elements. Bounds are inclusive on both sides.

    Parameters
    ----------
    vector : array-like
        Values to filter (indexed with the boolean mask, so an
        ndarray is expected).
    range_vector : array-like
        Flat sequence of interval bounds; length must be even.

    Returns
    -------
    tuple(ndarray, ndarray)
        Boolean mask over ``vector`` and the selected elements.

    Raises
    ------
    Exception
        If ``range_vector`` has an odd number of elements.

    TODO
    ----
    Do we need this function? And for what purpose?
    """
    # Convert input to row vectors
    x = np.array(vector).ravel()
    ranges = np.array(range_vector).ravel()

    # check if "ranges" has even elements (guard clause)
    if len(range_vector) & 1:
        raise Exception('Number of range elements must be even.')

    # Boolean mask: True where x lies inside any [low, high] interval.
    # (The original used a float masked array here by accident.)
    idx = np.zeros(len(x), dtype=bool)
    for i in range(len(ranges) // 2):
        idx |= np.logical_and(x >= ranges[2*i], x <= ranges[2*i+1])
    return idx, vector[idx]


515
516
def upgrade_dm(old_dm):
    """Upgrade datamodules to newest version.

    Function to convert old datamodule to current datamodule.

    Parameters
    ----------
    old_dm : DataModule
        DataModule to upgrade

    Note
    -----
    The fit will be lost

    Returns
    --------
    DataModule
        Updated datamodule.
    """
    # Determine the version of the loaded datamodule
    try:
        vers_f = [int(x) for x in old_dm.version.split('.')]
    except (AttributeError, ValueError):
        # Old datamodule had no (parseable) version number --> assign 0.0.0
        vers_f = [0, 0, 0]

    new_vers = [int(x) for x in __version__.split('.')]

    if vers_f > new_vers:
        print('Loaded Datamodule newer than your installed version\n' +
              'Please update python_repo (git pull)')

    elif vers_f < new_vers or not isinstance(old_dm, current_types):
        # Datamodule is older than the installed version (or a legacy
        # type): rebuild it as the corresponding current type.
        if isinstance(old_dm, (data_xy, data_line)):
            # Data line
            data_new = data_xy(old_dm.x, old_dm.y)
            try:
                data_new.xmin = old_dm.xmin
                data_new.xmax = old_dm.xmax  # Selected range
            except AttributeError:
                pass
            # Sort data
            # TODO
            # Copy an executed fit, if any
            try:
                tmp = old_dm._fit_function_code
                data_new._fit_executed = True
                data_new._fit_function = old_dm._fit_function
                data_new._fit_function_code = tmp
                data_new._fit_parameters = old_dm._fit_parameters
                data_new._fit_par_errors = old_dm._fit_par_errors
                data_new._fit_data_error = old_dm._fit_data_error
                data_new._fit_labels = old_dm._fit_labels
            except AttributeError:
                pass

        elif isinstance(old_dm, (data_grid, data_surface)):
            data_new = data_grid([old_dm.x, old_dm.y, old_dm.z.T],
                                 ['x', 'y', 'z'])

        elif isinstance(old_dm, data_complex):
            data_new = data_complex(old_dm.x, old_dm.value)
            try:
                data_new.xmin = old_dm.xmin
                data_new.xmax = old_dm.xmax
                data_new.sellen = old_dm.sellen
                data_new._circle_fit = old_dm._circle_fit
                data_new.fitresults = old_dm.fitresults
                # BUGFIX: was a self-assignment
                # (data_new.fitresults_full_model = data_new.fitresults_full_model)
                data_new.fitresults_full_model = old_dm.fitresults_full_model
                data_new.dB = old_dm.dB
                data_new.phase = old_dm.phase
            except AttributeError:
                # Legacy module lacking these attributes: recompute
                # magnitude (dB) and unwrapped phase (degrees) from value
                data_new.dB = data_xy(old_dm.x,
                                      20 * np.log10(np.abs(old_dm.value)))
                data_new.phase = data_xy(old_dm.x,
                                         np.unwrap(np.angle(old_dm.value)) *
                                         180 / np.pi)
        elif isinstance(old_dm, data_IQ):
            data_new = old_dm.copy()
            data_new.version = __version__
        else:
            print('Datamodule type not recognized')
            raise Exception('TypeError')

        # Copy the common metadata
        data_new.par = old_dm.par
        data_new.comments = old_dm.comments
        data_new.temp_start = old_dm.temp_start
        data_new.temp_stop = old_dm.temp_stop
        data_new.temp_start_time = old_dm.temp_start_time
        data_new.temp_stop_time = old_dm.temp_stop_time
        data_new.time_start = old_dm.time_start
        data_new.time_stop = old_dm.time_stop

        try:
            data_new.date_format = old_dm.date_format
            data_new.save_date_format = old_dm.save_date_format
        except AttributeError:
            pass

        # \r moves the cursor back to the line start so consecutive
        # conversions overwrite each other instead of spamming the output.
        try:
            print('\rConversion {} --> {} executed'.format(old_dm.version,
                                                           data_new.version),
                  end=' ', flush=True)
        except AttributeError:
            print('\rConversion Unknown version --> {} executed'
                  .format(data_new.version), end=' ', flush=True)
        return data_new

    else:
        # No need to upgrade
        return old_dm
635

636

637
# TODO
638
639
640
641
642
643
644
645
646
647
###############################################################################
# NEED RETHINKING #############################################################
def data_stack_y(*args):
    """
        This function cannot work, because you stack Frequencies horizontally
        and stack the z value vertically.

        Ideas: - Use vertical x-axis. (can lead to complications with existing
                 functions)
    """
    # NOTE(review): here `data_type` is an *instance*, so the
    # `type(a) != data_type` check in this first branch compares a type
    # object to an instance and is always True -- any second module raises.
    # Left as-is because the whole function is quarantined (see docstring
    # and the "NEED RETHINKING" banner above).
    data_type = args[0][0]
    if isinstance(data_type, data_surface):
        datas = list(args[0])
        tmp = datas.pop(0)
        tmp = tmp.copy()  # copy so the caller's first module is not modified

        for count in range(len(datas)):
            a = datas.pop(0)
            if type(a) != data_type:
                print('Error, the data type are different')
                raise Exception('TYPE Mismatch')

            if not (a.x == tmp.x).all():
                print('Error: x-axis must be the same: '+str(count))
                # NOTE(review): returns the exception instead of raising it
                return Exception('WRONGAXIS')

            tmp.y = np.hstack((tmp.y, a.y))
            tmp.z = np.vstack((tmp.z, a.z))
        return tmp

    # From here on `data_type` is a proper type object, so the checks below
    # behave as intended.
    data_type = type(args[0][0])
    if data_type is type(data_xy()):
        # all modules must be the same type
        datas = list(args[0])
        tmp = datas.pop(0)
        tmp = tmp.copy() # copy, otherwise the argument will be modified

        for count in range(len(datas)):
            a = datas.pop(0)
            if type(a) != data_type:
                print('Error, the data type are different')
                raise Exception('TYPE Mismatch')

            if not (a.x == tmp.x).all():
                print('ERROR: x-axis must be the same: '+str(count))
                raise Exception('WRONGAXIS')

            tmp.x = np.hstack((tmp.x,a.x))
            tmp.y = np.hstack((tmp.y,a.y))

        tmp.select()
        return tmp

    elif data_type is type(data_complex()):
        # all modules must be the same type
        datas = list(args[0])
        tmp = datas.pop(0)
        tmp = tmp.copy() # copy, otherwise the argument will be modified

        for count in range(len(datas)):
            a = datas.pop(0)
            if type(a) != data_type:
                print('Error, the data type are different')
                raise Exception('TYPE Mismatch')

            if not (a.x == tmp.x).all():
                print('ERROR: x-axis must be the same: '+str(count))
                raise Exception('WRONGAXIS')

            tmp.x = np.hstack((tmp.x,a.x))
            tmp.value = np.hstack((tmp.value,a.value))

        tmp.select()
        return tmp

    elif data_type is type(data_surface()):
        datas = list(args[0])
        tmp = datas.pop(0)
        tmp = tmp.copy() # copy, otherwise the argument will be modified

        for count in range(len(datas)):
            a = datas.pop(0)
            if type(a) != data_type:
                print('Error, the data type are different')
                raise Exception('TYPE Mismatch')

            if not (a.x == tmp.x).all():
                print('Error: x-axis must be the same: '+str(count))
                # NOTE(review): returns the exception instead of raising it
                return Exception('WRONGAXIS')

            tmp.y = np.hstack((tmp.y,a.y))
            tmp.z = np.vstack((tmp.z,a.z))

        return tmp
    else:
        print('No routing for this kind of data')
        return None

def nice_plot(figsize=(15,15),plot_style='bo',xlabel='',ylabel='',title='',fontsize=22,xoffset=None, yoffset=None,fontname='Bitstream Vera Serif',box_param=None):
        '''
        functionf nice_plot(figsize=(15,15),plot_style='bo',xlabel='',ylabel='',title='',fontsize=22,xoffset=None, yoffset=None,fontname='Bitstream Vera Serif',legend=None,legend_pos=0,box_param=None):
        
        given a data module 2D, this function can be used to plot the data in a nicer way (for talks or articles). If the data has been fitted, it will also print the fit (it can be disabled, def is on).

                
        xoffset and yoffset can be used to use a scientific notation on the axis
        fontname='Bitstream Vera Serif' HINT: Use font_list() to have a list of the available fonts

        legend_pos: 0 is auto, 1 to 4 are the angles
        legend: must be a list of two strings, ex: ['data','fit']
        box_param: must be a list of 4 coordinates and a text, ex: ([0.01,0.01,0.1,0.1], 'testing the box')
751
752
753
        The first two coordinates are the position of the top-left angle in % of the total plot size.
        The others are width and length, always in total % plot size
        '''
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
        plt.rcParams.update({'font.size':fontsize})    
        plt.figure(figsize=figsize)
        
        

        
        plt.xlabel(xlabel,fontname=fontname),plt.ylabel(ylabel,fontname=fontname)

        
        plt.title(title,fontname=fontname)    
        
        # axis ticks and tick labels
        
        plt.tick_params(axis='both', length=6, width=2, pad=10)
        # axis ticks and tick labels
        
        tickfonts = matplotlib.font_manager.FontProperties(family=fontname,size=fontsize)
        tmp = plt.axes()
        for label in (tmp.get_xticklabels() + tmp.get_yticklabels()):
            label.set_fontproperties(tickfonts)
        
        #plt.xaxis.get_major_formatter().set_useOffset(False)
        #plt.yaxis.get_major_formatter().set_useOffset(False)
        if xoffset is None:        
            xconf = plt.ScalarFormatter(useOffset=False)
        else:
            xconf = plt.ScalarFormatter(useOffset=True)
            xconf.set_useOffset(xoffset)
            
        if yoffset is None:        
            yconf = plt.ScalarFormatter(useOffset=False)
        else:
            yconf = plt.ScalarFormatter(useOffset=True)
            yconf.set_useOffset(yoffset)
        
        tmp.xaxis.set_major_formatter(xconf)
        tmp.yaxis.set_major_formatter(yconf)
        if box_param != None:
              plt.text(box_param[0][0],box_param[0][1],box_param[1],size=24,bbox=dict(boxstyle='square',fc=(1,1,1)))
              #plt.text(0.1,0.16,box_param[1],fontname=fontname)
              #plt.setp(box,xlim=(0,2),xticks=[],yticks=[])
795
              #plt.subplot(111)