# ============================================================================ # # PUBLIC DOMAIN NOTICE # # National Institute on Deafness and Other Communication Disorders # # This software/database is a "United States Government Work" under the # terms of the United States Copyright Act. It was written as part of # the author's official duties as a United States Government employee and # thus cannot be copyrighted. This software/database is freely available # to the public for use. The NIDCD and the U.S. Government have not placed # any restriction on its use or reproduction. # # Although all reasonable efforts have been taken to ensure the accuracy # and reliability of the software and data, the NIDCD and the U.S. Government # do not and cannot warrant the performance or results that may be obtained # by using this software or data. The NIDCD and the U.S. Government disclaim # all warranties, express or implied, including warranties of performance, # merchantability or fitness for any particular purpose. # # Please cite the author in any work or product based on this material. # # ========================================================================== # *************************************************************************** # # Large-Scale Neural Modeling software (LSNM) # # Section on Brain Imaging and Modeling # Voice, Speech and Language Branch # National Institute on Deafness and Other Communication Disorders # National Institutes of Health # # This file (func_conn_fmri_visual.py) was created on July 10, 2015. # # Based in part on Matlab scripts by Horwitz et al. # # Author: Antonio Ulloa # # Last updated by Antonio Ulloa on August 13, 2015 # **************************************************************************/ # func_conn_fmri_visual.py # # Calculate and plot functional connectivity (within-task time series correlation) # of the BOLD timeseries in IT with the BOLD timeseries of all other simulated brain # regions. 
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import math as m
from scipy.stats import poisson

# define the length of both each trial and the whole experiment
# in synaptic timesteps, as well as total number of trials
experiment_length = 3960
trial_length = 110
number_of_trials = 36

# number of synaptic samples in the experiment; re-derived later from the
# actual data arrays once they are loaded
synaptic_timesteps = experiment_length

# define the name of the output file where the functional connectivity timeseries will be stored
func_conn_dms_file = 'corr_fmri_IT_vs_all_dms_poisson.npy'
func_conn_ctl_file = 'corr_fmri_IT_vs_all_ctl_poisson.npy'

# define an array with location of control trials, and another array
# with location of task-related trials, relative to
# an array that contains all trials (task-related trials included)
# because we lose two trials at the beginning of the fmri experiment, we
# are left with only one trial during the first DMS block
# Therefore, out of 36 trials total we were left with only 34 trials,
# each trial lasting approximately ~3 MR ticks (2.75)
control_trials = [3,4,5,9,10,11,15,16,17,21,22,23,27,28,29,33,34,35]
dms_trials = [0,1,2,6,7,8,12,13,14,18,19,20,24,25,26,30,31,32]

# define neural synaptic time interval in seconds. The simulation data is collected
# one data point at synaptic intervals (10 simulation timesteps). Every simulation
# timestep is equivalent to 5 ms.
Ti = 0.005 * 10

# Poisson rate parameter (mean) of the hemodynamic response function.
# NOTE(review): the original comment said "in milliseconds", but lambda_ is
# evaluated below against time_in_seconds, a time axis in seconds -- confirm.
lambda_ = 6

# Total time of scanning experiment in seconds (timesteps X 5)
T = 198

# Time for one complete trial (110 timesteps x 50 ms = 5.5)
# NOTE(review): original comment said milliseconds; the arithmetic above
# puts this value in seconds -- confirm.
Ttrial = 5.5

# the scanning happened every Tr interval below. This is the time needed to
# sample hemodynamic activity to produce each fMRI image (the TR).
# NOTE(review): Tr is used as a step on the seconds-valued time axis
# np.arange(0, T, Tr), so it is seconds, not milliseconds as originally stated.
Tr = 2

# the following ranges define the location of the nodes within a given ROI in Hagmann's brain.
# They were taken from the excel document:
# "Hagmann's Talairach Coordinates (obtained from TVB).xlsx"
# Extracted from The Virtual Brain Demo Data Sets
# Please note that arrays in Python start from zero so one does need to account for that and shift
# indices given by the above document by one location.

# Use 6 nodes within rPCAL
v1_loc = range(344, 350)        # Hagmann's brain nodes included within V1 ROI
# Use 6 nodes within rFUS
v4_loc = range(390, 396)        # Hagmann's brain nodes included within V4 ROI
# Use 6 nodes within rPARH
it_loc = range(412, 418)        # Hagmann's brain nodes included within IT ROI
# Use 6 nodes within rRMF
d1_loc = range(73, 79)          # Hagmann's brain nodes included within D1 ROI
# Use 6 nodes within rPTRI
d2_loc = range(39, 45)          # Hagmann's brain nodes included within D2 ROI
# Use 6 nodes within rPOPE
fs_loc = range(47, 53)          # Hagmann's brain nodes included within FS ROI
# Use 6 nodes within rCMF
fr_loc = range(125, 131)        # Hagmann's brain nodes included within FR ROI
# Use 6 nodes within lPARH
lit_loc= range(911, 917)        # Hagmann's brain nodes included within left IT ROI

# Load TVB nodes synaptic activity.
# Assumed axis layout: (time, excitatory/inhibitory, node, mode) -- the code
# below slices axis 1 with 0 for excitatory and 1 for inhibitory; TODO confirm
# against the script that produced tvb_synaptic.npy.
tvb_synaptic = np.load("tvb_synaptic.npy")

# Load TVB host node synaptic activities into separate numpy arrays.
# v1_loc[0]:v1_loc[-1]+1 simply converts each ROI range into the equivalent
# contiguous slice (works under both Python 2 lists and Python 3 ranges).
tvb_ev1 = tvb_synaptic[:, 0, v1_loc[0]:v1_loc[-1]+1, 0]
tvb_ev4 = tvb_synaptic[:, 0, v4_loc[0]:v4_loc[-1]+1, 0]
tvb_eit = tvb_synaptic[:, 0, it_loc[0]:it_loc[-1]+1, 0]
tvb_ed1 = tvb_synaptic[:, 0, d1_loc[0]:d1_loc[-1]+1, 0]
tvb_ed2 = tvb_synaptic[:, 0, d2_loc[0]:d2_loc[-1]+1, 0]
tvb_efs = tvb_synaptic[:, 0, fs_loc[0]:fs_loc[-1]+1, 0]
tvb_efr = tvb_synaptic[:, 0, fr_loc[0]:fr_loc[-1]+1, 0]
tvb_iv1 = tvb_synaptic[:, 1, v1_loc[0]:v1_loc[-1]+1, 0]
tvb_iv4 = tvb_synaptic[:, 1, v4_loc[0]:v4_loc[-1]+1, 0]
tvb_iit = tvb_synaptic[:, 1, it_loc[0]:it_loc[-1]+1, 0]
tvb_id1 = tvb_synaptic[:, 1, d1_loc[0]:d1_loc[-1]+1, 0]
tvb_id2 = tvb_synaptic[:, 1, d2_loc[0]:d2_loc[-1]+1, 0]
tvb_ifs = tvb_synaptic[:, 1, fs_loc[0]:fs_loc[-1]+1, 0]
tvb_ifr = tvb_synaptic[:, 1, fr_loc[0]:fr_loc[-1]+1, 0]

# now extract synaptic activity in the contralateral IT
tvb_elit = tvb_synaptic[:, 0, lit_loc[0]:lit_loc[-1]+1, 0]
tvb_ilit = tvb_synaptic[:, 1, lit_loc[0]:lit_loc[-1]+1, 0]

# Load LSNM synaptic activity data files into numpy arrays
# (presumably one row per synaptic timestep, one column per unit --
# TODO confirm against the simulator's output format)
ev1h = np.loadtxt('ev1h_synaptic.out')
ev1v = np.loadtxt('ev1v_synaptic.out')
iv1h = np.loadtxt('iv1h_synaptic.out')
iv1v = np.loadtxt('iv1v_synaptic.out')
ev4h = np.loadtxt('ev4h_synaptic.out')
ev4c = np.loadtxt('ev4c_synaptic.out')
ev4v = np.loadtxt('ev4v_synaptic.out')
iv4h = np.loadtxt('iv4h_synaptic.out')
iv4c = np.loadtxt('iv4c_synaptic.out')
iv4v = np.loadtxt('iv4v_synaptic.out')
exss = np.loadtxt('exss_synaptic.out')
inss = np.loadtxt('inss_synaptic.out')
efd1 = np.loadtxt('efd1_synaptic.out')
ifd1 = np.loadtxt('ifd1_synaptic.out')
efd2 = np.loadtxt('efd2_synaptic.out')
ifd2 = np.loadtxt('ifd2_synaptic.out')
exfs = np.loadtxt('exfs_synaptic.out')
infs = np.loadtxt('infs_synaptic.out')
exfr = np.loadtxt('exfr_synaptic.out')
infr = np.loadtxt('infr_synaptic.out')

# add all units WITHIN each region together across space to calculate
# synaptic activity in EACH brain region (LSNM units plus the hosting
# TVB nodes, excitatory and inhibitory alike)
v1_syn = np.sum(ev1h + ev1v + iv1h + iv1v, axis = 1) + np.sum(tvb_ev1+tvb_iv1, axis=1)
v4_syn = np.sum(ev4h + ev4v + ev4c + iv4h + iv4v + iv4c, axis = 1) + np.sum(tvb_ev4+tvb_iv4, axis=1)
it_syn = np.sum(exss + inss, axis = 1) + np.sum(tvb_eit+tvb_iit, axis=1)
d1_syn = np.sum(efd1 + ifd1, axis = 1) + np.sum(tvb_ed1+tvb_id1, axis=1)
d2_syn = np.sum(efd2 + ifd2, axis = 1) + np.sum(tvb_ed2+tvb_id2, axis=1)
fs_syn = np.sum(exfs + infs, axis = 1) + np.sum(tvb_efs+tvb_ifs, axis=1)
fr_syn = np.sum(exfr + infr, axis = 1) + np.sum(tvb_efr+tvb_ifr, axis=1)

# now, add unit across space in the contralateral IT (TVB nodes only; there
# is no LSNM sub-network in the contralateral hemisphere)
lit_syn = np.sum(tvb_elit + tvb_ilit, axis=1)

# Extract number of timesteps from one of the synaptic activity arrays
# (overrides the nominal value defined at the top of the script)
synaptic_timesteps = v1_syn.size

# Construct a numpy array of time points (one per MR scan, in seconds)
time_in_seconds = np.arange(0, T, Tr)

# the following calculates a Poisson distribution (that will represent a hemodynamic
# function, given lambda (the Poisson time constant characterizing width and height
# of hemodynamic function), and tau (the time step)
# if you would do it manually you would do the following:
#h = [lambda_ ** tau * m.exp(-lambda_) / m.factorial(tau) for tau in time_in_seconds]
h = poisson.pmf(time_in_seconds, lambda_)

# rescale the array containing the poisson to increase its size and match the size of
# the synaptic activity array (using linear interpolation)
# NOTE(review): synaptic_timesteps / (T/Tr) relies on Python 2 integer
# division to produce an integer step; under Python 3 this step (and the
# resulting index array below) becomes float -- confirm intended interpreter.
scanning_timescale = np.arange(0, synaptic_timesteps, synaptic_timesteps / (T/Tr))
synaptic_timescale = np.arange(0, synaptic_timesteps)
h = np.interp(synaptic_timescale, scanning_timescale, h)

# now, we need to convolve the synaptic activity with the hemodynamic delay
# function to generate a BOLD signal and then bring it back to a synaptic
# timescale. np.convolve returns len(x)+len(h)-1 samples; indexing with
# synaptic_timescale keeps only the first synaptic_timesteps of them.
v1_BOLD = np.convolve(v1_syn, h)[synaptic_timescale]
v4_BOLD = np.convolve(v4_syn, h)[synaptic_timescale]
it_BOLD = np.convolve(it_syn, h)[synaptic_timescale]
d1_BOLD = np.convolve(d1_syn, h)[synaptic_timescale]
d2_BOLD = np.convolve(d2_syn, h)[synaptic_timescale]
fs_BOLD = np.convolve(fs_syn, h)[synaptic_timescale]
fr_BOLD = np.convolve(fr_syn, h)[synaptic_timescale]
lit_BOLD= np.convolve(lit_syn, h)[synaptic_timescale]

###############################################################################
###############################################################################
# We need to downsample the BOLD signal at this point, before we do any
# more processing to it.
# Down-sample every regional BOLD timeseries onto the MR scanning grid
# (one sample per TR) by indexing with scanning_timescale.
(v1_BOLD, v4_BOLD, it_BOLD, d1_BOLD,
 d2_BOLD, fs_BOLD, fr_BOLD, lit_BOLD) = [
    bold[scanning_timescale]
    for bold in (v1_BOLD, v4_BOLD, it_BOLD, d1_BOLD,
                 d2_BOLD, fs_BOLD, fr_BOLD, lit_BOLD)]

# Then blow the scan-rate signals back up to 100x their length with linear
# interpolation, so that the arrays can later be split into equally sized
# per-trial chunks.  (The factor between T/Tr and T*100.0/Tr is 100, not 10
# as an earlier comment claimed.)
scanning_timescale_x_100 = np.arange(0, synaptic_timesteps,
                                     synaptic_timesteps / (T*100.0/Tr))
(v1_BOLD, v4_BOLD, it_BOLD, d1_BOLD,
 d2_BOLD, fs_BOLD, fr_BOLD, lit_BOLD) = [
    np.interp(scanning_timescale_x_100, scanning_timescale, bold)
    for bold in (v1_BOLD, v4_BOLD, it_BOLD, d1_BOLD,
                 d2_BOLD, fs_BOLD, fr_BOLD, lit_BOLD)]

###############################################################################
###############################################################################
# remove first few scans from BOLD signal arrays (to eliminate edge effects from
# convolution during the first block of scans)
# Please note that arrays have to be divisible by the number of trials, i.e., the
# BOLD arrays have to be split after deleting the first few scans, which has to
# result in an equal division.
# Drop the first 432 upsampled samples of every BOLD array to discard the
# convolution edge effects at the start of the run; the remaining length is
# evenly divisible by number_of_trials, which np.split below requires.
v1_BOLD = np.delete(v1_BOLD, np.arange(432))
v4_BOLD = np.delete(v4_BOLD, np.arange(432))
it_BOLD = np.delete(it_BOLD, np.arange(432))
d1_BOLD = np.delete(d1_BOLD, np.arange(432))
d2_BOLD = np.delete(d2_BOLD, np.arange(432))
fs_BOLD = np.delete(fs_BOLD, np.arange(432))
fr_BOLD = np.delete(fr_BOLD, np.arange(432))
lit_BOLD = np.delete(lit_BOLD, np.arange(432))

# FIX: use the parenthesized print form, which prints identically under
# Python 2 (the rest of this script's dialect) and is valid Python 3;
# the original bare "print expr" statement is Python-2-only syntax.
print(v1_BOLD.shape)

# Get rid of the control trials in the BOLD signal arrays,
# by separating the task-related trials and concatenating them
# together. Remember that each trial is a number of synaptic timesteps
# long.

# first, split the arrays into subarrays, each one containing a single trial
it_subarrays = np.split(it_BOLD, number_of_trials)
v1_subarrays = np.split(v1_BOLD, number_of_trials)
v4_subarrays = np.split(v4_BOLD, number_of_trials)
d1_subarrays = np.split(d1_BOLD, number_of_trials)
d2_subarrays = np.split(d2_BOLD, number_of_trials)
fs_subarrays = np.split(fs_BOLD, number_of_trials)
fr_subarrays = np.split(fr_BOLD, number_of_trials)
lit_subarrays = np.split(lit_BOLD, number_of_trials)

print(len(it_subarrays[0]))

# NOTE: a previous revision removed the inter-trial interval (the final
# 1 second = 50 array positions of each trial, np.arange(212, 262)) from
# every subarray here; that step is currently disabled.

# now, get rid of the control trials...
# Keep only the task (DMS) trials by deleting the control-trial rows
# from the stack of per-trial subarrays
it_DMS_trials = np.delete(it_subarrays, control_trials, axis=0)
v1_DMS_trials = np.delete(v1_subarrays, control_trials, axis=0)
v4_DMS_trials = np.delete(v4_subarrays, control_trials, axis=0)
d1_DMS_trials = np.delete(d1_subarrays, control_trials, axis=0)
d2_DMS_trials = np.delete(d2_subarrays, control_trials, axis=0)
fs_DMS_trials = np.delete(fs_subarrays, control_trials, axis=0)
fr_DMS_trials = np.delete(fr_subarrays, control_trials, axis=0)
lit_DMS_trials= np.delete(lit_subarrays, control_trials, axis=0)

# ... and concatenate the task-related trials together into one timeseries
it_DMS_trials_ts = np.concatenate(it_DMS_trials)
v1_DMS_trials_ts = np.concatenate(v1_DMS_trials)
v4_DMS_trials_ts = np.concatenate(v4_DMS_trials)
d1_DMS_trials_ts = np.concatenate(d1_DMS_trials)
d2_DMS_trials_ts = np.concatenate(d2_DMS_trials)
fs_DMS_trials_ts = np.concatenate(fs_DMS_trials)
fr_DMS_trials_ts = np.concatenate(fr_DMS_trials)
lit_DMS_trials_ts= np.concatenate(lit_DMS_trials)

# but also, get rid of the DMS task trials, to create arrays with only control trials
it_control_trials = np.delete(it_subarrays, dms_trials, axis=0)
v1_control_trials = np.delete(v1_subarrays, dms_trials, axis=0)
v4_control_trials = np.delete(v4_subarrays, dms_trials, axis=0)
d1_control_trials = np.delete(d1_subarrays, dms_trials, axis=0)
d2_control_trials = np.delete(d2_subarrays, dms_trials, axis=0)
fs_control_trials = np.delete(fs_subarrays, dms_trials, axis=0)
fr_control_trials = np.delete(fr_subarrays, dms_trials, axis=0)
lit_control_trials= np.delete(lit_subarrays, dms_trials, axis=0)

# ... and concatenate the control task trials together
it_control_trials_ts = np.concatenate(it_control_trials)
v1_control_trials_ts = np.concatenate(v1_control_trials)
v4_control_trials_ts = np.concatenate(v4_control_trials)
d1_control_trials_ts = np.concatenate(d1_control_trials)
d2_control_trials_ts = np.concatenate(d2_control_trials)
fs_control_trials_ts = np.concatenate(fs_control_trials)
fr_control_trials_ts = np.concatenate(fr_control_trials)
lit_control_trials_ts= np.concatenate(lit_control_trials)

# Calculate new synaptic timesteps and scanning timescale, as all BOLD arrays
# have been halved (disabled: the condition timeseries are used at full
# upsampled resolution instead of being re-sampled to the MR grid)
#new_synaptic_timesteps = v1_DMS_trials_ts.size
#scanning_timescale = np.arange(0, new_synaptic_timesteps, new_synaptic_timesteps / (T/Tr))
# now, sample all BOLD arrays, DMS and control, to match the
# MRI scanning interval:
#v1_DMS_trials_ts = v1_DMS_trials_ts[scanning_timescale]
#v4_DMS_trials_ts = v4_DMS_trials_ts[scanning_timescale]
#it_DMS_trials_ts = it_DMS_trials_ts[scanning_timescale]
#d1_DMS_trials_ts = d1_DMS_trials_ts[scanning_timescale]
#d2_DMS_trials_ts = d2_DMS_trials_ts[scanning_timescale]
#fs_DMS_trials_ts = fs_DMS_trials_ts[scanning_timescale]
#fr_DMS_trials_ts = fr_DMS_trials_ts[scanning_timescale]
#v1_control_trials_ts = v1_control_trials_ts[scanning_timescale]
#v4_control_trials_ts = v4_control_trials_ts[scanning_timescale]
#it_control_trials_ts = it_control_trials_ts[scanning_timescale]
#d1_control_trials_ts = d1_control_trials_ts[scanning_timescale]
#d2_control_trials_ts = d2_control_trials_ts[scanning_timescale]
#fs_control_trials_ts = fs_control_trials_ts[scanning_timescale]
#fr_control_trials_ts = fr_control_trials_ts[scanning_timescale]

# aliases naming each condition's concatenated timeseries as a BOLD signal
v1_BOLD_dms = v1_DMS_trials_ts
v4_BOLD_dms = v4_DMS_trials_ts
it_BOLD_dms = it_DMS_trials_ts
d1_BOLD_dms = d1_DMS_trials_ts
d2_BOLD_dms = d2_DMS_trials_ts
fs_BOLD_dms = fs_DMS_trials_ts
fr_BOLD_dms = fr_DMS_trials_ts
lit_BOLD_dms= lit_DMS_trials_ts

v1_BOLD_ctl = v1_control_trials_ts
v4_BOLD_ctl = v4_control_trials_ts
it_BOLD_ctl = it_control_trials_ts
d1_BOLD_ctl = d1_control_trials_ts
d2_BOLD_ctl = d2_control_trials_ts
fs_BOLD_ctl = fs_control_trials_ts
fr_BOLD_ctl = fr_control_trials_ts
lit_BOLD_ctl= lit_control_trials_ts

# now, convert DMS and control timeseries into pandas timeseries, so we can analyze it
IT_dms_ts = pd.Series(it_BOLD_dms)
V1_dms_ts = pd.Series(v1_BOLD_dms)
V4_dms_ts = pd.Series(v4_BOLD_dms)
D1_dms_ts = pd.Series(d1_BOLD_dms)
D2_dms_ts = pd.Series(d2_BOLD_dms)
FS_dms_ts = pd.Series(fs_BOLD_dms)
FR_dms_ts = pd.Series(fr_BOLD_dms)
LIT_dms_ts= pd.Series(lit_BOLD_dms)

IT_ctl_ts = pd.Series(it_BOLD_ctl)
V1_ctl_ts = pd.Series(v1_BOLD_ctl)
V4_ctl_ts = pd.Series(v4_BOLD_ctl)
D1_ctl_ts = pd.Series(d1_BOLD_ctl)
D2_ctl_ts = pd.Series(d2_BOLD_ctl)
FS_ctl_ts = pd.Series(fs_BOLD_ctl)
FR_ctl_ts = pd.Series(fr_BOLD_ctl)
LIT_ctl_ts= pd.Series(lit_BOLD_ctl)

# ... and calculate the functional connectivity of IT with the other modules
# (Series.corr computes the Pearson correlation coefficient by default)
funct_conn_it_v1_dms = IT_dms_ts.corr(V1_dms_ts)
funct_conn_it_v4_dms = IT_dms_ts.corr(V4_dms_ts)
funct_conn_it_d1_dms = IT_dms_ts.corr(D1_dms_ts)
funct_conn_it_d2_dms = IT_dms_ts.corr(D2_dms_ts)
funct_conn_it_fs_dms = IT_dms_ts.corr(FS_dms_ts)
funct_conn_it_fr_dms = IT_dms_ts.corr(FR_dms_ts)
funct_conn_it_lit_dms= IT_dms_ts.corr(LIT_dms_ts)

funct_conn_it_v1_ctl = IT_ctl_ts.corr(V1_ctl_ts)
funct_conn_it_v4_ctl = IT_ctl_ts.corr(V4_ctl_ts)
funct_conn_it_d1_ctl = IT_ctl_ts.corr(D1_ctl_ts)
funct_conn_it_d2_ctl = IT_ctl_ts.corr(D2_ctl_ts)
funct_conn_it_fs_ctl = IT_ctl_ts.corr(FS_ctl_ts)
funct_conn_it_fr_ctl = IT_ctl_ts.corr(FR_ctl_ts)
funct_conn_it_lit_ctl= IT_ctl_ts.corr(LIT_ctl_ts)

# stack the coefficients for saving; element order is V1, V4, FS, D1, D2, FR, LIT
func_conn_dms = np.array([funct_conn_it_v1_dms,funct_conn_it_v4_dms,
                          funct_conn_it_fs_dms,funct_conn_it_d1_dms,
                          funct_conn_it_d2_dms,funct_conn_it_fr_dms,
                          funct_conn_it_lit_dms ])
func_conn_ctl = np.array([funct_conn_it_v1_ctl,funct_conn_it_v4_ctl,
                          funct_conn_it_fs_ctl,funct_conn_it_d1_ctl,
                          funct_conn_it_d2_ctl,funct_conn_it_fr_ctl,
                          funct_conn_it_lit_ctl ])

# now, save all correlation coefficients to output files
np.save(func_conn_dms_file, func_conn_dms)
np.save(func_conn_ctl_file, func_conn_ctl)

# define number of groups to plot (one group per condition: DMS, control)
N = 2

# create a list of x locations for each group
index = np.arange(N)
width = 0.1           # width of the bars

fig, ax = plt.subplots()
ax.set_ylim([0,1])

# now, group the values to be plotted by brain module:
# each pair is (DMS correlation, control correlation)
it_v1_corr = (funct_conn_it_v1_dms, funct_conn_it_v1_ctl)
it_v4_corr = (funct_conn_it_v4_dms, funct_conn_it_v4_ctl)
it_fs_corr = (funct_conn_it_fs_dms, funct_conn_it_fs_ctl)
it_d1_corr = (funct_conn_it_d1_dms, funct_conn_it_d1_ctl)
it_d2_corr = (funct_conn_it_d2_dms, funct_conn_it_d2_ctl)
it_fr_corr = (funct_conn_it_fr_dms, funct_conn_it_fr_ctl)
it_lit_corr= (funct_conn_it_lit_dms,funct_conn_it_lit_ctl)

# one bar series per region, side by side within each condition group
rects_v1 = ax.bar(index, it_v1_corr, width, color='purple', label='V1')
rects_v4 = ax.bar(index + width, it_v4_corr, width, color='darkred', label='V4')
rects_fs = ax.bar(index + width*2, it_fs_corr, width, color='lightyellow', label='FS')
rects_d1 = ax.bar(index + width*3, it_d1_corr, width, color='lightblue', label='D1')
rects_d2 = ax.bar(index + width*4, it_d2_corr, width, color='yellow', label='D2')
rects_fr = ax.bar(index + width*5, it_fr_corr, width, color='red', label='FR')
rects_lit= ax.bar(index + width*6, it_lit_corr, width, color='pink', label='LIT')

ax.set_title('FUNCTIONAL CONNECTIVITY OF IT WITH OTHER BRAIN REGIONS (fMRI)')

# get rid of x axis ticks and labels
ax.set_xticks([])
ax.set_xlabel('DMS TASK CONTROL TASK')
ax.xaxis.set_label_coords(0.5, -0.025)

# Shrink current axis by 10% to make space for legend
box = ax.get_position()
ax.set_position([box.x0, box.y0, box.width * 0.9, box.height])

# place a legend to the right of the figure
plt.legend(loc='center left', bbox_to_anchor=(1.02, .5))

# Show the plots on the screen
plt.show()