@phdthesis{10.7907/yhmt-9t69, author = {Han, Yanting}, title = {Emotion Experience from Stories, Videos and Everyday Life: Structure and Individual Differences}, school = {California Institute of Technology}, year = {2022}, doi = {10.7907/yhmt-9t69}, url = {https://resolver.caltech.edu/CaltechTHESIS:05272022-181044242}, abstract = {

Most studies of emotion have as their subject matter the emotion experiences that people can describe and rate. In contrast to this approach from psychology, studies in animals, and some biological studies in humans, focus on behavior and its adaptive function. These two literatures typically use very different features to characterize emotion: categories or dimensions describing feelings for which we have convenient words for the former (e.g., happiness, pleasantness), and functional properties for the latter (e.g., persistence, generalizability, approachability). In this thesis I use both sets of ratings, and I ask whether the latter, biologically inspired features could also be used to characterize people’s emotion experiences and might reveal novel dimensions of variability. The two literatures also typically use different sets of stimuli to induce the emotions: lexical stimuli in which participants are asked to imagine something hypothetical are common in human studies; ecologically valid stimuli that, at least to the subjects, are indistinguishable from the real world are common in animal studies. Here I used three domains of stimuli: stories, videos, and real-life experiences, in the same set of participants, permitting a unique comparison.

I took advantage of a sample of approximately 1000 Americans who were surveyed longitudinally over the internet during the COVID-19 pandemic. I collected ratings of emotion experiences evoked by three classes of stimuli: a validated set of short stories, a validated set of short videos, and actual experiences in real life across multiple waves. I found that all three types of emotion experiences could be characterized by low-dimensional spaces, with the first two factors, which accounted for most of the variance in people’s ratings, corresponding to the dimensions of valence and arousal, in line with prior work. However, I discovered additional novel factors related to generalizability (the extent to which an emotion experience is shared across many different situations and occurrences) or modularity (the extent to which an emotion experience is unique to specific situations). The findings show that emotion features not usually assessed in humans can be recovered from people’s subjective ratings of their experiences. I argue for a revision of current dimensional theories of emotion: they have been incomplete because they were restricted to ratings entrenched in how we think of our conscious experience, and in the typical English words we use to describe it. The new dimensions validate some theories of emotion and offer hope for linking psychological studies in humans with behavioral or neurobiological work across species. I also characterized the distributions of the three types of emotion experiences and found that emotions were distributed along continuous gradients, with no well-separated clusters even for emotions belonging to the six basic emotion categories.

My thesis presents two additional topics that capitalize on my unique sample: the emotions experienced during the COVID pandemic, and individual differences. For example, I found that resilience buffered individuals against the effect of loneliness on depression, and that people who had tested positive for COVID felt more moral disgust towards acts that violate social norms. I also explored the association between psychological traits and differences in emotion experiences, both in terms of the magnitudes of the ratings and in terms of the overall correlation structure across scales. Again, the richness of my dataset reveals a number of associations that are theoretically interesting and that will also be relevant to understanding mood and anxiety disorders.

All of the data will be made publicly available, and the core parts of many of the investigations were pre-registered.

}, address = {1200 East California Boulevard, Pasadena, California 91125}, advisor = {Adolphs, Ralph}, } @phdthesis{10.7907/RG37-G744, author = {Fu, Zhongzheng}, title = {Representations of Action Monitoring and Cognitive Control by Single Neurons in the Human Brain}, school = {California Institute of Technology}, year = {2019}, doi = {10.7907/RG37-G744}, url = {https://resolver.caltech.edu/CaltechTHESIS:06012019-234115726}, abstract = {Cognitive control arises whenever a prepotent and often automatic response needs to be overcome by another response. Control is usually effortful and relies on monitoring processes that detect when control is needed and/or when it has failed. Control is one of the most important aspects of human behavior in everyday life and is a critical component of executive function. In a series of three empirical chapters, I present results from invasive single-neuron recordings from the frontal cortex of neurosurgical human patients while they perform tasks requiring cognitive control. I show that a substantial proportion of neurons in the pre-supplementary motor area (pre-SMA) and in the dorsal anterior cingulate cortex (dACC) signal response errors shortly after they occur, but well before the onset of feedback. I demonstrate that these error neurons signal self-detected errors and that they are separate from neurons signaling conflict. The response of error neurons correlated trial-by-trial with the simultaneously recorded intracranial error-related negativity (iERN), thereby establishing a single-neuron correlate of this important scalp potential. iERN-error neuron synchrony in dACC, but not pre-SMA, predicted whether post-error slowing, which is a measure of control, occurred or not. Spike-field coherence between action potentials and local field potentials in specific frequency bands, and latency differences between the different brain regions, suggest a mechanistic model whereby information relevant to control is passed between sectors of the medial frontal cortex. Multiplexing of different ex-post monitoring signals by individual neurons further documents that control relies on multiple sources of information, which can be dynamically routed in the brain depending on task demands. These findings provide the most complete set of single-neuron data to date on how error and conflict signals contribute to cognitive control in humans. They also provide a first single-neuron correlate of an extensively utilized scalp EEG potential. Together, this work provides a strong complement to investigations of this topic using fMRI in humans and electrophysiology in monkeys, and suggests specific future directions.}, address = {1200 East California Boulevard, Pasadena, California 91125}, advisor = {Adolphs, Ralph}, } @phdthesis{10.7907/CEMB-6R23, author = {Lin, Chujun}, title = {Understanding How People Make Trait Attributions from Faces}, school = {California Institute of Technology}, year = {2019}, doi = {10.7907/CEMB-6R23}, url = {https://resolver.caltech.edu/CaltechTHESIS:05232019-113042503}, abstract = {

This thesis is motivated by the fascinating question of how people make inferences about others from their faces. How do we infer somebody’s intent or personality merely from looking at them? I studied this question by investigating how people make trait attributions in two specific contexts – political elections (Chapter 2) and political corruption (Chapter 3) – as well as how people make a large variety of trait attributions from faces in general (Chapter 4). I employed novel methods to representatively sample the words used to rate faces and to select the facial stimuli themselves (e.g., using artificial neural networks), to test the reproducibility and generalizability of my results (e.g., pre-registration, generalization across participants from different cultures), and to elucidate the underlying mechanisms (e.g., mediation modeling, digital manipulation of facial stimuli). The results demonstrated that trait attributions from politicians’ faces were associated with real election outcomes in different cultures, and that culture shaped trait attributions relevant to a given context (Chapter 2); trait attributions from politicians’ faces were also associated with real corruption/violation records of the politicians, and perceived corruptibility was associated with the width of the face (Chapter 3). Trait attributions from faces in general (Chapter 4) were well described by four novel dimensions that I discovered: critical/condescending, leadership/competence, female-stereotype, and youth-stereotype. Taken together, the findings provide a new psychological framework for trait attributions, demonstrate cross-cultural generalizability, and link trait attributions to real-world behaviors.

}, address = {1200 East California Boulevard, Pasadena, California 91125}, advisor = {Adolphs, Ralph}, } @phdthesis{10.7907/8f55-zz50, author = {Minxha, Juri}, title = {Single-Neuron Correlates of Visual Object Representation in the Human Brain: Effects of Attention, Memory, and Choice}, school = {California Institute of Technology}, year = {2018}, doi = {10.7907/8f55-zz50}, url = {https://resolver.caltech.edu/CaltechTHESIS:06082018-133105605}, abstract = {

Neurons in the medial temporal lobe (amygdala and hippocampus) are known to respond selectively to specific object categories, such as faces. This dissertation investigates two novel extensions of this work: (1) how such neuronal responses are influenced by where we attend, and (2) how category information is used by the brain to make decisions.

To address the first question, we evaluated the representation of faces in the primate amygdala under naturalistic conditions by recording from both human and macaque amygdala neurons during free viewing of arrays of images with concurrent eye tracking. We found that category-selective responses were very strongly modulated by where people, or monkeys, fixated (overt attention). Subsequent experiments, conducted in humans only, further demonstrated that this effect holds even when people allocate visual attention while maintaining central fixation (covert attention). In both monkeys and humans, the majority of face-selective neurons preferred faces of conspecifics, a bias also seen behaviorally in first-fixation preferences. Response latencies, relative to fixation onset, were shortest for conspecific-selective neurons. Response latencies were also notably shorter in monkeys than in humans. To answer the second question, we investigated how visual representations in the medial temporal lobe are subsequently used to make two types of decisions: a recognition memory choice (“Have you seen this image before?”) and a stimulus categorization choice (“Is this a face?”). We show that (i) there are distinct populations of cells in the medial frontal cortex (including anterior cingulate cortex and supplementary motor cortex) encoding recognition memory or categorization-based choices; (ii) category-selective cells in the medial temporal lobe are insensitive to such task conditions; and (iii) spike-field coherence between field potentials in the medial temporal lobe and action potentials in the medial frontal cortex is enhanced during recognition memory choices. This suggests that inter-areal communication between these two brain regions may be facilitated selectively in tasks that rely on recognition memory-based information. Taken together, these two components of this dissertation provide novel insights into how visual object representations in the human brain are gated by attention and how they are used in decisions. This work thus provides, for the first time, a comprehensive characterization of how single neurons in the human brain participate in the cycle from perception to action.

}, address = {1200 East California Boulevard, Pasadena, California 91125}, advisor = {Adolphs, Ralph and Rutishauser, Ueli}, } @phdthesis{10.7907/Z9F47M2M, author = {Park, Soyoung}, title = {Connectivity and Function of the Primate Insula}, school = {California Institute of Technology}, year = {2016}, doi = {10.7907/Z9F47M2M}, url = {https://resolver.caltech.edu/CaltechTHESIS:01242016-181117777}, abstract = {The insula is a mammalian cortical structure that has been implicated in a wide range of low- and high-level functions governing one’s sensory, emotional, and cognitive experiences. One particular role of this region is considered to be the processing of olfactory stimuli. The ability to detect and evaluate odors has significant effects on an organism’s eating behavior and survival and, in the case of humans, on complex decision making. Despite the importance of this function, the mechanism by which olfactory information is processed in the insula has not been thoroughly studied. Moreover, due to the structure’s close spatial relationship with the neighboring claustrum, it is not entirely clear whether the connectivity and olfactory functions attributed to the insula are truly those of the insula, rather than of the claustrum. My graduate work, consisting of two studies, seeks to help fill these gaps. In the first, the structural connectivity patterns of the insula and the claustrum in a non-human primate brain are assayed using an ultra-high-quality diffusion magnetic resonance image, and the results suggest dissociation of connectivity — and hence function — between the two structures. In the second study, a functional neuroimaging experiment investigates insular activity during odor evaluation tasks in humans, and uncovers a potential spatial organization within the anterior portion of the insula for processing different aspects of odor characteristics.}, address = {1200 East California Boulevard, Pasadena, California 91125}, advisor = {Allman, John Morgan and Adolphs, Ralph}, } @phdthesis{10.7907/Z95H7D7P, author = {Harrison, Laura Anne}, title = {Real-World Social Cognition: Context Effects in Face and Threat Processing}, school = {California Institute of Technology}, year = {2015}, doi = {10.7907/Z95H7D7P}, url = {https://resolver.caltech.edu/CaltechTHESIS:05292015-165113710}, abstract = {As borne out by everyday social experience, social cognition is highly dependent on context, modulated by a host of factors that arise from the social environment in which we live. While streamlined laboratory research provides excellent experimental control, it can be limited to telling us about the capabilities of the brain under artificial conditions, rather than elucidating the processes that come into play in the real world. Consideration of the impact of ecologically valid contextual cues on social cognition will improve the generalizability of social neuroscience findings, including to pathology, e.g., psychiatric illnesses. To help bridge between laboratory research and social cognition as we experience it in the real world, this thesis investigates three themes: (1) increasing the naturalness of stimuli with richer contextual cues, (2) the potentially special contextual case of social cognition when two people interact directly, and (3) experimental believability, a theme that runs in parallel to the first two. Focusing on the first two themes, in work with two patient populations, we explore neural contributions to two topics in social cognition.
First, we document a basic approach bias in rare patients with bilateral lesions of the amygdala. This finding is then related to the contextual factor of ambiguity, and further investigated together with other contextual cues in a sample of healthy individuals tested over the internet, finally yielding a hierarchical decision tree for social threat evaluation. Second, we demonstrate that neural processing of eye gaze in brain structures related to face, gaze, and social processing is differently modulated by the direct presence of another live person. This question is investigated using fMRI in people with autism and controls. Across a range of topics, we demonstrate that two themes of ecological validity — integration of naturalistic contextual cues, and social interaction — influence social cognition, that particular brain structures mediate this processing, and that it will be crucial to study interaction in order to understand disorders of social interaction such as autism.}, address = {1200 East California Boulevard, Pasadena, California 91125}, month = {July}, advisor = {Adolphs, Ralph}, } @phdthesis{10.7907/Z9DN4307, author = {Gharib, Alma Mariam}, title = {Visual Behavior and Preference Decision-Making in Response to Faces in High-Functioning Autism}, school = {California Institute of Technology}, year = {2015}, doi = {10.7907/Z9DN4307}, url = {https://resolver.caltech.edu/CaltechTHESIS:06052015-203007731}, abstract = {

How do we come to the decision that we like a face? This thesis investigates this important aspect of social processing and communication by examining preference decisions for faces and the role that visual behavior plays in the process. I present a series of studies designed to investigate face preference formation and gaze patterns using eye-tracking and self-reported preference ratings. I tested healthy control subjects and two clinical populations known to have deficits in social processing: people with autism and patients with amygdala lesions. In studies one and two, I explore whether known social cognition deficits in people with autism and patients with amygdala lesions also impair subjective decision-making regarding the attractiveness of faces. In study three, I investigate the flexibility of rule-based visual strategies used by these populations during face perception. Additionally, I present a custom algorithm developed to process raw eye-tracking data, which was used to analyze all eye-tracking data in this thesis.

People with autism and patients with amygdala lesions are known to have general deficits in social processing, including difficulty orienting toward and evaluating faces. Nevertheless, I find that their behavior is markedly similar to that of controls in many areas where we would expect them to have abnormalities or deficiencies. Their preference decisions when judging facial attractiveness were highly correlated with those made by controls, and both groups showed the same biases for familiar faces over novel faces. In addition, people with autism exhibit the same visual sampling behavior linking preference and attentional orienting, but reach their decisions faster than controls and also appear insensitive to the difficulty of the choice. Finally, gaze to the eye region appears normal in the absence of an explicit decision-making task, but only when analyzed in a manner similar to previous studies. However, when face sub-regions are analyzed in greater detail, people with autism demonstrate abnormalities in face gaze patterns, failing to emphasize the most information-rich regions of the face. Furthermore, people with autism demonstrate impairments in their ability to update those gaze patterns to accommodate different viewing restrictions. Taken together, these findings support the idea that the normal formation of face preferences can be preserved in the presence of general social processing impairments. Patterns in the eye-tracking and behavioral data indicate that this is made possible, in part, by compensatory atypical processing and visual strategies.

}, address = {1200 East California Boulevard, Pasadena, California 91125}, advisor = {Shimojo, Shinsuke}, } @phdthesis{10.7907/Z90Z718H, author = {Wang, Shuo}, title = {Social Saliency: Visual Psychophysics and Single-Neuron Recordings in Humans}, school = {California Institute of Technology}, year = {2014}, doi = {10.7907/Z90Z718H}, url = {https://resolver.caltech.edu/CaltechTHESIS:05122014-203347930}, abstract = {My thesis studies how people pay attention to other people and the environment. How does the brain figure out what is important and what are the neural mechanisms underlying attention? What is special about salient social cues compared to salient non-social cues? In Chapter I, I review social cues that attract attention, with an emphasis on the neurobiology of these social cues. I also review neurological and psychiatric links: the relationship between saliency, the amygdala and autism. The first empirical chapter then begins by noting that people constantly move in the environment. In Chapter II, I study the spatial cues that attract attention during locomotion using a cued speeded discrimination task. I found that when the motion was expansive, attention was attracted towards the singular point of the optic flow (the focus of expansion, FOE) in a sustained fashion. The more ecologically valid the motion features became (e.g., temporal expansion of each object, spatial depth structure implied by distribution of the size of the objects), the stronger the attentional effects. However, compared to inanimate objects and cues, people preferentially attend to animals and faces, a process in which the amygdala is thought to play an important role. To directly compare social cues and non-social cues in the same experiment and investigate the neural structures processing social cues, in Chapter III, I employ a change detection task and test four rare patients with bilateral amygdala lesions. All four amygdala patients showed a normal pattern of reliably faster and more accurate detection of animate stimuli, suggesting that advantageous processing of social cues can be preserved even without the amygdala, a key structure of the “social brain”. People not only attend to faces, but also pay attention to others’ facial emotions and analyze faces in great detail. Humans have a dedicated system for processing faces and the amygdala has long been associated with a key role in recognizing facial emotions. In Chapter IV, I study the neural mechanisms of emotion perception and find that single neurons in the human amygdala are selective for subjective judgment of others’ emotions. Lastly, people typically pay special attention to faces and people, but people with autism spectrum disorders (ASD) might not. To further study social attention and explore possible deficits of social attention in autism, in Chapter V, I employ a visual search task and show that people with ASD have reduced attention, especially social attention, to target-congruent objects in the search array. This deficit cannot be explained by low-level visual properties of the stimuli and is independent of the amygdala, but it is dependent on task demands. Overall, through visual psychophysics with concurrent eye-tracking, my thesis found and analyzed socially salient cues and compared social vs. non-social cues and healthy vs. clinical populations. Neural mechanisms underlying social saliency were elucidated through electrophysiology and lesion studies. 
In the final section, Future Directions, I propose further research questions based on the findings in my thesis and introduce my follow-up studies and preliminary results that go beyond the scope of this thesis.}, address = {1200 East California Boulevard, Pasadena, California 91125}, advisor = {Adolphs, Ralph}, } @phdthesis{10.7907/7EGE-FG03, author = {Dubois, Julien Christian Roger}, title = {Studying Conscious and Unconscious Vision with Functional Magnetic Resonance Imaging: the BOLD Promise}, school = {California Institute of Technology}, year = {2013}, doi = {10.7907/7EGE-FG03}, url = {https://resolver.caltech.edu/CaltechTHESIS:05312013-191910347}, abstract = {Waking up from a dreamless sleep, I open my eyes, recognize my wife’s face and am filled with joy. In this thesis, I used functional Magnetic Resonance Imaging (fMRI) to gain insights into the mechanisms involved in this seemingly simple daily occurrence, which poses at least three great challenges to neuroscience: how does conscious experience arise from the activity of the brain? How does the brain process visual input to the point of recognizing individual faces? How does the brain store semantic knowledge about people that we know? To start tackling the first question, I studied the neural correlates of unconscious processing of invisible faces. I was unable to image significant activations related to the processing of completely invisible faces, despite existing reports in the literature. I thus moved on to the next question and studied how recognition of a familiar person was achieved in the brain; I focused on finding invariant representations of person identity – representations that would be activated any time we think of a familiar person, read their name, see their picture, hear them talk, etc. There again, I could not find significant evidence for such representations with fMRI, even in regions where they had previously been found with single unit recordings in human patients (the Jennifer Aniston neurons). Faced with these null outcomes, the scope of my investigations eventually turned back towards the technique that I had been using, fMRI, and the recently praised analytical tool that I had been trusting, Multivariate Pattern Analysis. After a mostly disappointing attempt at replicating a strong single unit finding of a categorical response to animals in the right human amygdala with fMRI, I put fMRI decoding to an ultimate test with a unique dataset acquired in the macaque monkey. There I showed a dissociation between the ability of fMRI to pick up face viewpoint information and its inability to pick up face identity information, which I mostly traced back to the poor clustering of identity-selective units. Though fMRI decoding is a powerful new analytical tool, it does not rid fMRI of its inherent limitations as a hemodynamics-based measure.}, address = {1200 East California Boulevard, Pasadena, California 91125}, advisor = {Koch, Christof}, } @phdthesis{10.7907/Z2N2-4C56, author = {Bryan, Ronald Edward}, title = {Distance Based Visual Cues to Interpersonal Trust}, school = {California Institute of Technology}, year = {2012}, doi = {10.7907/Z2N2-4C56}, url = {https://resolver.caltech.edu/CaltechTHESIS:06042012-095743080}, abstract = {This thesis examines the role of interpersonal spacing in determining the visual appearance of, and emotional response to, images of faces.
We present new methods for isolating the distance-dependent perspective projection as a visual feature, while controlling for confounding variables such as emotional expression. In behavioral experiments, we demonstrate the relevance of viewing distance to implicit social judgments, notably trust behavior in which real money was at stake. Finally, we provide tools for classifying face images according to viewing distance, and manipulating face images to simulate their appearance at different distances and different levels of trustworthiness.}, address = {1200 East California Boulevard, Pasadena, California 91125}, advisor = {Adolphs, Ralph}, } @phdthesis{10.7907/1BCV-MQ11, author = {Harel, Jonathan}, title = {Neural Pattern Similarity and Visual Perception}, school = {California Institute of Technology}, year = {2012}, doi = {10.7907/1BCV-MQ11}, url = {https://resolver.caltech.edu/CaltechTHESIS:05302012-200743814}, abstract = {

This thesis addresses the question of whether different people actually see the same visual stimuli differently and, if so, under what conditions. It is an experimental contribution to the basic understanding of visual and especially face perception, and its neural correlates, with an emphasis on comparing patterns of neural activity driven by visual stimuli across trials and across individuals. We make extensive use of functional magnetic resonance imaging (fMRI); all inferences about neural activity are made via this intermediary. The thesis is organized into two parts:

In Part I, we investigate the nature of face familiarity and distinctiveness at perceptual and neural levels. We first address the question of how the faces of people personally familiar to a viewer appear different to that viewer than they would to an unfamiliar viewer. The main result is that they appear more distinctive, i.e., dissimilar to and distinguishable from other faces, and more so the higher the level of familiarity. Having established this connection between face familiarity and distinctiveness, we next ask what is different about the perception of such faces, as compared with indistinct and unfamiliar faces, at the level of brain activation. We find that familiar and distinctive faces are represented more consistently: compared with indistinct faces, which evoke slightly different patterns of activity with each new presentation, these faces evoke more similar patterns across presentations. Combined with the observation that consistency can enhance memory encoding (a result reported by Xue et al. [102]), this suggests a cyclic process for the learning of unfamiliar faces in which consistent representation and the presence of newly formed memories mutually feed back on each other.

Whereas in Part I we focus on individual differences in neural activity, principally by experimentally manipulating stimulus familiarity, in Part II we shift our focus to similarities across individuals and extend our investigation beyond faces to the perception of visual objects in general and of moving images. We begin with an experiment involving the perception of static images selected from 44 object categories, where we find that the distances between these categories, induced from activity in cortical visual object areas, correlate highly between subjects, and also with distances inferred from a behavioral clustering task, and that this correlation remains significant even among subsets of closely related categories. We also show that one subject’s brain activity can be accurately modeled using another’s, and that this allows us to predict which image a subject is viewing based on his/her brain activity. Then, in a different experiment investigating the perception of dynamic/video stimuli, we find evidence that when watching videos with sound, visual attention is likely blurred at times and transferred to audition: subjects become relatively temporally decorrelated in visual areas compared to the muted case, in which the patterns of neural activity correlate across subjects at an average of 78% of the level found within the same subject at a later time.

The findings reported in this thesis thus offer quantitative lower bounds on how similarly different individuals neurally experience visual stimuli, and an explanation for how they perceptually and neurally diverge when familiarity with a (face) stimulus varies, suggesting a possible mechanism for the encoding of new visual objects into memory. We conclude with a discussion of some of the questions raised by this work and directions for future research.

}, address = {1200 East California Boulevard, Pasadena, California 91125}, advisor = {Koch, Christof}, } @phdthesis{10.7907/0XT9-2494, author = {Lin, Alice}, title = {Neural and Behavioral Investigations of Social Reward Processing}, school = {California Institute of Technology}, year = {2012}, doi = {10.7907/0XT9-2494}, url = {https://resolver.caltech.edu/CaltechTHESIS:06052012-163338072}, abstract = {Despite an extensive literature on the neural substrates of reward, relatively little is known about how social interactions modify decision-making. Here I present three experiments that examine the neural basis of social reward processing both in neurotypicals and individuals with autism spectrum disorder (ASD), a neuropsychiatric syndrome associated with social cognition impairments. Using functional magnetic resonance imaging (fMRI), I recorded brain activity during a probabilistic reward learning task with either social (smiling/frowning faces) or monetary (gaining/losing money) rewards. I found substantial overlap in the neural circuitry associated with social and non-social reward processing, suggesting that social rewards are processed similarly to other types of rewards. In contrast, individuals with ASD showed behavioral impairments in social reward processing, both in probabilistic reward learning and in an ecologically valid charitable donation task. Exploratory neuroimaging in ASD showed hypoactivation of key reward areas during decision-making. Taken together, these findings support the idea of a “common neural currency” in decision-making but also suggest the construction of accurate social reward value signals relies on recruitment of additional regions known to process social information.}, address = {1200 East California Boulevard, Pasadena, California 91125}, advisor = {Rangel, Antonio}, } @phdthesis{10.7907/1A5T-S275, author = {Neumann, Dirk}, title = {Connectivity of the Brain from Magnetic Resonance Imaging}, school = {California Institute of Technology}, year = {2010}, doi = {10.7907/1A5T-S275}, url = {https://resolver.caltech.edu/CaltechTHESIS:04282010-153942989}, abstract = {

How do different parts of the brain work? The naive and somewhat ill-posed question nonetheless admits of a serious answer. Different regions of the brain carry out their function principally through two components: the pattern of inputs and outputs that connect a region with the rest of the brain, and the computational transformations implemented by neurons within the region itself. Here we focus on the former problem and study the connectivity of the primate brain, with an emphasis on neocortex.

We develop a novel set of algorithms for modeling anatomical connectivity based on diffusion-weighted magnetic resonance (MR) imaging. The approach is novel in several respects: it utilizes a new way of deriving a globally optimal solution from local message passing; it can be applied at the whole-brain level in a computationally tractable fashion; and it can flexibly incorporate additional information, such as constraints about the geometry of white-matter tracts and high-resolution anatomical MR images. The algorithm is first described as a hierarchical Bayesian model, and then applied to diffusion MRI data obtained from two perfusion-fixed brains of macaque monkeys.

Based on the connectivity output obtained by applying our novel algorithm to high-angular-resolution MR data, we next derive several new insights about the connectivity of the macaque brain. We compare our results against those from published tracer studies, and we derive the relative weights of connections known from such prior studies. We also demonstrate the ability of the algorithm to generate entirely novel connectivity data, both at the level of specific anatomical regions that are queried, and also at the whole-brain level. The latter permits new insights into whole-brain connectivity and its architecture.

In addition to this focus on the structural connectivity of the macaque brain, we also analyze an existing public BOLD-fMRI data set from the macaque brain. This data set yields information regarding the functional connectivity of the macaque brain, which we combine with our new structural connectivity results in order to relate structural and functional connectivity, yielding several new discoveries about their relationship.

In the final chapter, we apply these methods to MR data we collected from the live human brain. We provide an overview of structural and functional connectivity results obtained from this data set, and we apply the investigation to the brains of rare patients with agenesis of the corpus callosum, who lack the normal connection between the left and right hemispheres. We close by illustrating the power of the approach to ask questions that integrate function with the connectivity information on which function must ultimately be based: using connectivity profiles to segment cortical regions based on their pattern of inputs and outputs, with the aim of then querying these segmented regions using fMRI in cognitive activation studies. The description of our algorithm, the demonstration of its reliability and validity, and its application to yield new data, together with the extensive software libraries on which the work is based, will provide cognitive neuroscientists with an array of new tools to investigate brain function in both health and disease.

}, address = {1200 East California Boulevard, Pasadena, California 91125}, advisor = {Adolphs, Ralph}, } @phdthesis{10.7907/M8XP-KT54, author = {Escobedo, Jessica Rose}, title = {Investigating Moral Events: Characterization and Structure of Autobiographical Moral Memories}, school = {California Institute of Technology}, year = {2009}, doi = {10.7907/M8XP-KT54}, url = {https://resolver.caltech.edu/CaltechETD:etd-11112008-122002}, abstract = {

Moral events, and the actions, decisions, and people they involve, are judged as right or wrong, and the moral responsibility associated with them generates further judgments, often legal in nature, of blame and punishment or praise. Not only do moral events and the normative judgments they presuppose define essential aspects of human nature, they are also ubiquitous at the level of society as well as the individual. Despite their importance, the characterization of the sociological, psychological, and neurological features of moral events is in its infancy. Much of the recent research has focused on a priori philosophical frameworks and has used artificial events as probes, in part because collecting, characterizing, and analyzing real-life moral events is a major undertaking. This dissertation attempts such an undertaking.

A total of 758 autobiographical memories of personal moral events were collected from a well-characterized and representative sample of 100 healthy Californian adults. Transcriptions of the events were further characterized, and all data were entered into a large, searchable database. An initial set of results provides a detailed description of the participants and the memories of moral events they generated. This description showed that participants were highly representative of the general population of California; that the overall number and patterns of moral events recollected were relatively universal and not influenced by gender, ethnicity, IQ, or personality; and that the moral events produced could generally be judged quite reliably, both by the participants themselves and by independent raters.

The database was further analyzed with respect to three specific aims: (1) to study the semantic structure of real-life moral events; (2) to study the effects of focal lesions to emotion-related brain regions on recollection of moral events; (3) to study the temporal distribution of autobiographical moral events. We found that real-life moral events have a hierarchical structure, with two broad categories of “good” and “bad”, and subordinate categories of “good”, “lying”, “stealing”, and “hurting another person”. These categories define the most common scripts encountered in real life that have strong moral value. In studying neurological patients with focal lesions to the ventromedial prefrontal cortex or the amygdala, we found no evidence for a notable skew in the moral events that were recollected, providing further evidence for the universality and robustness of such events in our autobiographical memories. Finally, we found that positively valenced moral events were systematically recalled as being more recent in time than negatively valenced moral events, a temporal bias that was independent of absolute participant age. The methods used here, the database that was constructed, and the scientific questions that were analyzed constitute the first comprehensive investigation of a large number of real-life moral events and provide a rich resource for future studies.

}, address = {1200 East California Boulevard, Pasadena, California 91125}, advisor = {Adolphs, Ralph}, }