@article{Whitney:1996,
  author        = {Whitney, P. and Budd, D.},
  title         = {Think-aloud protocols and the study of comprehension},
  journal       = {Discourse Processes},
  volume        = {21},
  pages         = {341--351},
  year          = {1996},
  abstract      = {States that although the think-aloud method (TAM) is being used with increasing frequency in studying text comprehension, some skepticism of its value remains. Discusses assumptions behind TAM, aspects of comprehension it can reveal, and directions for research using TAM. Argues that TAM is a useful technique for tracking changes in the contents of working memory during comprehension.},
  date-added    = {2008-03-30 23:19:53 +0200},
  date-modified = {2008-03-30 23:20:44 +0200},
}
@article{Warnecke:7,
  author        = {Warnecke, R.B. and Johnson, T.P. and Chavez, N. and Sudman, S. and O'Rourke, D.P. and Lacey, L.},
  title         = {Improving question wording in surveys of culturally diverse populations},
  journal       = {Annals of Epidemiology},
  volume        = {7},
  number        = {5},
  pages         = {334--342},
  year          = {1997},
  internal-note = {Fields corrected: original entry had year 7, volume 5, and the page range 334-342 in the number field; the published citation is Annals of Epidemiology 7(5):334-342, 1997. Citation key left as Warnecke:7 so existing citations still resolve.},
  abstract      = {PURPOSE: The purpose of this paper is to briefly describe a theoretical model articulating cognitive theory and sources of potential response bias resulting from racial or ethnic cultural experience to survey questions that deal with health behavior. The theory components are then evaluated using questions obtained from national health surveys conducted by the National Center for Health Statistics and Centers for Disease Control and Prevention. The analysis explores the effects of four cognitive tasks involved in responding to questions as specified by the model: question interpretation, information retrieval from memory, judgment formation, and response editing. Implications for epidemiological research are considered. METHODS: Data were collected from a purposive sample of 423 adults aged 18 through 50 who were recruited to ensure equal numbers of African American, Puerto Rican, Mexican American, and non-Hispanic white respondents, stratified by age, gender, and education. Individual questions were selected for evaluation to ensure variation by topic and question format. Probes related to each of the cognitive tasks were designed to obtain insight into the underlying cognitive processes used by respondents to answer survey questions. All statistical analyses used logistic regression or ordinary least squares multiple regression as appropriate. RESULTS: Variation by race/ethnicity was found in the way respondents defined physical activity in a series of questions used in the Centers for Disease Control and Prevention Behavioral Risk Factor Surveillance System (BRFSS). Gender and race/ethnicity appeared to influence interpretation in the absence of specific cues in the question format about how to respond. Strategies used to retrieve information from memory did not appear to be influenced by respondent culture; however, frequency of the event was associated with the recall strategy in that more frequent or regular events were more likely to result in estimates about frequency, whereas unusual or seldom occurring events were counted. Effects of race/ethnicity on judgment formation seem to be reflected in the propensity of respondents' willingness to use extreme response categories. Most effects due to race/ethnicity were found in respondent editing of answers. Race/ethnicity was found to be associated with a social desirability trait; with willingness to disclose socially undesirable behavior, particularly to interviews from racial or ethnic groups that differed from the respondent; and with the tendency to overreport socially desirable behavior. CONCLUSIONS: Overall, the results of this research suggest several ways in which the validity of questions about risk behavior can be improved. In designing such questions, the investigator should envision the interview as a structured conversation in which ordinary conversational norms apply. Thus, questions that might request redundant information or that are threatening to the respondent need to be asked in ways that minimize these effects. Using interviewers of the same racial or ethnic group is important. Attending to the order of questions to ensure that redundant information is not requested is important. Writing questions to ensure that where response cues occur they lead the respondent to answer in unbiased ways is also important. Testing questions for potential racial or ethnic bias before using them is also important, even if the questions have been used successfully with population groups other than that or those included in a study.},
  date-added    = {2008-03-30 23:18:01 +0200},
  date-modified = {2008-03-30 23:19:40 +0200},
}
@book{Someren:1994,
  author        = {van Someren, M.W. and Barnard, Y.F. and Sandberg, J.A.C.},
  title         = {The think-aloud method: A practical guide to modelling cognitive processes},
  publisher     = {San Diego, CA: Academic Press},
  year          = {1994},
  date-added    = {2008-03-30 23:16:36 +0200},
  date-modified = {2008-03-30 23:17:42 +0200},
}
@book{Turner:1984,
  author        = {Turner, C.F. and Martin, E.},
  title         = {Surveying subjective phenomena},
  publisher     = {New York: Russell Sage},
  year          = {1984},
  date-added    = {2008-03-30 23:15:48 +0200},
  date-modified = {2008-03-30 23:16:24 +0200},
}
@article{Tucker:1997,
  author        = {Tucker, C.},
  title         = {Measurement issues surrounding the application of cognitive psychology in survey research},
  journal       = {Bulletin de M{\'e}thodologie Sociologique},
  volume        = {55},
  pages         = {67--92},
  year          = {1997},
  abstract      = {Dans l'empressement {\`a} appliquer la psychologie cognitive et ses m{\'e}thodes dans la recherche sociale par questionnaire pendant la derni{\`e}re d{\'e}cennie, une attention insuffisante a {\'e}t{\'e} donn{\'e}e aux principes scientifiques. Cet article fournit un cadre pour corriger ce probl{\`e}me en se concentrant sur des m{\'e}thodes qui peuvent am{\'e}liorer la validit{\'e} et la fiabilit{\'e} des donn{\'e}es collect{\'e}es avec l'appui de la psychologie cognitive. Ces m{\'e}thodes utilisent de meilleurs plans d'exp{\'e}riences et de meilleures techniques de mesure. Des plans d'exp{\'e}riences qui facilitent la comparaison entre des proc{\'e}dures cognitives alternatives et des travaux de chercheurs diff{\'e}rents sont pr{\'e}sent{\'e}s. Un autre aspect de ces plans est qu'ils demandent plus d'attention dans l'{\'e}laboration du protocole exp{\'e}rimental. En ce qui concerne la mesure, des techniques pour rendre l'utilisation des donn{\'e}es qualitatives plus syst{\'e}matique sont discut{\'e}es, et des m{\'e}thodes de construction d'indicateurs ordinaux et d'intervalle sont pr{\'e}sent{\'e}es},
  date-added    = {2008-03-30 23:14:03 +0200},
  date-modified = {2008-03-30 23:15:43 +0200},
}
@article{Trabasso:1993,
  author        = {Trabasso, T. and Suh, S.},
  title         = {Understanding text: Achieving explanatory coherence through on-line inferences and mental operations in working memory},
  journal       = {Discourse Processes},
  volume        = {16},
  pages         = {3--34},
  year          = {1993},
  abstract      = {Studies whether global, causal inferences are made during comprehension. Provides verbal protocol data in the form of talking aloud during reading. Discusses the data with reference to processing claims and working-memory models. Considers what verbal protocols reveal about processing.},
  date-added    = {2008-03-30 23:12:17 +0200},
  date-modified = {2008-03-30 23:13:50 +0200},
}
@book{Tourangeau:2000,
  author        = {Tourangeau, R. and Rips, L.J. and Rasinski, K.},
  title         = {The psychology of survey response},
  publisher     = {Cambridge: Cambridge University Press},
  year          = {2000},
  date-added    = {2008-03-30 23:11:18 +0200},
  date-modified = {2008-03-30 23:12:10 +0200},
}
@article{Tourangeau:1988,
  author        = {Tourangeau, R. and Rasinski, K.},
  title         = {Cognitive processes underlying context effects in attitude measurement},
  journal       = {Psychological Bulletin},
  volume        = {103},
  pages         = {299--314},
  year          = {1988},
  abstract      = {We begin this article with the assumption that attitudes are best understood as structures in long-term memory, and we look at the implications of this view for the response process in attitude surveys. More specifically, we assert that an answer to an attitude question is the product of a four-stage process. Respondents first interpret the attitude question, determining what attitude the question is about. They then retrieve relevant beliefs and feelings. Next, they apply these beliefs and feelings in rendering the appropriate judgment. Finally, they use this judgment to select a response. All four of the component processes can be affected by prior items. The prior items can provide a framework for interpreting later questions and can also make some responses appear to be redundant with earlier answers. The prior items can prime some beliefs, making them more accessible to the retrieval process. The prior items can suggest a norm or standard of comparison for making the judgment. Finally, the prior items can create consistency pressures or pressures to appear moderate.},
  date-added    = {2008-03-30 23:09:50 +0200},
  date-modified = {2008-03-30 23:11:09 +0200},
}
@inbook{Tourangeau:1992,
  author        = {Tourangeau, R.},
  chapter       = {Attitudes as memory structures: Belief sampling and context effects},
  title         = {Context effects in social and psychological research},
  editor        = {N. Schwarz and S. Sudman},
  pages         = {35--47},
  publisher     = {New York: Springer-Verlag},
  year          = {1992},
  date-added    = {2008-03-30 23:08:16 +0200},
  date-modified = {2008-03-30 23:09:23 +0200},
}
@article{Tomaskovic-Devey:1994,
  author        = {Tomaskovic-Devey, D. and Leiter, J. and Thompson, S.},
  title         = {Organizational survey nonresponse},
  journal       = {Administrative Science Quarterly},
  volume        = {39},
  pages         = {439--457},
  year          = {1994},
  abstract      = {Organizational surveys often have fairly low response rates. Sample surveys with low response rates can produce biased samples, particularly if key organizational characteristics affect the pattern of survey response. In this paper we develop an organizational theory of survey response that details why well-known organizational characteristics should be expected to influence the probability that an organization will respond to a survey request. We then test the empirical predictions of this theory on a survey of private, for-profit employment organizations.},
  date-added    = {2008-03-30 23:06:24 +0200},
  date-modified = {2008-03-30 23:07:58 +0200},
}
@article{Sykes:1987,
  author        = {Sykes, W. and Morton-Williams, J.},
  title         = {Evaluating survey questions},
  journal       = {Journal of Official Statistics},
  volume        = {3},
  pages         = {191--207},
  year          = {1987},
  date-added    = {2008-03-30 23:05:18 +0200},
  date-modified = {2008-03-30 23:06:00 +0200},
}
@book{Sudman:1982,
  author        = {Sudman, S. and Bradburn, N.},
  title         = {Asking questions: A practical guide to questionnaire design},
  publisher     = {San Francisco: Jossey-Bass},
  year          = {1982},
  date-added    = {2008-03-30 23:04:26 +0200},
  date-modified = {2008-03-30 23:05:04 +0200},
}
@article{Skinner:1950,
  author        = {Skinner, B.F.},
  title         = {Are theories of learning necessary?},
  journal       = {Psychological Review},
  volume        = {57},
  pages         = {193--216},
  year          = {1950},
  date-added    = {2008-03-30 23:02:30 +0200},
  date-modified = {2008-03-30 23:03:04 +0200},
}
@book{Selltiz:1959,
  author        = {Selltiz, C. and Jahoda, M. and Deutsch, M. and Cook, S.W.},
  title         = {Research methods in social relations},
  publisher     = {New York: Holt, Rinehart \& Winston},
  year          = {1959},
  date-added    = {2008-03-30 23:00:48 +0200},
  date-modified = {2008-03-30 23:01:52 +0200},
}
@article{Schwarz:1991,
  author        = {Schwarz, N. and Strack, F. and Mai, H.},
  title         = {Assimilation and contrast effects in part-whole question sequences: A conversational logic analysis},
  journal       = {Public Opinion Quarterly},
  volume        = {55},
  pages         = {3--23},
  year          = {1991},
  abstract      = {A theoretical model of the emergence of assimilation and contrast effects in part-whole question sequences is presented. When one specific question precedes a general question and the two are not assigned to the same conversational context, respondents use the information primed by the specific question to form the general judgment. This results in part-whole assimilation effects. If both questions are perceived as belonging together, however, conversational norms of nonredundancy prohibit the repeated use of information that has already been provided in response to the specific question when making the general judgment. Accordingly, respondents interpret the general question to refer to aspects other than the ones covered by the specific question. Contrast effects may emerge in that case under specified conditions. If several specific questions precede the general question, however, the general one is always interpreted as a request for a summary judgment. This results in assimilation effects, even under conditions that would foster contrast effects if only one specific question is asked. The model is supported by experimental data and provides a coherent account of apparently contradictory findings previously reported in the survey literature.},
  date-added    = {2008-03-30 22:47:55 +0200},
  date-modified = {2008-03-30 22:50:14 +0200},
}
@article{Schwarz:2006,
  author        = {Schwarz, N. and Bless, H. and Bohner, G. and Harlacher, U. and Kellenbenz, M.},
  title         = {Response scales as frames of reference: The impact of frequency range on diagnostic judgements},
  journal       = {Applied Cognitive Psychology},
  volume        = {5},
  number        = {1},
  pages         = {37--49},
  year          = {1991},
  internal-note = {Year corrected from 2006 to 1991 (Applied Cognitive Psychology 5(1), 1991). Citation key left as Schwarz:2006 so existing citations still resolve.},
  abstract      = {In social and psychological research, respondents are often asked to report the frequency of a behaviour by checking the appropriate alternative from a list of response categories provided to them. Previous research indicated that respondents extract comparison information from the range of the response alternatives, assuming that the average respondent is represented by values in the middle range of the scale, and that the extremes of the scale represent the extremes of the distribution. Extending this line of research, the present studies demonstrate that the users of a respondent's report are also likely to use the range of the response alternatives as a frame of reference in evaluating the implications of the report. Specifically, subjects are found to draw different conclusions about the respondent's personality (Experiment 1), or the severity of his or her medical condition (Experiment 2), from the same absolute frequency report, depending upon the range of the response scale on which the frequency was checked. Moreover, experienced medical doctors were as likely to be influenced by scale range as first-year medical students, suggesting that the phenomenon is of considerable applied importance. Implications for the use of response alternatives in psychological research and diagnostic judgement are discussed.},
  date-added    = {2008-03-30 22:46:21 +0200},
  date-modified = {2008-03-30 22:47:21 +0200},
}
@article{Richardson:2004,
  author        = {Richardson, J.D.},
  title         = {Isolating frequency scale effects on self-reported loneliness},
  journal       = {Personality and Individual Differences},
  volume        = {36},
  number        = {1},
  pages         = {235--244},
  year          = {2004},
  abstract      = {Response options presented on questionnaires affect self-reports concerning the frequency of loneliness. Two experiments using items from the UCLA Loneliness Scale---Version 3 (ULS) presented participants with either of two response option ranges. In Experiment 1, response options in the high-frequency condition ranged from ``Every Day'' to ``Never.'' In the low-frequency condition, response options ranged from ``Once a month or more often'' to ``Never.'' In Experiment 2, response options ranged from ``Every Day'' to ``Never'' in the high-frequency condition and from ``Once a week or more often'' to ``Never'' in the low-frequency condition. Results confirmed that self-reported frequency of experiences related to loneliness increased when participants were presented with response options emphasizing higher frequency. Implications for the ULS in particular, and questionnaire design in general, are discussed.},
  date-added    = {2008-03-30 22:45:13 +0200},
  date-modified = {2008-03-30 22:45:57 +0200},
}
@article{Rockwood:1997,
  author        = {Rockwood, T.H. and Sangster, R.L. and Dillman, D.A.},
  title         = {The Effect of Response Categories on Questionnaire Answers},
  journal       = {Sociological Methods \& Research},
  volume        = {26},
  number        = {1},
  pages         = {118--140},
  year          = {1997},
  abstract      = {This article reports the effect that the ranges presented in answer categories for survey questions can have on respondent answers. Response categories were manipulated in a split-ballot survey conducted in both telephone and mail modes. These categories, presented in the separate ballots, overlapped in one category; the other categories were unique to each ballot. The experiment was conducted on four questions: two frequent and mundane and two rare and salient. It was found that the response categories significantly affected the response for frequent and mundane questions. One question demonstrated a significant difference in response between the mail and telephone modes. For this question, a response scale with a limited number of socially desirable alternatives resulted in a social desirability effect in the telephone mode. Alternatively, the telephone mode demonstrated an extremeness effect when the response scale comprised a greater number of socially desirable alternatives.},
  date-added    = {2008-03-30 22:43:35 +0200},
  date-modified = {2008-03-30 22:44:33 +0200},
}
@article{Rammstedt:2007,
  author        = {Rammstedt, B. and Krebs, D.},
  title         = {Does Response Scale Format Affect the Answering of Personality Scales? Assessing the Big Five Dimensions of Personality with Different Response Scales in a Dependent Sample},
  journal       = {European Journal of Psychological Assessment},
  volume        = {23},
  number        = {1},
  pages         = {32--38},
  year          = {2007},
  abstract      = {When developing a questionnaire, one puts much effort into item formulation. Whether the format of the corresponding response scales affects response behavior, however, has rarely been studied, to date. The present study investigates (1) the effects of the response scale direction (ranging from positive to negative vs. negative to positive) and (2) the match between numerical labeling and scale direction, i.e., assigning high numbers to the positive pole and low numbers to the negative pole or vice versa. These response scale effects were studied based on responses in the BFI-10, the short-scale version of the widely-used Big Five Inventory (BFI), assessing the Big Five dimensions of personality by two items each. Using a dependent sample design, subjects answered the 10 items using end-point labeled response scales ranging from the negative (labeled "1") to the positive pole (labeled "8") at Time 1. At Time 2 (approximately 3 weeks later), respondents were split into two conditions: The BFI-10 was administered again with an 8-point scale ranging from the positive to the negative pole. In the first condition, the positive pole was labeled "8" and the negative pole was labeled "1"; in the second condition, the positive pole was labeled "1" and the negative pole was labeled "8." Results clearly support the notion that the direction of the response scale (Condition 1) does not affect response behavior. There were no differences in means, standard deviations, or in the intercorrelations patterns. However, there seems to be an intuitive match between the positive pole and high numerical labeling. When poles were counterintuitively labeled (Condition 2), significant differences could be identified for all analyses conducted.},
  date-added    = {2008-03-30 22:42:24 +0200},
  date-modified = {2008-03-30 22:43:15 +0200},
}
@article{Schwarz:1985,
  author        = {Schwarz, N. and Hippler, H. and Deutsch, B. and Strack, F.},
  title         = {Response scales: Effects of category range on reported behavior and comparative judgments},
  journal       = {Public Opinion Quarterly},
  volume        = {49},
  pages         = {388--395},
  year          = {1985},
  date-added    = {2008-03-30 22:40:41 +0200},
  date-modified = {2008-03-30 22:41:47 +0200},
}
@article{Schwarz:1990,
  author        = {Schwarz, N. and Bienias, J.},
  title         = {What mediates the impact of response alternatives on frequency reports of mundane behaviors},
  journal       = {Applied Cognitive Psychology},
  volume        = {4},
  pages         = {61--72},
  year          = {1990},
  abstract      = {Previous research demonstrated that respondents assume that the range of precoded response alternatives reflects the researcher's knowledge of the distribution of opinions or behaviours in the population. This assumption may influence respondents' reports in two ways: respondents may either use the range of the response alternatives as a frame of reference in estimating their own behavioural frequencies, or they may be reluctant to report frequencies that appear extreme in the context of the scale. Three experiments using reports of mundane behaviours, namely watching TV and drinking beer, were conducted to differentiate between the frame of reference and the self-presentation hypothesis. The results of all studies favour the frame of reference hypothesis, and suggest that the impact of response alternatives is the more pronounced the less episodic information about the behaviour is accessible in memory. Specifically, proxy-reports were found to be more affected by the range of response alternatives than self-reports (Experiments 1 and 2), and respondents with dispositionally low access to self-related information were found to be more affected than respondents with dispositionally high access to self-related information (Experiment 3). Implications for questionnaire construction are discussed.},
  date-added    = {2008-03-30 22:39:10 +0200},
  date-modified = {2008-03-30 22:40:29 +0200},
}
@book{Schuman:1981,
  author        = {Schuman, H. and Presser, S.},
  title         = {Questions and answers in attitude surveys: Experiments on question form, wording, and context},
  publisher     = {New York: Academic Press},
  year          = {1981},
  date-added    = {2008-03-30 22:38:04 +0200},
  date-modified = {2008-03-30 22:38:50 +0200},
}
@article{Schuman:1966,
  author        = {Schuman, H.},
  title         = {The random probe: A technique for evaluating the validity of closed questions},
  journal       = {American Sociological Review},
  volume        = {31},
  pages         = {218--222},
  year          = {1966},
  internal-note = {Volume corrected from 21 to 31 (American Sociological Review 31(2), 1966).},
  abstract      = {The familiar dilemma of open versus closed interview questions becomes especially acute when surveys are undertaken outside middle-class American society. Inevitable ignorance of the subtleties of another culture leads the researcher toward an open-ended approach, while his experience with the difficulties of channeling diverse free responses into a useful frame of reference and of coding enormous masses of verbal data encourages him to rely on closed questions. The method of ``random probes'' suggested here is intended to allow a survey researcher to eat his cake and still have a little left over.},
  date-added    = {2008-03-30 22:31:11 +0200},
  date-modified = {2008-03-30 22:32:40 +0200},
}
@article{Schober:1997,
  author        = {Schober, M.F. and Conrad, F.G.},
  title         = {Does conversational interviewing reduce survey measurement error?},
  journal       = {Public Opinion Quarterly},
  volume        = {61},
  pages         = {576--602},
  year          = {1997},
  date-added    = {2008-03-30 22:29:32 +0200},
  date-modified = {2008-03-30 22:30:16 +0200},
}
@inbook{Schober:1999,
  author        = {Schober, M.F.},
  chapter       = {Making sense of questions: An interactional approach},
  title         = {Cognition and survey research},
  editor        = {M. Sirken and D. Herrmann and S. Schechter and N. Schwarz and J. Tanur and R. Tourangeau},
  pages         = {77--93},
  publisher     = {New York: John Wiley \& Sons},
  year          = {1999},
  date-added    = {2008-03-30 22:28:05 +0200},
  date-modified = {2008-03-30 22:29:18 +0200},
}
@inbook{Schaeffer:2004,
  author        = {Schaeffer, N.C. and Dykema, J.L.},
  chapter       = {Improving the clarity of closely related concepts},
  title         = {Methods for testing and evaluating survey questionnaires},
  editor        = {S. Presser and J. Rothgeb and M. Couper and J. Lessler and E. Martin and J. Martin},
  publisher     = {New York: John Wiley \& Sons},
  year          = {2004},
  date-added    = {2008-03-30 22:26:07 +0200},
  date-modified = {2008-03-30 22:27:22 +0200},
}
@inbook{Schaeffer:1999,
  author        = {Schaeffer, N.C.},
  chapter       = {Asking questions about threatening topics: A selective overview},
  title         = {The science of self-report: Implications for research and practice},
  editor        = {A.A. Stone and J.S. Turkkan and C.A. Bachrach and J.B. Jobe and H.S. Kurtzman and V.S. Cain},
  pages         = {105--122},
  publisher     = {Mahwah, NJ: Lawrence Erlbaum},
  year          = {1999},
  date-added    = {2008-03-30 22:23:52 +0200},
  date-modified = {2008-03-30 22:26:03 +0200},
}
@inbook{Reeve:2004,
  author        = {Reeve, B.B. and Masse, L.C.},
  chapter       = {Item response theory modeling for questionnaire evaluation},
  title         = {Methods for testing and evaluating survey questionnaires},
  editor        = {S. Presser and J. Rothgeb and M. Couper and J. Lessler and E. Martin and J. Martin},
  publisher     = {New York: John Wiley \& Sons},
  year          = {2004},
  abstract      = {In this chapter we provide a basic introduction to IRT modeling (item response theory), including a discussion of the common IRT models used in research, underlying assumptions of these models, and differences between CTT and IRT modeling. The introduction is followed by a demonstration of the information that can be gained by using IRT to evaluate the psychometric properties of a questionnaire.},
  date-added    = {2008-03-30 22:13:57 +0200},
  date-modified = {2008-03-30 22:16:26 +0200},
}
@article{Pober:2001,
  author        = {Pober, J.S. and Neuhauser, C.S. and Pober, J.M.},
  title         = {Obstacles facing translational research in academic medical centers},
  journal       = {The FASEB Journal},
  volume        = {15},
  number        = {13},
  pages         = {2303--2313},
  year          = {2001},
  abstract      = {Over the last quarter of the 20th century, there has been a boom in biomedical research discoveries that, for the most part, has not been successfully exploited for improving medical therapy or diagnosis. This lack of success is surprising because there is a broad consensus within academic medical centers (AMCs) that a primary mission is to move scientific discoveries into meaningful clinical outcomes, and there are numerous opportunities for doing so. We illustrate the latter point with 10 clinical opportunities for translating scientific discoveries from our field of vascular biology and transplantation. We attribute the limited success of translation to various factors, chief of which is that translation is rarely straightforward and requires continuing research in both the clinic and the laboratory. Translational research is hindered by insufficient targeted resources, a shortage of qualified investigators, an academic culture that hinders collaboration between clinical and laboratory-based investigators, a traditional structure of the AMC that favors departmental efforts over interdisciplinary programs, an increasing regulatory burden, and a lack of specific mechanisms within the AMC for facilitating solutions to these problems. We offer several suggestions to reduce these impediments.---Pober, J. S., Neuhauser, C. S., Pober, J. M. Obstacles facing translational research in academic medical centers.},
  date-added    = {2008-03-30 22:12:10 +0200},
  date-modified = {2008-03-30 22:13:29 +0200},
}
@book{Payne:1951,
  author        = {Payne, S.L.},
  title         = {The art of asking questions},
  publisher     = {Princeton, NJ: Princeton University Press},
  year          = {1951},
  date-added    = {2008-03-30 22:11:18 +0200},
  date-modified = {2008-03-30 22:11:49 +0200},
}
@book{Oppenheim:1992,
  author        = {Oppenheim, A.N.},
  title         = {Questionnaire design, interviewing and attitude measurement},
  edition       = {Second},
  publisher     = {London: Pinter Publications},
  year          = {1992},
  date-added    = {2008-03-30 22:10:13 +0200},
  date-modified = {2008-03-30 22:11:03 +0200},
}
@book{Oppenheim:1966,
  author        = {Oppenheim, A.N.},
  title         = {Questionnaire design and attitude measurement},
  publisher     = {New York: Basic Books},
  year          = {1966},
  date-added    = {2008-03-30 22:09:31 +0200},
  date-modified = {2008-03-30 22:10:07 +0200},
}
@article{Nisbett:1977,
  author        = {Nisbett, R.E. and Wilson, T.D.},
  title         = {Telling more than we know: Verbal reports on mental processes},
  journal       = {Psychological Review},
  volume        = {84},
  pages         = {231--259},
  year          = {1977},
  abstract      = {Evidence is reviewed which suggests that there may be little or no direct introspective access to higher order cognitive processes. It is proposed that when people attempt to report on their cognitive processes they do not do so on the basis of any true introspection.},
  date-added    = {2008-03-30 21:03:45 +0200},
  date-modified = {2008-03-30 21:04:54 +0200},
}
@book{Newell:1972,
  author        = {Newell, A. and Simon, H.A.},
  title         = {Human Problem Solving},
  publisher     = {Englewood Cliffs, NJ: Prentice-Hall},
  year          = {1972},
  date-added    = {2008-03-30 21:02:46 +0200},
  date-modified = {2008-03-30 21:03:25 +0200},
}
@book{Neisser:1988,
  author        = {Neisser, U. and Winograd, E.},
  title         = {Remembering reconsidered: Ecological and traditional approaches to the study of memory},
  publisher     = {New York: Cambridge University Press},
  year          = {1988},
  date-added    = {2008-03-30 21:01:43 +0200},
  date-modified = {2008-03-30 21:02:35 +0200},
}
@article{Morton-Williams:1984,
  author        = {Morton-Williams, J. and Sykes, W.},
  title         = {The use of interaction coding and follow-up interviewers to investigate the comprehension of survey questions},
  journal       = {Journal of the Market Research Society},
  volume        = {26},
  pages         = {109--127},
  year          = {1984},
  abstract      = {This paper was based on findings from a research programme, funded by the ESRC and instituted at the Survey Methods Centre at SCPR under the directorship of the late Professor Gerald Hoinville. Building on the work of researchers in a number of diverse fields, the programme of research sought to 'lay bare' aspects of the survey process which are normally concealed: namely the interactions that take place between interviewers and respondents in the field. The main tool which was developed to assist in this was a classification of interviewer and respondent behaviour, applied in a systematic way to tape-recordings of interviews taking place in the field. For the purposes of this paper, we selected codes which we felt were indicative of problems with the administration or answering of survey questions (e.g. requests for clarification, questions misread), and attempted to identify the items in a survey which seemed persistently to create difficulties of one kind or another. Clarification of the nature of these difficulties was sought using an approach developed in the UK by Bill Belson. Semi-structured interviews were carried out with respondents to the original survey exploring - for each 'problem' item -their understanding of the question, the process by which they had arrived at their answer, their motivation to respond and their views as to the 'accuracy' of their response. The methods we employed now form part of a well-recognised battery of approaches which fall under the loose rubric of cognitive techniques. At the time - in the UK at least - we were breaking new ground and this is all too evident in our rather laborious introductory sections. Nowadays a brief reference to behaviour coding and retrospective 'think aloud' cognitive interviews would do away with most of the first few pages! Whatever the intrinsic value of the contents of this paper, we believe that it contributed to the development of current interest in, and application of more deeply probing methods for developing and testing survey questions. Probably the most famous of all surveys - the Census - is currently being rehauled using such methods. Our re-reading of the paper reminds us only too vividly of the sense of excitement (and occasional despair) with which we approached the whole project. Most remarkable to recall is the opportunity which was given to us to undertake a labour intensive, exploratory piece of work. And to follow our interest without commitment to a specific output or 'useful' application beyond the reporting of our findings.},
  date-added    = {2008-03-30 20:59:27 +0200},
  date-modified = {2008-03-30 21:01:22 +0200},
}
@article{Morton-Williams:1979, author = {Morton-Williams, J.}, date-added = {2008-03-30 20:58:12 +0200}, date-modified = {2008-03-30 20:58:59 +0200}, journal = {Quality and Quantity}, pages = {59-75}, title = {The use of `verbal interaction coding' for evaluating a questionnaire}, volume = {13}, year = {1979} }
@inbook{McKay:1996, author = {McKay, R.B. and Breslow, M.J. and Sangster, R.L. and Gabbard, S.M. and Reynolds, R.W. and Nakamoto, J.M.}, chapter = {Translating survey questionnaires: Lessons learned}, date-added = {2008-03-30 20:55:48 +0200}, date-modified = {2008-03-30 20:57:31 +0200}, editor = {M.T. Braverman and J.K. Slater}, pages = {93-104}, publisher = {San Francisco: Jossey-Bass}, title = {Advances in survey research}, year = {1996} }
@article{Martin:1964, author = {Martin, J.}, date-added = {2008-03-30 20:52:40 +0200}, date-modified = {2008-03-30 20:53:31 +0200}, journal = {British Journal of Social and Clinical Psychology}, pages = {216-225}, title = {Acquiescence--measurement and theory}, volume = {3}, year = {1964} }
@inbook{Martin:1983, author = {Martin, E.}, chapter = {Surveys as social indicators: Problems in monitoring trends}, date-added = {2008-03-30 20:51:11 +0200}, date-modified = {2008-03-30 20:52:21 +0200}, editor = {P.H. Rossi and J.D. Wright and A.B. Anderson}, publisher = {New York: Academic Press}, title = {Handbook of survey research}, year = {1983} }
@article{Marquis:1986, abstract = {This discussion focuses on a reexamination of empirical evidence on the direction and size of response biases and estimates of the variance of the response error distributions for sensitive topics (receipt of welfare, income, alcohol use, drug use, criminal history, and embarrassing medical conditions). Partial or unidirectional record check studies are used as evidence of underreporting on specific topics. The estimates suggest that response biases are not uniformly negative but center around zero. Unreliability as measured in response error variance estimates appears to be very high. A response model based on classical test theory and other studies of Hansen, Hurwitz, and Bershad, and Hansen, Hurwitz, and Pritzker is presented. A review is given of empirical studies which estimate response error parameters. Estimates of response bias and error variance are given with a discussion of implications for methodological research and survey design. The conclusion in this review of the literature on response bias and reliability is that modeling of design features and response errors is not possible. There is no apparent link between features such as length of recall period, respondent rules, mode of data collection, wording of survey questions, and rules for matching the survey and criterion sources. The results of the review for income show that income studies and welfare studies have a net response bias close to zero or no bias. In the income studies, other sources of potential estimation error from sampling, definitions, treatment of missing observations may cause survey estimates to be less than estimates made from administrative records. The drug and alcohol estimates are more disperse but still are not negative. For example, in the comparison of drug self-reports to urine test reports, 50% of studies show negative estimates, and 50% show positive estimates. Net bias estimates of crime data tend to be positive. 
For socially embarrassing problems (mental illness, hemorrhoids, and diseases of the genitourinary tract system), the large average response bias estimates are mostly negative, which may be due to poor questionnaire design. Only income estimates are higher than the accepted value of 70%, which means techniques should be used to overcome the bias of random measurement error.}, author = {Marquis, K.H. and Marquis, M.S. and Polich, J.M.}, date-added = {2008-03-30 20:49:32 +0200}, date-modified = {2008-03-30 20:51:00 +0200}, journal = {Journal of the American Statistical Association}, number = {394}, pages = {381-389}, title = {Response bias and reliability in sensitive topic surveys}, volume = {81}, year = {1986} }
@article{Lee:1999, abstract = {Surveys of childhood vaccinations are often highly inaccurate, due to parental misreporting. We conducted three experiments to examine the source of the inaccuracies. In Experiment 1, we provided parents with memory aids; these aids did little to improve reporting accuracy. Two further experiments asked whether parents forgot what they knew about their children's vaccinations, or whether they never knew the information. In Experiment 2 we surveyed parents both immediately and ten weeks after their child's medical visit. Accuracy was only slightly better than chance immediately afterwards; ten weeks later performance had not changed significantly. Experiment 3 compared reports in both recall and recognition conditions. Although the recognition condition lowered the response burden on parents it did not produce more accurate reports. We conclude that low levels of accuracy in parental reports on vaccinations appear to reflect poor initial encoding rather than retrieval failure.}, author = {Lee, L. and Brittingham, A. and Tourangeau, R. and Ching, P. and Willis, G.}, date-added = {2008-03-30 20:47:01 +0200}, date-modified = {2008-03-30 20:48:53 +0200}, journal = {Applied Cognitive Psychology}, pages = {43-63}, title = {Are reporting errors due to encoding limitations or retrieval failure? Surveys of child vaccination as a case study}, volume = {13}, year = {1999} }
@article{Lashley:1923, abstract = {The problem which confronts the behaviorist is to find in the physical world deterministic relations between nonqualitative, discrete entities in time and space which fulfill certain conditions of relationship laid down by subjective evidence. Enough of organic behavior is not known to be able to say just how bodily mechanisms do bring about the details of behavior, but it is possible to make rather probable guesses as to what is going on at any given time, and to outline roughly the kind of mechanisms that control activity. The object of this article is to discuss consciousness in the context of behaviorism and to point out that the supposed problem of consciousness does not present insurmountable difficulties to behavioristic treatment. The author of this article concludes that the behaviorist may go his way without fear that his final account will fail of including 'mind' and with the conviction that the inclusion of 'mind' will add nothing to scientific psychology.}, author = {Lashley, K.S.}, date-added = {2008-03-30 20:44:51 +0200}, date-modified = {2008-03-30 20:46:49 +0200}, journal = {Psychological Review}, pages = {329-353}, title = {The behavioristic interpretation of consciousness II}, volume = {30}, year = {1923} }
@article{Krosnick:1991, abstract = {This paper proposes that when optimally answering a survey question would require substantial cognitive effort, some respondents simply provide a satisfactory answer instead. This behaviour, called satisficing, can take the form of either (1) incomplete or biased information retrieval and/or information integration, or (2) no information retrieval or integration at all. Satisficing may lead respondents to employ a variety of response strategies, including choosing the first response alternative that seems to constitute a reasonable answer, agreeing with an assertion made by a question, endorsing the status quo instead of endorsing social change, failing to differentiate among a set of diverse objects in ratings, saying don't know instead of reporting an opinion, and randomly choosing among the response alternatives offered. This paper specifies a wide range of factors that are likely to encourage satisficing, and reviews relevant evidence evaluating these speculations. Many useful directions for future research are suggested.}, author = {Krosnick, J.A.}, date-added = {2008-03-30 20:43:07 +0200}, date-modified = {2008-03-30 20:44:22 +0200}, journal = {Applied Cognitive Psychology}, pages = {213-236}, title = {Response strategies for coping with the cognitive demands on attitude measures in surveys}, volume = {5}, year = {1991} }
@book{Kirk:1986, author = {Kirk, J. and Miller, M.L.}, date-added = {2008-03-30 20:42:17 +0200}, date-modified = {2008-03-30 20:42:54 +0200}, publisher = {Newbury Park, CA: Sage}, title = {Reliability and validity in qualitative research}, year = {1986} }
@article{Johnson:1998, author = {Johnson, T.P.}, date-added = {2008-03-30 20:40:33 +0200}, date-modified = {2008-03-30 20:41:21 +0200}, journal = {ZUMA-Nachrichten Spezial}, pages = {1-40}, title = {Approaches to equivalence in cross-cultural and cross-national survey research}, volume = {3}, year = {1998} }
@article{Jobe:1993, abstract = {This article reviews the results of survey methodological research that illustrate phenomena of potential interest to investigators of memory, and also reviews psychological research designed to explore some of these phenomena under controlled laboratory conditions. We classify the phenomena reviewed into broad categories of remembering what events occurred, remembering when events occurred, and estimation and reconstruction processes used in reporting recurring events. We delineate the contributions of this research for our understanding of memory phenomena and show where this research has revealed gaps in our existing theories and knowledge. These findings present challenges for future collaboration between cognitive psychologists and survey methodologists.}, author = {Jobe, J.B. and Tourangeau, R. and Smith, A.F.}, date-added = {2008-03-30 20:39:12 +0200}, date-modified = {2008-03-30 20:40:24 +0200}, journal = {Applied Cognitive Psychology}, pages = {567-584}, title = {Contributions of survey research to the understanding of memory}, volume = {7}, year = {1993} }
@article{Jobe:1991, abstract = {In 1978 a small group of cognitive psychologists and survey methodologists initiated research in a new and exciting interdisciplinary field. This research applies the methods and theories of cognitive science to the study of respondents' answers to autobiographical and attitude surveys. This article describes the events that led up to the initiation of research, describes the history of the field, and overviews the cutting edge of research. Finally, the article offers perspectives on the benefits of collaborative research to both cognitive science and survey research, and the prospects for future research.}, author = {Jobe, J.B. and Mingay, D.J.}, date-added = {2008-03-30 20:37:54 +0200}, date-modified = {2008-03-30 20:39:06 +0200}, journal = {Applied Cognitive Psychology}, pages = {175-192}, title = {Cognition and survey measurement: History and overview}, volume = {5}, year = {1991} }
@inbook{Jobe:1996, author = {Jobe, J.B. and Herrmann, D.J.}, chapter = {Implications of models of survey cognition for memory theory}, date-added = {2008-03-30 20:35:34 +0200}, date-modified = {2008-03-30 20:37:29 +0200}, editor = {D. Herrmann and C. McEvoy and C. Hertzog and P. Hertel and M. Johnson}, pages = {193-205}, publisher = {Hillsdale, NJ: Erlbaum}, title = {Basic and Applied Memory Research: Vol. 2. Practical application}, year = {1996} }
@book{Hippler:1987, editor = {Hippler, H. and Schwarz, N. and Sudman, S.}, date-added = {2008-03-30 20:33:57 +0200}, date-modified = {2008-03-30 20:34:59 +0200}, publisher = {New York: Springer-Verlag}, title = {Social information processing and survey methodology}, year = {1987} }
@article{Hasher:1978, abstract = {Current views of prose memory argue that memory inaccuracies in the retelling of a complex event occur in part as the result of a storage deficit induced by the abstractive and assimilative aspects of prose processing. This view appears to contradict a large portion of the memory literature that shows, over long intervals, remarkably accurate recall. A perspective, based on an elaboration of Underwood's attributes model of memory, is advanced which proposes that for all types of information both detailed and thematic attributes are stored. Consequently, the type of recall one sees, whether reconstructive or reproductive in nature, depends in part upon events that occur at the time of the request for recall. Two experiments using prose passages as stimulus materials with retention tested by free recall support this perspective. Subjects were treated identically until the test of recall, when two sets of procedures were introduced, one that led subjects to reconstruct the story and one that led subjects to reproduce the story.}, author = {Hasher, L. and Griffin, M.}, date-added = {2008-03-30 20:31:23 +0200}, date-modified = {2008-03-30 20:33:24 +0200}, journal = {Journal of Experimental Psychology: Human Learning and Memory}, pages = {318-330}, title = {Reconstructive and reproductive processes in forgetting}, volume = {4}, year = {1978} }
@article{Harlow:1989, author = {Harlow, S.D. and Linet, M.S.}, date-added = {2008-03-30 20:29:07 +0200}, date-modified = {2008-03-30 20:30:06 +0200}, journal = {American Journal of Epidemiology}, pages = {233-248}, title = {Agreement between questionnaire data and medical records: The evidence for accuracy of recall}, volume = {129}, year = {1989} }
@book{Harkness:2003, editor = {Harkness, J.A. and Van de Vijver, F.J.R. and Mohler, P.}, date-added = {2008-03-30 20:26:04 +0200}, date-modified = {2008-03-30 20:28:52 +0200}, publisher = {Hoboken, NJ: John Wiley \& Sons}, title = {Cross-cultural survey methods}, year = {2003} }
@article{Stryker:2006, abstract = {Objective To understand the psychosocial outcomes related to decision-making processes of individuals eligible for participation in clinical trials. Methods Individuals eligible to participate in selected clinical trials were contacted to complete two surveys; one shortly after participants were identified, and the second 6 weeks after the first survey was completed (N = 50). Measures included subjective informed consent; satisfaction with decision-making; decisional regret; and timing of consent (early versus late signers). ANOVA and correlation coefficients were used to test the relationships between variables. Results Early signers reported themselves to be less informed about the details of their particular clinical trials than later signers (M = 81.9 versus 91.2; F = 5.5; p = .02). There was a non-significant trend for early signers to be less satisfied with their decisions than late signers. Satisfaction with decision-making and subjective informed consent were both strongly associated with later decisional regret (r = -.32 and -.30, respectively). However, there was no relationship between timing of consent and decisional regret. Conclusion Participants who enroll in clinical trials quickly may not believe they fully understand the implications of trial participation. In general, participants who do not believe they fully understand the implications of trial participation, or who are less satisfied with their decision to enroll in the trial may ultimately feel regret about their decision to participate. Practice implications More effort is needed to ensure that clinical trial participants fully understand the risks and benefits of participation and are satisfied with their decision to enroll in a trial prior to signing consent forms.}, author = {Stryker, J.E. and Wray, R.J. and Emmons, K.M. and Winer, E. 
and Demetri, G.}, date-added = {2008-03-30 20:23:57 +0200}, date-modified = {2008-03-30 20:25:37 +0200}, journal = {Patient Education and Counseling}, number = {1-2}, pages = {104-109}, title = {Understanding the decisions of cancer clinical trial participants to enter research studies: Factors associated with informed consent, patient satisfaction, and decisional regret}, volume = {63}, year = {2006} }
@article{Groves:1992, author = {Groves, R.M. and Cialdini, R.B. and Couper, M.P.}, date-added = {2008-03-30 20:21:34 +0200}, date-modified = {2008-03-30 20:22:27 +0200}, journal = {Public Opinion Quarterly}, pages = {475-495}, title = {Understanding the decision to participate in a survey}, volume = {56}, year = {1992} }
@inbook{Groves:1996, author = {Groves, R.M.}, chapter = {How do we know what we think they think is really what they think?}, date-added = {2008-03-30 20:19:26 +0200}, date-modified = {2008-03-30 20:21:02 +0200}, editor = {N. Schwarz and S. Sudman}, pages = {389-402}, publisher = {San Francisco: Jossey-Bass}, title = {Answering questions: Methodology for determining cognitive and communicative processes in survey research}, year = {1996} }
@article{Groenvold:1997, author = {Groenvold, M. and Klee, M.C. and Sprangers, M.A.G. and Aaronson, N.K.}, date-added = {2008-03-30 20:16:10 +0200}, date-modified = {2008-03-30 20:17:49 +0200}, journal = {Journal of Clinical Epidemiology}, number = {4}, pages = {441-450}, title = {Validation of the EORTC QLQ-C30 Quality of Life Questionnaire through combined qualitative and quantitative assessment of patient-observer agreement}, volume = {50}, year = {1997} }
@inbook{Grice:1975, author = {Grice, H.P.}, chapter = {Logic and conversation}, date-added = {2008-03-30 20:13:12 +0200}, date-modified = {2008-03-30 20:14:35 +0200}, editor = {P. Cole and J.L. Morgan}, publisher = {New York: Academic Press}, title = {Syntax and semantics: Vol. 3. Speech acts}, year = {1975} }
@inbook{Graesser:1999, author = {Graesser, A.C. and Kennedy, T. and Wiemer-Hastings, P. and Ottati, V.}, chapter = {The use of computational cognitive models to improve questions on surveys and questionnaires}, date-added = {2008-03-30 20:05:25 +0200}, date-modified = {2008-03-30 20:07:39 +0200}, editor = {M. Sirken and D. Herrmann and S. Schechter and N. Schwarz and J. Tanur and R. Tourangeau}, pages = {199-216}, publisher = {New York: John Wiley \& Sons}, title = {Cognition and survey research}, year = {1999} }
@inproceedings{Gower:1998, author = {Gower, A.R. and Belanger, B. and Williams, M.J.}, booktitle = {Proceedings of the Section on Survey Research Methods, American Statistical Association}, date-added = {2008-03-30 19:57:34 +0200}, date-modified = {2008-03-30 20:04:20 +0200}, pages = {404-409}, title = {Using focus groups with respondents and interviewers to evaluate the questionnaire and interviewing procedures after the survey has taken place}, year = {1998} }
@inproceedings{Gerber:1994, abstract = {This paper examines the use of vignettes in cognitive interviews as a means of examining implicit social knowledge. Our intent was to investigate a broad range of naturally occurring terms and concepts, and not just those which appear in the roster questions, [...] particular living situations, and to provide us with evidence of the way respondents use residence terms. The first aim was accomplished by choosing situations in which we believed that respondents might have difficulty in defining...}, author = {Gerber, E.R.}, booktitle = {Working Papers in Survey Methodology No. 94/05}, date-added = {2008-03-30 19:54:40 +0200}, date-modified = {2008-03-30 19:57:05 +0200}, publisher = {Washington, DC: U.S. Census Bureau}, title = {Hidden assumptions: The use of vignettes in cognitive interviewing}, year = {1994} }
@book{Fowler:1990, abstract = {Accuracy, reliability, verifiable and error-free results - these are the goals that anyone involved in survey interviewing desires. A practical guide to producing standardized - and reliable - interviews, this volume represents a blending of social science theories of interviewing dynamics, the authors' own extensive research on interview-related error and a compilation of research evidence from other prominent methodologists. How to avoid errors, sampling design issues, question construction methods, supervision techniques, training methods and the organization of data collection staffs are all thoroughly examined. In addition, prescriptions for improving the quality of survey data results are clear and concise. Both students learning survey research methods for the first time and experienced, active researchers will find this volume indispensable.}, author = {Fowler, F.J. and Mangione, T.}, date-added = {2008-03-30 19:36:28 +0200}, date-modified = {2008-03-30 19:37:58 +0200}, publisher = {Newbury Park, CA: Sage}, title = {Standardized survey interviewing: Minimizing interviewer-related error}, year = {1990} }
@article{Fowler:1992, abstract = {Although writing clear questions is accepted as a general goal in surveys, procedures to ensure that each key term is consistently understood are not routine. Researchers who do not adequately test respondent understanding of questions must assume that ambiguity will not have a large or systematic effect on their results. Seven questions that were drawn from questions used in national health surveys were subjected to special pretest procedures and found to contain one or more poorly defined terms. When the questions were revised to clarify the definition of key terms, significantly different estimates resulted. The implication is that unclear terms are likely to produce biased estimates. The results indicate that evaluation of survey questions to identify key terms that are not consistently understood and defining unclear terms are ways to reduce systematic error in survey measurement.}, author = {Fowler, F.J.}, date-added = {2008-03-30 19:35:22 +0200}, date-modified = {2008-03-30 19:36:25 +0200}, journal = {Public Opinion Quarterly}, pages = {218-231}, title = {How unclear terms affect survey data}, volume = {56}, year = {1992} }
@article{Foddy:1998, abstract = {A growing number of survey methodologists have advocated the use of in-depth probes for identifying comprehension problems, inadequate response categories, the perspectives adopted by respondents, and the strategies respondents employ when retrieving information from memory. This article reports the results of an empirical evaluation of the relative effectiveness of a number of the probes that have been used for the first three of these tasks. This work confirms that the traditional practice of field testing a questionnaire on a subsample drawn from the population of interest is not an effective way of detecting shortcomings in survey questions. It also indicates that the effectiveness of a probe is directly related to its specificity; the most effective comprehension probes are those directed at exploring the ways in which respondents interpret key concepts; probes designed to get at respondents' interpretations of key concepts are the most effective means of identifying perspectives respondents have adopted; and well-educated respondents are most likely to make substantive responses to in-depth probes.}, author = {Foddy, W.}, date-added = {2008-03-30 17:49:23 +0200}, date-modified = {2008-03-30 17:50:37 +0200}, journal = {Sociological Methods and Research}, pages = {103-133}, title = {An empirical evaluation of in-depth probes used to pretest survey questions}, volume = {27}, year = {1998} }
@article{Ericsson:1980, abstract = {Accounting for verbal reports requires explication of the mechanisms by which the reports are generated and influenced by experimental factors. We discuss different cognitive processes underlying verbalization and present a model of how subjects, when asked to think aloud, verbalize information from their short-term memory. }, author = {Ericsson, K.A. and Simon, H.A.}, date-added = {2008-03-30 17:47:15 +0200}, date-modified = {2008-03-30 17:49:00 +0200}, journal = {Psychological Review}, pages = {215-251}, title = {Verbal reports as data}, volume = {87}, year = {1980} }
@inbook{Eisenhower:1991, author = {Eisenhower, D. and Mathiowetz, N.A. and Morganstein, D.}, chapter = {Recall error: Sources and bias reduction techniques}, date-added = {2008-03-30 17:45:18 +0200}, date-modified = {2008-03-30 17:47:00 +0200}, editor = {P. Biemer and R.M. Groves and L. Lyberg and N.A. Mathiowetz and S. Sudman}, pages = {127-144}, publisher = {New York: John Wiley \& Sons}, title = {Measurement errors in surveys}, year = {1991} }
@inbook{Edwards:1991, author = {Edwards, W.S. and Cantor, D.}, chapter = {Toward a response model in establishment surveys}, date-added = {2008-03-30 17:17:53 +0200}, date-modified = {2008-03-30 17:19:47 +0200}, editor = {P. Biemer and R.M. Groves and L. Lyberg and N.A. Mathiowetz and S. Sudman}, pages = {211-233}, publisher = {New York: John Wiley \& Sons}, title = {Measurement errors in surveys}, year = {1991} }
@article{Ongena:2007, abstract = {In this paper we provide a model of interviewer-respondent interaction in survey interviews. Our model is primarily focused on the occurrence of problems within this interaction that seem likely to affect data quality. Both conversational principles and cognitive processes, especially where they do not match the requirements of the respondent's task, are assumed to affect the course of interactions. The cognitive processes involved in answering a survey question are usually described by means of four steps: interpretation, retrieval, judgement and formatting. Each of these steps may be responsible for different overt problems, such as requests for clarification or inadequate answers. Such problems are likely to affect the course of the interaction through conversational principles which may cause, for example, suggestive behaviour on the part of the interviewer, which may in turn yield new problematic behaviours. However, the respondent may not be the only one who experiences cognitive problems; the interviewer may also have such problems, for example with respect to explaining question meaning to the respondent. Thus the model proposed here, unlike most of the other models which concentrate on the respondent, tries to incorporate cognitive processes and conversational principles with respect to both interviewer and respondent. In particular, the model looks at how cognitive processes and conversational principles affect both the interaction between interview participants and the quality of the eventual answers.}, author = {Ongena, Y.P. and Dijkstra, W.}, date-added = {2008-03-30 14:53:29 +0200}, date-modified = {2008-03-30 14:54:44 +0200}, journal = {Applied Cognitive Psychology}, number = {2}, pages = {145-163}, title = {A model of cognitive processes and conversational principles in survey interview interaction}, volume = {21}, year = {2007} }
@inbook{Dijkstra:1988, author = {Dijkstra, W. and van der Zouwen, J.}, chapter = {Types of inadequate interviewer behavior in survey interviews}, date-added = {2008-03-30 14:50:02 +0200}, date-modified = {2008-03-30 14:51:57 +0200}, editor = {W.E. Saris and I.N. Gallhofer}, pages = {24-35}, publisher = {New York: St. Martin's}, title = {Data Collection and Scaling}, year = {1988} }
@techreport{DeMaio:1993, abstract = {The Census Bureau is interested in increasing the amount of pretesting performed on the surveys it conducts and also encouraging the use of recent innovations in pretesting activities. To this end, an interdivisional committee was established within the Bureau to experiment with alternative pretesting activities for surveys in the demographic area and to produce a monograph that develops guidelines for pretesting questionnaires, and specifies a range of pretesting options based on the amount of time and resources available. This monograph covers the following pretest methods: cognitive interviewing techniques, focus groups, behavior coding and analysis, respondent debriefing, interviewer debriefing, split panel tests, and item nonresponse and response distribution analysis. It provides overviews of the methods themselves as well as a discussion of issues involved in their use (e.g., time and cost, study design, reporting of results). It also presents three case studies of the use of these methods in pretesting demographic surveys.}, author = {DeMaio, T. and Mathiowetz, N. and Rothgeb, J. and Beach, M.E. and Durant, S.}, date-added = {2008-03-30 14:47:26 +0200}, date-modified = {2008-03-30 14:49:30 +0200}, institution = {Washington, DC: U.S. Census Bureau}, title = {Protocol for Pretesting Demographic Surveys at the Census Bureau}, type = {Working Papers in Survey Methodology No. 93/04}, year = {1993} }
@book{Couper:1998, abstract = {The latest computer assisted methods for survey research. Computer assisted survey information collection (CASIC) methods are rapidly replacing traditional "paper and pencil" survey procedures. Researchers now apply computer technologies at every step of the survey process, from automating interviews and computerizing data collection to data capture and preparation. CASIC techniques are reshaping today's survey research and methodology ---and redefining tomorrow's. Computer Assisted Survey Information Collection is the most up-to-date and authoritative resource available on CASIC methods and issues. Its comprehensive treatment provides the scope needed to evaluate past development and implementation of CASIC designs, to anticipate its future directions, and to identify new areas for research and development. Written in an array of evidentiary styles by more than 60 leading CASIC practitioners from numerous disciplines, this coherently organized volume: - Covers CASIC development and its integration into existing designs and organizations - Discusses instrument development and design - Examines survey design issues, including the incorporation of experiments - Discusses case management of automated survey systems - Evaluates training and supervision of computer assisted interviewers - Reviews self-administered surveys, including optically scannable mail surveys - Considers emerging technologies, such as voice recognition, pen-CASIC, and the Web as a data collection tool. Supplemented with copious tables, figures, and references as well as an extensive glossary, Computer Assisted Survey Information Collection provides a solid foundation in CASIC for seasoned research-survey practitioners and graduate students across a broad spectrum of social science disciplines. }, author = {Couper, M.P. and Baker, R.P. and Bethlehem, J. and Clark, C.Z.F. and Martin, J. 
and Nicholls, W.L.}, date-added = {2008-03-30 14:43:04 +0200}, date-modified = {2008-03-30 14:44:55 +0200}, publisher = {New York: John Wiley \& Sons}, title = {Computer assisted survey information collection}, year = {1998} }
@article{Collins:2003, abstract = {This article puts forward the case that survey questionnaires, which are a type of measuring instrument, can and should be tested to ensure they meet their purpose. Traditionally survey researchers have been pre-occupied with 'standardising' data collection instruments and procedures such as question wording and have assumed that experience in questionnaire design, coupled with pilot testing of questionnaires, will then ensure valid and reliable results. However, implicit in the notion of standardisation are the assumptions that respondents are able to understand the questions being asked, that questions are understood in the same way by all respondents, and that respondents are willing and able to answer such questions. The development of cognitive question testing methods has provided social researchers with a number of theories and tools to test these assumptions, and to develop better survey instruments and questionnaires. This paper describes some of these theories and tools, and argues that cognitive testing should be a standard part of the development process of any survey instrument.}, author = {Collins, D.}, date-added = {2008-03-30 14:40:38 +0200}, date-modified = {2008-03-30 14:42:04 +0200}, journal = {Quality of Life Research}, number = {3}, pages = {229-238}, title = {Pretesting survey instruments: An overview of cognitive methods}, volume = {12}, year = {2003} }
@article{Clark:1989, abstract = {For people to contribute to discourse, they must do more than utter the right sentence at the right time. The basic requirement is that they add to their common ground in an orderly way. To do this, we argue, they try to establish for each utterance the mutual belief that the addressees have understood what the speaker meant well enough for current purposes. We present a model of contributions and show how it accounts for a variety of features of everyday conversations}, author = {Clark, H.H. and Schaefer, E.F.}, date-added = {2008-03-30 14:14:43 +0200}, date-modified = {2008-03-30 14:17:07 +0200}, journal = {Cognitive Science}, pages = {259-294}, title = {Contributing to discourse}, volume = {13}, year = {1989} }
@article{Campanelli:1991,
  abstract = {This paper describes the use of specially designed debriefing studies as a way to explore respondent and interviewer components of response error in survey data. Such studies are useful in developing new questionnaires as well as identifying sources of error in existing questionnaires. This discussion is illustrated through recent work conducted at the US Bureau of the Census, in conjunction with the US Bureau of Labor Statistics, to redesign the Current Population Survey questionnaire.},
  author = {Campanelli, P. and Martin, E. and Rothgeb, J. M.},
  date-added = {2008-03-30 13:49:46 +0200},
  date-modified = {2008-03-30 13:52:12 +0200},
  journal = {The Statistician},
  pages = {253--264},
  title = {The use of respondent and interviewer debriefing studies as a way to study response error in survey data},
  volume = {40},
  year = {1991}
}
@article{Burton:1991,
  author = {Burton, S. and Blair, E.},
  date-added = {2008-03-30 13:46:46 +0200},
  date-modified = {2008-03-30 13:47:45 +0200},
  journal = {Public Opinion Quarterly},
  pages = {50--79},
  title = {Task conditions, response formulation processes, and response accuracy for behavioral frequency questions in surveys},
  volume = {55},
  year = {1991}
}
@article{Bolton:1993,
  abstract = {Conventional questionnaire pretesting methods focus on directly identifying question defects, such as an ambiguous question. This paper proposes a new method that identifies respondents' cognitive difficulties as they form answers to survey questions. It entails a content analysis of concurrent verbal protocols elicited during pretest interviews. The effectiveness of the methodology is illustrated with pretests of multiple versions of the same survey. The results are used to illustrate how this method yields diagnostic information about questionnaire problems and improvements. Then, the results are compared with the results of observational monitoring by managers. The findings indicate that a questionnaire pretesting methodology that quantifies respondents' cognitive difficulties is a useful enhancement for identifying and "improving" defective questions.},
  author = {Bolton, R. N.},
  date-added = {2008-03-30 13:44:50 +0200},
  date-modified = {2008-03-30 13:46:04 +0200},
  journal = {Marketing Science},
  pages = {280--303},
  title = {Pretesting questionnaires: Content analysis of respondents' concurrent verbal protocols},
  volume = {12},
  year = {1993}
}
@article{Blair:1987,
  author = {Blair, E. A. and Burton, S.},
  date-added = {2008-03-30 13:41:48 +0200},
  date-modified = {2008-03-30 13:42:54 +0200},
  journal = {Journal of Consumer Research},
  pages = {280--288},
  title = {Cognitive processes used by survey respondents in answering behavioral frequency questions},
  volume = {14},
  year = {1987}
}
@article{Biemer:1992,
  abstract = {"The [U.S.] Current Population Survey (CPS) reinterview sample consists of two subsamples: (a) a sample of CPS households is reinterviewed and the discrepancies between the reinterview responses and the original interview responses are reconciled for the purpose of obtaining more accurate responses..., and (b) a sample of CPS households, nonoverlapping with sample (a), is reinterviewed 'independently' of the original interview for the purpose of estimating simple response variance (SRV). In this article a model and estimation procedure are proposed for obtaining estimates of SRV from subsample (a) as well as the customary estimates of SRV from subsample (b)....Data from the CPS reinterview program for both subsamples (a) and (b) are analyzed both (1) to illustrate the methodology and (2) to check the validity of the CPS reinterview data. Our results indicate that data from subsample (a) are not consistent with the data from subsample (b) and provide convincing evidence that errors in subsample (a) are the source of the inconsistency." [PubMed excerpt]},
  author = {Biemer, P. P. and Forsman, G.},
  date-added = {2008-03-30 13:39:32 +0200},
  date-modified = {2008-03-30 13:41:01 +0200},
  journal = {Journal of the American Statistical Association},
  number = {420},
  pages = {915--923},
  title = {On the quality of reinterview data with application to the {Current Population Survey}},
  volume = {87},
  year = {1992}
}
@article{Beatty:2007,
  abstract = {Cognitive interviewing has emerged as one of the more prominent methods for identifying and correcting problems with survey questions. We define cognitive interviewing as the administration of draft survey questions while collecting additional verbal information about the survey responses, which is used to evaluate the quality of the response or to help determine whether the question is generating the information that its author intends. But beyond this general categorization, cognitive interviewing potentially includes a variety of activities that may be based on different assumptions about the type of data that are being collected and the role of the interviewer in that process. This synthesis reviews the range of current cognitive interviewing practices, focusing on three considerations: (1) what are the dominant paradigms of cognitive interviewing---what is produced under each, and what are their apparent advantages; (2) what key decisions about cognitive interview study design need to be made once the general approach is selected (e.g., who should be interviewed, how many interviews should be conducted, and how should probes be selected), and what bases exist for making these decisions; and (3) how cognitive interviewing data should be evaluated, and what standards of evidence exist for making questionnaire design decisions based on study findings. In considering these issues, we highlight where standards for best practices are not clearly defined, and suggest broad areas worthy of additional methodological research.},
  author = {Beatty, P. C. and Willis, G. B.},
  date-added = {2008-03-30 13:34:55 +0200},
  date-modified = {2008-03-30 13:35:44 +0200},
  journal = {Public Opinion Quarterly},
  title = {Research Synthesis: The Practice of Cognitive Interviewing},
  year = {2007}
}
@article{Thornberry:1987,
  author = {Thornberry, Jr., O. T.},
  date-added = {2008-03-30 13:30:00 +0200},
  date-modified = {2008-03-30 13:30:05 +0200},
  issn = {0083-2057},
  journal = {Vital and Health Statistics. Series 2, Data Evaluation and Methods Research},
  number = {106},
  pages = {1--4},
  pmid = {15791749},
  title = {An experimental comparison of telephone and personal health interview surveys},
  year = {1987}
}
@article{Bercini:1992,
  author = {Bercini, D. H.},
  date-added = {2008-03-30 13:27:26 +0200},
  date-modified = {2008-03-30 13:30:05 +0200},
  journal = {Journal of Exposure Analysis and Environmental Epidemiology},
  pages = {241--248},
  title = {Pretesting questionnaires in the laboratory: An alternative approach},
  volume = {2},
  year = {1992}
}
@article{Mulligan:2003,
  abstract = {In public opinion research, response latency is a measure of attitude accessibility, which is the ease or swiftness with which an attitude comes to mind when a respondent is presented with a survey question. Attitude accessibility represents the strength of the association in memory between an attitude object and an evaluation of the object. Recent research shows that attitude accessibility, as measured by response latency, casts light on a wide range of phenomena of public opinion and political behavior. We discuss response latency methodology for survey research and advocate the use of latent response latency timers (which are invisible both to respondents and interviewers) as a low cost, low-maintenance alternative to traditional methods of measuring response latency in public opinion surveys. We show that with appropriate model specification latent response latency timers may provide a suitable alternative to the more complicated and expensive interviewer-activated timers.},
  author = {Mulligan, K. and Grant, J. T. and Mockabee, S. T. and Monson, J. Q.},
  date-added = {2008-03-30 13:18:55 +0200},
  date-modified = {2008-03-30 13:20:21 +0200},
  journal = {Political Analysis},
  number = {3},
  pages = {289--301},
  title = {Response Latency Methodology for Survey Research: Measurement and Modeling Strategies},
  volume = {11},
  year = {2003}
}
@article{Bassili:1996,
  author = {Bassili, J. N. and Scott, B. S.},
  date-added = {2008-03-30 13:10:42 +0200},
  date-modified = {2008-03-30 13:48:44 +0200},
  journal = {Public Opinion Quarterly},
  pages = {390--399},
  title = {Response latency as a signal to question problems in survey research},
  volume = {60},
  year = {1996}
}
@book{Aday:2006,
  abstract = {Designing and Conducting Health Surveys is written for students, teachers, researchers, and anyone who conducts health surveys. This third edition of the standard reference in the field draws heavily on the most recent methodological research on survey design and the rich storehouse of insights and implications provided by cognitive research on question and questionnaire design in particular. This important resource presents a total survey error framework that is a useful compass for charting the dangerous waters between systematic and random errors that inevitably accompany the survey design enterprise. In addition, three new studies based on national, international, and state and local surveys---the UNICEF Multiple Indicator Cluster Surveys, California Health Interview Survey, and National Dental Malpractice Survey---are detailed that illustrate the range of design alternatives available at each stage of developing a survey and provide a sound basis for choosing among them.},
  address = {San Francisco},
  author = {Aday, L. A. and Cornelius, L. J.},
  date-added = {2008-03-30 13:01:07 +0200},
  date-modified = {2008-03-30 13:01:52 +0200},
  edition = {Third},
  publisher = {Jossey-Bass},
  title = {Designing and Conducting Health Surveys: A Comprehensive Guide},
  year = {2006}
}
@book{Aday:1996,
  abstract = {The book contains sixteen well-written and carefully reasoned chapters that chart the odyssey of a project from conceptualization to publication. The first four chapters cover issues in the general design of a survey, including the formulation of research objectives and specific questions and the development of an analysis plan. Chapters 5--7 cover methodological decisions regarding choices for data collection and the issues in the design and evaluation of sampling plans. Chapters 8--12 focus on the formulation of questions and the construction of the questionnaire. The next two chapters discuss the management of surveys and the preparation of the data for computer entry. Chapter 15 discusses analytical approaches to survey data, and the final chapter completes the cycle by describing effective ways for preparing research reports. [from Journal of Health Politics, Policy and Law 1990 15(3):685-686; DOI:10.1215/03616878-15-3-685]},
  address = {San Francisco},
  author = {Aday, L. A.},
  date-added = {2008-03-30 12:47:55 +0200},
  date-modified = {2008-03-30 13:03:32 +0200},
  edition = {Second},
  publisher = {Jossey-Bass},
  title = {Designing and conducting health surveys},
  year = {1996}
}
@comment{This file was generated by bibtex2html 1.91.}