[HSS8123] – Rolling Alphabet/ Project Documentation

Posted in Uncategorized on September 6th, 2016 by Yue Wang

https://www.youtube.com/watch?v=8cDeZoaUohs

[HSS8123] – Rolling Alphabet/ code

Posted in Uncategorized on September 6th, 2016 by Yue Wang

The basic code comes from the TUIO library examples; the main connection support comes from the reacTIVision app.

Code development help from Tom and Xiyuan.
import TUIO.*;
import processing.sound.*;
TuioProcessing tuioClient;
float cursor_size = 15;
float object_size = 60;
float table_size = 760;
float scale_factor = 1;
PFont font;
boolean verbose = false;
boolean callback = true;
///-------- IMAGES --------///
PImage background;
PImage[] letterImages = new PImage[26]; // one image per letter, A to Z
///-------- SOUNDS --------///
SoundFile[] letterSounds = new SoundFile[26]; // one recording per letter, A to Z
///-------- SOUND SWITCHES --------///
boolean[] soundOn = new boolean[26]; // per-letter sound switches (currently unused)
int tobjNo = -1;              // symbol ID of the active fiducial (-1 = none)
int symbolFoundThisTime = -1; // symbol ID seen on the current frame
void setup()
{
  noCursor();

  size(displayWidth, displayHeight);
  noStroke();
  fill(0);

  if (!callback) {
    frameRate(60);
    loop();
  } else {
    noLoop();
  }

  font = createFont("Arial", 18);
  scale_factor = height/table_size;

  tuioClient = new TuioProcessing(this);

  ////----- LOAD IMAGES AND SOUNDS -----////
  background = loadImage("background.jpg");
  for (int i = 0; i < 26; i++) {
    letterImages[i] = loadImage("p" + (i+1) + ".jpg");               // p1.jpg to p26.jpg
    letterSounds[i] = new SoundFile(this, (char)('A' + i) + ".mp3"); // A.mp3 to Z.mp3
  }
}
void draw()
{
  textFont(font, 18*scale_factor);
  float obj_size = object_size*scale_factor;
  float cur_size = cursor_size*scale_factor;

  // Remember the symbol ID of the last fiducial seen this frame. Note that
  // symbolFoundThisTime is not reset between frames, so the last letter
  // stays active after its object leaves the table.
  ArrayList<TuioObject> tuioObjectList = tuioClient.getTuioObjectList();
  for (int i = 0; i < tuioObjectList.size(); i++) {
    TuioObject tobj = tuioObjectList.get(i);
    symbolFoundThisTime = tobj.getSymbolID();
  }
  println(symbolFoundThisTime);

  if (symbolFoundThisTime != tobjNo) {
    chooseAndStopSample(tobjNo);              // stop the previous letter's sound
    chooseAndPlaySample(symbolFoundThisTime); // draw and play the new letter
    tobjNo = symbolFoundThisTime;
  }
  chooseAndDrawImage(symbolFoundThisTime);
}
void addTuioObject(TuioObject tobj) {
  if (verbose) println("add obj " + tobj.getSymbolID() + " (" + tobj.getSessionID() + ") " + tobj.getX() + " " + tobj.getY() + " " + tobj.getAngle());
}
// Draw the full-screen image for the given symbol ID; -1 shows the background.
void chooseAndDrawImage(int tobjNo) {
  if (tobjNo == -1) {
    image(background, 0, 0, width, height);
  } else if (tobjNo >= 0 && tobjNo < 26) {
    image(letterImages[tobjNo], 0, 0, width, height);
  }
}
// Draw the letter image and start its recording.
void chooseAndPlaySample(int tobjNo) {
  if (tobjNo >= 0 && tobjNo < 26) {
    image(letterImages[tobjNo], 0, 0, width, height);
    letterSounds[tobjNo].play();
  }
}
// Stop the recording for the given symbol ID, if any.
// Open question from development: stop() fired many times instead of once;
// see the guard sketch after this listing for one possible fix.
void chooseAndStopSample(int tobjNo) {
  if (tobjNo >= 0 && tobjNo < 26) {
    letterSounds[tobjNo].stop();
  }
}
void updateTuioObject(TuioObject tobj) {
  if (verbose) println("set obj " + tobj.getSymbolID() + " (" + tobj.getSessionID() + ") " + tobj.getX() + " " + tobj.getY() + " " + tobj.getAngle()
    + " " + tobj.getMotionSpeed() + " " + tobj.getRotationSpeed() + " " + tobj.getMotionAccel() + " " + tobj.getRotationAccel());
}

void removeTuioObject(TuioObject tobj) {
  if (verbose) println("del obj " + tobj.getSymbolID() + " (" + tobj.getSessionID() + ")");
}

void addTuioCursor(TuioCursor tcur) {
  if (verbose) println("add cur " + tcur.getCursorID() + " (" + tcur.getSessionID() + ") " + tcur.getX() + " " + tcur.getY());
}

void updateTuioCursor(TuioCursor tcur) {
  if (verbose) println("set cur " + tcur.getCursorID() + " (" + tcur.getSessionID() + ") " + tcur.getX() + " " + tcur.getY()
    + " " + tcur.getMotionSpeed() + " " + tcur.getMotionAccel());
}

void removeTuioCursor(TuioCursor tcur) {
  if (verbose) println("del cur " + tcur.getCursorID() + " (" + tcur.getSessionID() + ")");
}

void addTuioBlob(TuioBlob tblb) {
  if (verbose) println("add blb " + tblb.getBlobID() + " (" + tblb.getSessionID() + ") " + tblb.getX() + " " + tblb.getY() + " " + tblb.getAngle() + " " + tblb.getWidth() + " " + tblb.getHeight() + " " + tblb.getArea());
}

void updateTuioBlob(TuioBlob tblb) {
  if (verbose) println("set blb " + tblb.getBlobID() + " (" + tblb.getSessionID() + ") " + tblb.getX() + " " + tblb.getY() + " " + tblb.getAngle() + " " + tblb.getWidth() + " " + tblb.getHeight() + " " + tblb.getArea()
    + " " + tblb.getMotionSpeed() + " " + tblb.getRotationSpeed() + " " + tblb.getMotionAccel() + " " + tblb.getRotationAccel());
}

void removeTuioBlob(TuioBlob tblb) {
  if (verbose) println("del blb " + tblb.getBlobID() + " (" + tblb.getSessionID() + ")");
}

void refresh(TuioTime frameTime) {
  if (verbose) println("frame #" + frameTime.getFrameID() + " (" + frameTime.getTotalMilliseconds() + ")");
  if (callback) redraw();
}
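
One open problem noted in the code: stop() sometimes fired many times instead of once. One way I could solve this is to remember which sample is actually playing and stop only that one. A minimal sketch of the idea, meant to slot into the sketch above: switchSample and currentlyPlaying are hypothetical names of mine, and the isPlaying() guard assumes a version of the Processing Sound library that provides it.

// Hypothetical helper: track the playing sample and stop it exactly once.
int currentlyPlaying = -1; // index of the sample now playing, -1 = none

void switchSample(int newSymbol) {
  if (newSymbol == currentlyPlaying) return;  // same letter, nothing to do
  if (currentlyPlaying >= 0 && currentlyPlaying < 26
      && letterSounds[currentlyPlaying].isPlaying()) {
    letterSounds[currentlyPlaying].stop();    // stop the old sample once
  }
  if (newSymbol >= 0 && newSymbol < 26) {
    letterSounds[newSymbol].play();           // start the new one
  }
  currentlyPlaying = newSymbol;
}

draw() could then call switchSample(symbolFoundThisTime) in place of the chooseAndStopSample/chooseAndPlaySample pair.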

[HSS8123] – Rolling Alphabet/experience table and model box

Posted in Uncategorized on September 6th, 2016 by Yue Wang

[Photos of the experience table and the model box]

[HSS8123] – Rolling Alphabet/model post

Posted in Uncategorized on September 6th, 2016 by Yue Wang

[Images: the background and the 26 letter graphics, p1 to p26]

[HSS8123] – Rolling Alphabet/sounds for each alphabet

Posted in Uncategorized on September 6th, 2016 by Yue Wang

26 sound pieces from 26 different people's voices, each telling a story, feeling, or thought about one letter of the alphabet: music, a poem, a special vocal sound, and so on.

I collected and recorded them for my alphabet models; when people move a model, they hear its sound.

[HSS8121] – Presentation document

Posted in Uncategorized on June 9th, 2016 by Yue Wang

[Presentation slides 1 to 15]

[HSS8121] – Research on the interrelationships between facial expression, colours, and emotion: Research Proposal

Posted in Uncategorized on June 9th, 2016 by Yue Wang

This proposal is composed of seven sections. The first briefly introduces what I propose to do in the research and what other research has been conducted in this area, and gives a general overview of the proposal. The second section defines the research questions, issues, and problems: it lists the questions I want this study to answer, such as which colour best describes each facial expression linked with a basic emotion (happiness, sadness, surprise, disgust, anger, and fear), and whether adding opposite colours to a facial expression still conveys the original human emotion. The third part covers the aim and objectives of this research proposal: the targets I want to achieve and why I want to research them.

The fourth section discusses which research methods I should employ. In class we learned four research methods, ethnography, practice-based research, video analysis, and media archaeology, and this section explains why particular approaches were chosen. The fifth section gives the literature review and background studies: what other research is being conducted in this area. The sixth section analyses what knowledge I learned from this research proposal and applied to my "LATES" event design for the "In Your Face" exhibition. Finally, the last section concerns the conclusion and future development of the proposal: a summary of the whole proposal and a review of which problems I can answer and which questions still need thinking about.

Problem Formulation

The first important question this research proposal should address is: what kind of interrelationships exist between facial expression, colours, and emotion? As Cole and Moore (2014) point out, infant facial activity in particular expresses specific emotions: in early social interaction, before speech, facial expression has an important relationship with emotion. From another point of view, Gil and Le Bigot (2015) have shown that the effect of a coloured background on the processing of stimuli is an important factor in human interaction. Moreover, Geovany, Olac, and Stephen et al. (2014) argue that humans display emotion through different channels such as head pose, bodily gesture, speech prosody, and facial expression, but also through physiological signals, such as colour-change stimuli. Different colours may evoke specific human emotions: red may evoke happiness, black may evoke sadness or fear. Some colours predict human emotions better than others; for example, red has a stronger emotional effect on viewers than blue. The Color & Paints Interim Meeting of the International Color Association (2004) reported that colour relates to our emotions and feelings, and that by matching colours (such as red and blue) to a certain number of emotions (such as anger and sadness), data about emotional reactions to colours can be assessed. However, all of that research studies the relationship between two elements, whether facial expression and colour, colour and emotion, or facial expression and emotion. When all three elements appear together, what kind of interrelationships do they have?

The second question is: how does the human face-recognition system identify people's facial expressions and emotions, and is the content that a facial expression ultimately conveys the person's emotion? For example, Barrett, Mesquita, and Gendron (2011) have shown that facial expression is one of the main external forms of emotional expression: to guide individual behaviour toward suitable interaction in social situations, people need to correctly extract the corresponding emotional information from other people's faces. In real life, a facial expression rarely happens in isolation; it is always associated with a particular situation and a particular emotional appeal. If we want to accurately identify the emotional state and obtain the real meaning of other people's facial expressions, we must identify the current emotional situation and integrate it with the facial expression.

The third question is: will opposite colours create a mood illusion in face recognition? This question may be hard to resolve, because I have not found research that speaks directly about opposite colours and mood illusions for the human face. I have only found experiments by Yong, Sudirman, and Chew (2012), who investigate the interrelationship between colours and facial expressions in order to apply results acquired in psychology, art, and design.

Research aim and objective

The first aim of this research proposal is to try to solve the three questions mentioned above, to understand the knowledge relating to them, and to seek to extend that knowledge from other people's research.

The second aim is to use the knowledge I gain to improve my "LATES" event design for the "In Your Face" exhibition. If I know the relationships between facial expression, emotion, and colours, and each factor's specific function, I can improve the quality of my event design. For example, a reasonable arrangement of each factor will make the design more effective and enhance the audience's experience of the event, and I can decide which parts of the design should use colour changes to guide changes in the audience's facial expressions and create different emotions.

The reason I am attempting this research is that I am really interested in facial expression, emotion, and colours. Last year I made a photography work named "Half Water, Half Fire", in which opposite colours cover people's images. When the colours act on an image, the photo's original emotion changes: even though the person's facial expression has not changed, you get a totally different feeling from two copies of the same photo. I also think opposite colours can heighten the audience's feeling and emotion when they see the different facial expressions in a photo. When I made this work last year I did not have this kind of knowledge; this research has reminded me to rethink the project and to look for ways to develop the practice through research. Moreover, if I turn this photography work into an event activity, what interaction do I want with the audience, and how should I redesign the project? So I really want to research this area and find knowledge that improves the design.
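
As a rough illustration of the idea (not the original artwork), a few lines of Processing can wash the same photo in two opposite colours with tint(); the file name portrait.jpg is a placeholder of mine:

// Sketch of the 'opposite colours over one image' idea.
// Assumes a placeholder image portrait.jpg in the sketch's data folder.
PImage portrait;

void setup() {
  size(800, 400);
  portrait = loadImage("portrait.jpg");
}

void draw() {
  tint(255, 80, 80);                        // warm 'fire' wash on the left copy
  image(portrait, 0, 0, width/2, height);
  tint(80, 80, 255);                        // cool 'water' wash on the right copy
  image(portrait, width/2, 0, width/2, height);
  noTint();                                 // reset the tint for later drawing
}

Even though both halves show the same face, the warm and cool washes give them very different emotional readings.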

Research method

From the HSS8121: Enterprise and Research Methods module, we learned four research methods. The first is ethnography. As Hammersley and Atkinson (1983) point out, ethnography is a significant approach used in social research today. Early on, "ethnography" was often seen as complementary, referring to historical and comparative analysis of non-Western societies and cultures. Later, "ethnography" came to refer to an integration of two parts: first-hand empirical investigation, and the theoretical and comparative interpretation of social organisation and culture.

Moreover, what should ethnographers do? In class, John mentioned that ethnography has three factors. First, its features: naturally occurring settings, ordinary activities, social meanings, the perspective of the participants, member categories, description, participant observation, and reflexivity. Second, the data to be covered: multiple data sources; interviews, observation, note-taking, video and audio, documents and artefacts; coding; and qualitative, interpretative analysis. Third, its disciplines: anthropology, sociology, a concern for the underdog, and ethnography in CSCW, HCI, and design.

On this question of what ethnographers do, Hammersley and Atkinson (1983) give more detail: in terms of data collection, ethnography regularly involves the researcher participating, overtly or covertly, in everyday life, listening to what is said, watching what happens, asking questions through informal and formal interviews, and collecting documents and artefacts.

The second research method is video analysis. When we use this method, we need to think about its uses, its features, and conversation and video analysis. Unlike traditional research methods such as interviews and questionnaires, video analysis, as Heath et al. (2010) stress, takes the participants' points of view seriously and draws on material from their daily activities. For example, when presenting this kind of data to audiences, we need to pay attention to the selection of relevant examples, multiple fragments, the presentation of single cases, and graphical representation devices. With video analysis we should focus not only on evaluative and formative uses and on rethinking "interaction" and related concepts, but also on data transcription, looking for phenomena of interest, and qualitative, interpretative analysis. Moreover, we need to think about how activities are socially organised through speech, body movement, gesture, and engagement with objects. When recording, we might use multiple cameras and shoot a broad static view, which is not the same as a promotional or documentary video.

The third research method is practice-based research, which is easier to understand than ethnography or video analysis.

The fourth research method is media archaeology. As Lovink (2003) argues, media archaeology is first and foremost a methodology, a hermeneutic reading of the "new" against the grain of the past, rather than a telling of the history of technologies from past to present. This research method might be used in electronic practice, art practice, or pedagogy.

Of these four research methods, I should employ ethnography, video analysis, and practice-based research. I chose ethnography because, for this proposal on the interrelationships between facial expression, colours, and emotion, I particularly need participation, overt and covert: watching what happens at my "LATES" event design for the "In Your Face" exhibition and collecting data from the audience's reactions. Moreover, I should combine this with practice-based research to form a positive cycle: do research, practise, investigate the main question more deeply through practice, find an answer and practise again, observe the practice activities as a participant, then collect and analyse data, find new problems, and research again to solve them. I chose video analysis because the basic design for this research is a photography work; I need this method to analyse multiple fragments and produce graphical representations, as well as to rethink "interaction" and attend to the transcription of the photos' meaning.

Research review

Yong, Sudirman, and Chew (2012) ran an experiment on colour perception of facial expressions of emotion, using an emotion wheel and the six universal emotions (happiness, surprise, anger, disgust, fear, and sadness). The experiment asked participants to select a single colour for each of seven faces; the faces express the six basic emotions plus a neutral one, and the six emotions are not named (Fig. 1).

[Fig. 1: images from the experiment]

Source: Yong, C., Sudirman, R. and Chew, K. (2012). Colour Perception on Facial Expression towards Emotion. TELKOMNIKA (Telecommunication Computing Electronics and Control), 10(4), p.783.

The whole experiment data results shown the concordance among the observed expression of human emotions, colours and their relationships. If I can collect more picture have various emotions more subtle and emotional, it might be more clear about the answer. A valuable color accessories at facial expression picture maybe can find more data form audience emotional state, feeling, emotions, mood, cognition and connotations. Form this experiment, as Yong, Sudirman and Chew (2012) point out that design should select colours to be more objective, each of the colour represents the consensus of the perception are different, designer can used this point to guide their works.