eHouse Smart Home Voice Control from Android

The eHouse smart home system can be voice controlled from Android using the eHouse software libraries for the Android platform together with the speech analyzer (speech recognition) built into the system.

Control relies on the speech analyzer built into the Android system, so no separate speech recognition engine is required.
This article describes how, in a few dozen lines of source code, to add voice control of the eHouse system from an Android smartphone or tablet.
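
Before starting, it is worth checking that the device actually offers a speech recognizer; if none is installed, voice control should simply be disabled. A minimal sketch of such a check is shown below (the helper name isSpeechRecognitionAvailable is only an illustration and is not part of the eHouse sources):

// Required imports: android.content.Intent, android.content.pm.PackageManager,
// android.content.pm.ResolveInfo, android.speech.RecognizerIntent, java.util.List
private boolean isSpeechRecognitionAvailable() // illustrative helper, called e.g. from onCreate()
{
PackageManager pm = getPackageManager();
// ask the package manager for activities able to handle ACTION_RECOGNIZE_SPEECH
List<ResolveInfo> activities = pm.queryIntentActivities(
new Intent(RecognizerIntent.ACTION_RECOGNIZE_SPEECH), 0);
return activities.size() > 0; // at least one speech recognizer is installed
}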

In this example, speech recognition for the eHouse system is started with a short press of the hardware Search button; a long press of that button remains associated with the system web search.

Voice control works in the following way:

  • Speech recognition is started by briefly pressing the Search button.
  • A speech recognition dialog box is shown, asking for the device (controller) name.
     eHouse smart home control, speech recognition - device name
  • Pronounce the name of the eHouse controller loudly and clearly.
  • The recognized text is compared with the list of devices on the "RunEvent" form; if a matching device is found, the next step starts (otherwise the dialog is interrupted).
  • A speech recognition dialog box is shown, asking for the event name for the selected device.
     eHouse smart home control, speech recognition - event name
  • The recognized text is processed with three comparison methods: an exact match, a match against the toggle-output name, and a partial match between the recognition result and an event name from the list (a standalone sketch of these three checks follows this list). If a matching event is found, the next step starts.
  • A speech recognition dialog box is shown, asking for the method of transmitting the event to the eHouse system (WiFi, SMS, Internet, eMail).
     eHouse smart home control, speech recognition - choice of event transmission method
  • The recognition result is compared with tables containing the phonetic text for each transmission method. On an exact match the event is added to the queue and sent automatically. On a partial match the closest event name is selected from the list, but it is not sent.
     eHouse smart home control, speech recognition
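
The three comparison methods can be sketched as a standalone helper that mirrors the logic of onActivityResult() below (the method name matchEventName, the plain List<String> arguments and the returned index are illustrative assumptions; the real application works directly on the RunEvent form lists):

// Returns the index of the matching event name, or -1 when nothing fits.
private static int matchEventName(List<String> recognizerResults, List<String> eventNames)
{
// 1) exact match, ignoring case
for (String heard : recognizerResults)
for (int k = 0; k < eventNames.size(); k++)
if (heard.compareToIgnoreCase(eventNames.get(k)) == 0) return k;
// 2) toggle-output name: "<recognized text> (toggle)" compared with the list entry
for (String heard : recognizerResults)
for (int k = 0; k < eventNames.size(); k++)
if ((heard + " (toggle)").compareToIgnoreCase(eventNames.get(k).replaceAll(" <- ", " ")) == 0) return k;
// 3) partial match: an event name from the list begins the recognized phrase
// (in the application such a match only selects the event, it is not sent)
for (String heard : recognizerResults)
for (int k = 0; k < eventNames.size(); k++)
if (heard.toLowerCase().indexOf(eventNames.get(k).toLowerCase()) == 0) return k;
return -1;
}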

Fragments of the application source code:
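
The fragments below reference several fields and request codes that are declared elsewhere in the application. Their exact values are not shown in these excerpts, so the declarations below are only a plausible sketch:

private static final int VOICE_RECOGNITION_REQUEST_CODE = 1001; // speech recognition request (value is an assumption)
private static final int SPEECH_REQUEST_CODE = 1002; // text-to-speech data check request (value is an assumption)
private static final int SPEECH_DEVICENAME = 0; // step 1: device (controller) name (value is an assumption)
private static final int SPEECH_EVENT = 1; // step 2: event name (value is an assumption)
private static final int SPEECH_OK = 2; // step 3: transmission method (value is an assumption)
private int SpeechItem = SPEECH_DEVICENAME; // current step of the voice dialog
private int SpeechInvalid = 0; // counter of unrecognized answers
private TextToSpeech tts; // speech synthesizer instance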

@Override
public boolean onKeyDown(int keycode, KeyEvent event) // software handling of the smartphone hardware buttons
{
if (keycode == KeyEvent.KEYCODE_MENU) // Menu: views and software options can be attached here so they do not occupy screen space
{
// MessageBox("menu");

}
if (keycode == KeyEvent.KEYCODE_BACK) // Back: may not work with some options on some Android versions
{
MessageBox("Back");
}
if (keycode == KeyEvent.KEYCODE_HOME) // Home: not delivered to applications by the system
{
MessageBox("Home");
}
if (keycode == KeyEvent.KEYCODE_SEARCH) // Search: a short press is handled here, a long press automatically starts the system search
{
// voice recognition support (speech recognition for controlling the eHouse system)
// triggers eHouse events with voice commands
ehousecommunication.CurrentForm = ehousecommunication.TEXT_EVENTS; // opens the text events form
SetView(); // displays the text form

startVoiceRecognitionActivity("Device?"); // launches the voice recognition dialog: step 1 - device (controller), step 2 - event, step 3 - transmission method
}
if (keycode == KeyEvent.KEYCODE_VOLUME_DOWN) // switch the displayed screen (form) to the previous one
{

if (ehousecommunication.CurrentForm < ehousecommunication.FORM_MAX) ehousecommunication.CurrentForm++;
else ehousecommunication.CurrentForm = 1;
SetView();
// MessageBox("-");
}
if (keycode == KeyEvent.KEYCODE_VOLUME_UP) // switch the displayed screen (form) to the next one
{
if (ehousecommunication.CurrentForm > 0) ehousecommunication.CurrentForm--;
else ehousecommunication.CurrentForm = ehousecommunication.FORM_MAX;
SetView();
// MessageBox("+");
}
return false; // do not consume the key here, so that Back can still exit the application
// super.onKeyDown(keycode, event);
}
// Speech Recognition Support
//

public void startVoiceRecognitionActivity(String caption)
{ // caption - header / description of the speech recognition dialog box
Intent intent = new Intent(RecognizerIntent.ACTION_RECOGNIZE_SPEECH); // configuration of the speech analyzer (speech recognition)
intent.putExtra(RecognizerIntent.EXTRA_CALLING_PACKAGE, "eHouse 4 Android");
intent.putExtra(RecognizerIntent.EXTRA_PROMPT, caption); // sets the description of the dialog box
// Give a hint to the recognizer about what the user is going to say
intent.putExtra(RecognizerIntent.EXTRA_LANGUAGE_MODEL,
RecognizerIntent.LANGUAGE_MODEL_FREE_FORM);
intent.putExtra(RecognizerIntent.EXTRA_MAX_RESULTS, 15); // return at most 15 results from the speech analyzer
intent.putExtra(RecognizerIntent.EXTRA_SPEECH_INPUT_COMPLETE_SILENCE_LENGTH_MILLIS, 300); // length of silence that ends the analysis
intent.putExtra(RecognizerIntent.EXTRA_SPEECH_INPUT_POSSIBLY_COMPLETE_SILENCE_LENGTH_MILLIS, 300);

// Specify the recognition language.
// The default language is used here; otherwise the list of supported languages can be analyzed and the proper one selected
// (if the default language setting differs from the language we intend to recognize)
/* if (!mSupportedLanguageView.getSelectedItem().toString().equals("Default")) {
intent.putExtra(RecognizerIntent.EXTRA_LANGUAGE,
mSupportedLanguageView.getSelectedItem().toString());
} */

startActivityForResult(intent, VOICE_RECOGNITION_REQUEST_CODE);

}
//////////////////////////////////////////////////
@Override
protected void onActivityResult(int requestCode, int resultCode, Intent data)
{
if (requestCode == SPEECH_REQUEST_CODE) // result of the text-to-speech (speech synthesis) data check
{
if (resultCode == TextToSpeech.Engine.CHECK_VOICE_DATA_PASS) // the speech synthesizer is ready (voice packages for acoustic playback are installed)
{
// success, create the TTS instance
tts = new TextToSpeech(this, this); // instantiate the voice synthesizer
tts.setLanguage(Locale.US); // language of the voice synthesizer (the proper one can be chosen when available)

}
else
{ // missing voice data: the synthesizer components are not installed, trigger installation from Android Market
Intent installIntent = new Intent();
installIntent.setAction(TextToSpeech.Engine.ACTION_INSTALL_TTS_DATA);
startActivity(installIntent); // starts the installation dialog of the speech synthesizer
}
}
// VOICE RECOGNITION RESULT - the speech analyzer picked up an acoustic signal
if (requestCode == VOICE_RECOGNITION_REQUEST_CODE && resultCode == RESULT_OK)
{
// Fill the list with the strings the recognizer thought it could have heard
ArrayList<String> Matches = data.getStringArrayListExtra(RecognizerIntent.EXTRA_RESULTS); // list of text results from the speech analyzer
boolean found = false;
if (SpeechItem == SPEECH_DEVICENAME) // the first recognition result (acoustic command) is the device name
{
for (int i = 0; i < Matches.size(); i++) // for all results from the speech analyzer
{
for (int k = 0; k < RE.DeviceName.getCount(); k++) // for all controller names
if (Matches.get(i).compareToIgnoreCase(RE.DeviceName.getItemAtPosition(k).toString()) == 0) // compares the controller name with the result of the speech analyzer
{ // the comparison matches
RE.DeviceName.setSelection(k); // select the proper device from the list on the RunEvent form

SpeechItem = SPEECH_EVENT; // set the next step of the speech analyzer (name of the event for the controller)
SpeechInvalid = 0;
startVoiceRecognitionActivity("Event?"); // restart the speech analyzer to detect the event name
return;

}
}
}
else
if (SpeechItem == SPEECH_EVENT) // the speech analyzer returned results with the event name
{
for (int i = 0; i < Matches.size(); i++) // for all results of the speech analyzer
{
for (int k = 0; k < RE.EventName.getCount(); k++) // for all events of the already selected controller
if (Matches.get(i).compareToIgnoreCase(RE.EventName.getItemAtPosition(k).toString()) == 0) // exact (identical) comparison of the results
{
RE.EventName.setSelection(k); // select the event name from the list on the RunEvent form
found = true;
SpeechItem = SPEECH_OK; // flag the next step of the speech analyzer
SpeechInvalid = 0;
startVoiceRecognitionActivity("OK?"); // start the next step (the question about how to send the event to the system - transmission)
return;

}
}
// if nothing was found so far (no exact event name match)
for (int i = 0; i < Matches.size(); i++) // for all text results of the speech analyzer (event names)
{
for (int k = 0; k < RE.EventName.getCount(); k++) // for all names from the event list of the device
if ((Matches.get(i) + " (toggle)").compareToIgnoreCase(RE.EventName.getItemAtPosition(k).toString().replaceAll(" <- ", " ")) == 0)
{ // it is a digital output with a toggle switch event
RE.EventName.setSelection(k); // select the toggle event for the given output
found = true;
SpeechItem = SPEECH_OK; // set the next step of the speech analyzer
SpeechInvalid = 0;
startVoiceRecognitionActivity("OK?"); // start the next step of the speech analyzer - the question about the transmission method
return;

}
}
// if nothing was found so far, check partial similarity
for (int i = 0; i < Matches.size(); i++) // for all results of the speech analyzer (event names)
{
String mat = Matches.get(i).toLowerCase();
for (int k = 0; k < RE.EventName.getCount(); k++) // for all events in the list for the selected controller
{
if (mat.indexOf(RE.EventName.getItemAtPosition(k).toString().toLowerCase()) == 0) // the event name from the list begins the recognized text (partial match)

{
RE.EventName.setSelection(k); // select the event name
found = true;
// do not do anything further - the event is only selected, not sent
/*
SpeechItem = SPEECH_OK; // to select the signal to start sending the event
SpeechInvalid = 0;
startVoiceRecognitionActivity("OK?"); // start the next speech analysis step for the dispatch method
*/

return;

}
}
}
SpeechItem = SPEECH_DEVICENAME;

}
else
if (SpeechItem == SPEECH_OK) // the speech analyzer returned the transmission method - the choice of how to send the event to the system
{
for (int i = 0; i < Matches.size(); i++) // for all results of the speech analyzer
{
for (int k = 0; k < 10; k++) // for all phonetically written commands for WiFi
if (Matches.get(i).compareToIgnoreCase(RE.OK[k]) == 0)
{ // phonetic texts are stored in the variable RE.OK

SpeechItem = SPEECH_DEVICENAME;
SpeechInvalid = 0;
RE.SubmitEvent(RunEvent.WIFI); // add the event to the queue; it will be sent via WiFi

return;

}
}
// if not WiFi
for (int i = 0; i < Matches.size(); i++)
{
for (int k = 0; k < 10; k++)
if (Matches.get(i).compareToIgnoreCase(RE.vINTERNET[k]) == 0) // compare with the commands for sending over the Internet
{ // phonetic texts are stored in the variable vINTERNET

SpeechItem = SPEECH_DEVICENAME;
SpeechInvalid = 0;
RE.SubmitEvent(RunEvent.INTERNET); // send the event via the Internet (EDGE, 3G, 4G, GPRS or other external link depending on the device)
return;

}
}
// if neither the Internet nor the WiFi command was found
for (int i = 0; i < Matches.size(); i++)
{
for (int k = 0; k < 10; k++)
if (Matches.get(i).compareToIgnoreCase(RE.vSMS[k]) == 0) // check the commands for sending via SMS
{ // phonetic texts are stored in the application variable RE.vSMS

SpeechItem = SPEECH_DEVICENAME;
SpeechInvalid = 0;
RE.SubmitEvent(RunEvent.SMS); // add the event to the queue and send it via SMS
return;

}
}
// if the command is not for SMS, WiFi or the Internet
for (int i = 0; i < Matches.size(); i++)
{
for (int k = 0; k < 10; k++)
if (Matches.get(i).compareToIgnoreCase(RE.vemail[k]) == 0) // compare with the commands for sending via eMail
{ // phonetic texts are stored in the variable RE.vemail
SpeechItem = SPEECH_DEVICENAME;
SpeechInvalid = 0;
RE.SubmitEvent(RunEvent.EMAIL);
return;

}
}
SpeechItem = SPEECH_DEVICENAME;
}

}

super.onActivityResult(requestCode, resultCode, data);
}
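
The SPEECH_REQUEST_CODE branch above handles the answer to a text-to-speech data check that is not shown in these excerpts. It is presumably triggered somewhere in the application initialization along the following lines (a sketch only; the exact call site in the eHouse sources is not shown here):

// Ask Android whether voice data for the speech synthesizer is installed;
// the answer arrives in onActivityResult() with requestCode == SPEECH_REQUEST_CODE.
Intent checkIntent = new Intent();
checkIntent.setAction(TextToSpeech.Engine.ACTION_CHECK_TTS_DATA);
startActivityForResult(checkIntent, SPEECH_REQUEST_CODE);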