\r
-X.Audio = {\r
+/*\r
WebAudio : 1,\r
- HTML5 : 2,\r
+ HTMLAudio : 2,\r
Flash : 3,\r
Silverlight : 4,\r
Unity : 5,\r
WMP : 6,\r
RealPlayer : 7,\r
QuickTime : 8,\r
- \r
- create : function( sourceList, opt_option ){\r
- return new X_AudioProxy( X.Type.isArray( sourceList ) ? X_Object_cloneArray( sourceList ) : [ sourceList ], opt_option || {} );\r
+ */\r
+\r
+var X_Audio_BACKENDS = []; // Array.<Hash>\r
+\r
+X_TEMP.onSystemReady.push(\r
+ function(){\r
+ var canPlay = X[ 'Audio' ][ 'canPlay' ] = {},\r
+ i = X_Audio_BACKENDS.length,\r
+ be;\r
+ for( ; i; ){\r
+ be = X_Audio_BACKENDS[ --i ];\r
+ X_Object_override( canPlay, be.canPlay );\r
+ X[ 'Audio' ][ be.backendName ] = be.backendID;\r
+ };\r
+ });\r
+\r
+/**\r
+ * <p>複数のオーディオ・バックエンドから、与えられた音声を再生可能なものを見つけ、音声を再生します。\r
+ * <p>HTMLAudio の動作・機能がブラウザ毎にバラバラなのに業を煮やし、メソッドやイベントは独自に定義しています。\r
+ * <h4>バックエンドの種類</h4>\r
+ * <p>HTMLAudio, WebAudio, Silverlight, WMP\r
+ * <h4>イベント</h4>\r
+ * <dl>\r
+ * <dt>X.Event.BACKEND_READY <dd>音声(src リスト)を再生可能なバックエンドが見つかった。\r
+ * <dt>X.Event.BACKEND_NONE <dd>音声を再生可能なバックエンドが見つからなかった。Audio は kill されます。\r
+ * <dt>X.Event.MEDIA_CAN_TOUCH <dd>モバイル端末の制約で音声の再生またはロードに、タッチを必要とする場合、タッチイベント内で play を呼び出す準備が出来たことを通知する。\r
+ * <dt>X.Event.READY <dd>再生可能、実際の状態は canplay から loadeddata まで様々、、、\r
+ * <dt>X.Event.ERROR <dd><ul>\r
+ * <li> 1 : ユーザーによってメディアの取得が中断された\r
+ * <li> 2 : ネットワークエラー\r
+ * <li> 3 : メディアのデコードエラー\r
+ * <li> 4 : メディアがサポートされていない\r
+ * </ul>\r
+ * <dt>X.Event.MEDIA_PLAYING <dd>再生中に1秒以下のタイミングで発生.currentTime が取れる?\r
+ * <dt>X.Event.MEDIA_LOOP <dd>ループ直前に発生、キャンセル可能\r
+ * <dt>X.Event.MEDIA_LOOPED <dd>ループ時に発生\r
+ * <dt>X.Event.MEDIA_ENDED <dd>再生位置の(音声の)最後についた\r
+ * <dt>X.Event.MEDIA_PAUSED <dd>ポーズした\r
+ * <dt>X.Event.MEDIA_WAITING <dd>再生中に音声が待機状態に。\r
+ * <dt>X.Event.MEDIA_SEEKING <dd>シーク中に音声が待機状態に。\r
+ * </dl>\r
+ * \r
+ * @alias X.Audio\r
+ * @class 各種オーディオ機能をラップしインターフェイスを共通化する。\r
+ * @constructs Audio\r
+ * @extends {EventDispatcher}\r
+ * @param {array|string} sourceList\r
+ * @param {object=} opt_option\r
+ * @example //\r
+ * var audio = X.Audio( [ 'etc/special.mp3', 'etc/special.ogg', 'etc/special.wav' ] )\r
+ .listenOnce( X.Event.READY, onReady );\r
+ */\r
+X[ 'Audio' ] = X_EventDispatcher[ 'inherits' ](\r
+ 'X.Audio',\r
+ X_Class.POOL_OBJECT,\r
+ {\r
+ /**\r
+ * 音声の url。X.Event.BACKEND_READY で設定される。\r
+ * @alias Audio.prototype.source\r
+ * @type {string}\r
+ */\r
+ 'source' : '',\r
+ \r
+ /**\r
+ * 音声再生バックエンドの名前。X.Event.BACKEND_READY で設定される。\r
+ * @alias Audio.prototype.backendName\r
+ * @type {string}\r
+ */\r
+ 'backendName' : '',\r
+\r
+ 'Constructor' : function( sourceList, opt_option ){\r
+ X_Audio_startDetectionBackend(\r
+ X_Audio_BACKENDS[ 0 ], this,\r
+ X_Type_isArray( sourceList ) ? X_Array_copy( sourceList ) : [ sourceList ],\r
+ opt_option || {} );\r
+ this[ 'listenOnce' ]( [ X_EVENT_BACKEND_READY, X_EVENT_BACKEND_NONE, X_EVENT_KILL_INSTANCE ], X_Audio_handleEvent );\r
+ X_ViewPort[ 'listenOnce' ]( X_EVENT_UNLOAD, this, X_AudioSprite_handleEvent );\r
+ },\r
+ \r
+ /**\r
+ * 再生。開始位置・終了位置、ループの有無、ループ以降の開始位置、ループ以降の終了位置\r
+ * @alias Audio.prototype.play\r
+ * @param {number=} startTime 開始時間を ms で\r
+ * @param {number=} endTime 終了時間を ms で\r
+	 * @param {boolean=} loop endTime に達した際に曲をループさせるか\r
+ * @param {number=} loopStartTime ループ以後の開始時間を ms で\r
+ * @param {number=} loopEndTime ループ以後の終了時間を ms で\r
+ * @return {Audio} メソッドチェーン\r
+ */\r
+ 'play' : function( startTime, endTime, loop, loopStartTime, loopEndTime ){\r
+ var pair = X_Pair_get( this );\r
+ pair && pair.play( startTime, endTime, loop, loopStartTime, loopEndTime );\r
+ return this;\r
+ },\r
+ /**\r
+ * シーク、再生中で無い場合は次回再生開始位置の指定のみ\r
+ * @alias Audio.prototype.seek\r
+ * @param {number} seekTime シーク位置を ms で\r
+ * @return {Audio} メソッドチェーン\r
+ */\r
+ 'seek' : function( seekTime ){\r
+ var pair = X_Pair_get( this );\r
+ pair && pair.seek( seekTime );\r
+ return this;\r
+ },\r
+ /**\r
+ * ポーズ\r
+ * @alias Audio.prototype.pause\r
+ * @return {Audio} メソッドチェーン\r
+ */\r
+ 'pause' : function(){\r
+ var pair = X_Pair_get( this );\r
+ pair && pair.pause();\r
+ return this;\r
+ },\r
+ /**\r
+ * 状態の getter と setter\r
+ * @alias Audio.prototype.state\r
+ * @param {object=} obj setter の場合、上書きする値を格納したobject\r
+ * @return {Audio|object}\r
+ * @example\r
+	 * audio.state(\r
+ {\r
+ 'startTime' : 0,\r
+ 'endTime' : 80000,\r
+ 'loopStartTime' : 120000,\r
+ 'loopEndTime' : 200000,\r
+ 'currentTime' : 0,\r
+ 'loop' : true,\r
+ 'looded' : false,\r
+ 'volume' : 1,\r
+ 'autoplay' : true\r
+ });\r
+ */\r
+ 'state' : function( obj ){\r
+ var pair = X_Pair_get( this );\r
+ if( obj === undefined ){\r
+ return pair ? pair.getState() :\r
+ {\r
+ 'startTime' : -1,\r
+ 'endTime' : -1,\r
+ 'loopStartTime' : -1,\r
+ 'loopEndTime' : -1,\r
+ 'currentTime' : -1,\r
+ 'loop' : false,\r
+ 'looded' : false,\r
+ 'error' : 0,\r
+ 'autoplay' : false,\r
+ 'playing' : false,\r
+ 'source' : this[ 'source' ],\r
+ 'duration' : 0,\r
+ 'volume' : 0.5\r
+ };\r
+ };\r
+ pair && pair.setState( obj );\r
+ return this;\r
+ }, \r
+ /**\r
+ * ループの setter\r
+ * @alias Audio.prototype.loop\r
+ * @param {boolean} v \r
+ * @return {Audio}\r
+ */\r
+ 'loop' : function( v ){\r
+ var pair = X_Pair_get( this );\r
+ pair && pair.loop( v );\r
+ return this;\r
+ },\r
+ /**\r
+ * ボリュームの setter 実装不十分!\r
+ * @alias Audio.prototype.volume\r
+ * @param {number} v \r
+ * @return {Audio}\r
+ */\r
+ 'volume' : function( v ){\r
+ var pair = X_Pair_get( this );\r
+ pair && pair.volume( v );\r
+ return this;\r
+ },\r
+ /**\r
+ * 再生位置のsetter。\r
+ * @alias Audio.prototype.currentTime\r
+ * @param {number} v msで\r
+ * @return {Audio}\r
+ */\r
+ 'currentTime' : function( v ){\r
+ var pair = X_Pair_get( this );\r
+ pair && pair.currentTime( v );\r
+ return this;\r
+ },\r
+ /**\r
+ * 再生中か?\r
+ * @alias Audio.prototype.isPlaying\r
+ * @return {boolean}\r
+ */\r
+ 'isPlaying' : function(){\r
+ var pair = X_Pair_get( this );\r
+ return pair && pair.playing;\r
+ }\r
+ \r
}\r
+);\r
+\r
+function X_Audio_handleEvent( e ){\r
+ var backend, pair;\r
+ \r
+ switch( e.type ){\r
+ case X_EVENT_BACKEND_READY :\r
+ backend = X_Audio_BACKENDS[ e[ 'backendID' ] ];\r
+ \r
+ this[ 'unlisten' ]( X_EVENT_BACKEND_NONE, X_Audio_handleEvent );\r
+ this[ 'source' ] = e[ 'source' ];\r
+ this[ 'backendName' ] = backend.backendName;\r
+ \r
+ X_Pair_create( this, backend.klass( this, e[ 'source' ], e[ 'option' ] ) );\r
+ this[ 'listenOnce' ]( X_EVENT_READY, X_Audio_handleEvent );\r
+ break;\r
+ \r
+ case X_EVENT_READY :\r
+ pair = X_Pair_get( this );\r
+ ( pair.autoplay || pair._playReserved ) && pair.actualPlay();\r
+ delete pair._playReserved;\r
+ break;\r
+ \r
+ case X_EVENT_BACKEND_NONE :\r
+ case X_EVENT_UNLOAD :\r
+ this[ 'kill' ]();\r
+ break;\r
+ \r
+ case X_EVENT_KILL_INSTANCE :\r
+ X_ViewPort[ 'unlisten' ]( X_EVENT_UNLOAD, this, X_AudioSprite_handleEvent );\r
+ if( backend = X_Pair_get( this ) ){\r
+ backend[ 'kill' ]();\r
+ X_Pair_release( this, backend );\r
+ };\r
+ break;\r
+ };\r
};\r
\r
-var X_Audio_BACKENDS = [],\r
- X_Audio_WRAPPER_LIST = [];\r
\r
/*\r
* TODO preplayerror play してみたら error が出た、backend の変更。\r
*/\r
\r
-function X_Audio_startDetectionBackend( backend, proxy, sourceList, option ){\r
+function X_Audio_startDetectionBackend( backend, xaudio, sourceList, option ){\r
var source = sourceList[ 0 ] || '', \r
- ext = X_URL_cleanup( source ).split( '.' ).pop(),\r
+ ext = X_URL_getEXT( source ),\r
sup;\r
\r
if( source && backend ){\r
- sup = [ proxy, sourceList, option, source, ext ];\r
+ sup = [ xaudio, sourceList, option, source, ext ];\r
sup[ 5 ] = sup;\r
\r
- proxy.listenOnce( [ 'support', 'nosupport' ], backend, X_Audio_onEndDetection, sup );\r
- backend.detect( proxy, source, ext );\r
+ xaudio[ 'listenOnce' ]( X_EVENT_COMPLETE, backend, X_Audio_onEndedDetection, sup );\r
+ backend.detect( xaudio, source, ext );\r
} else {\r
- proxy.asyncDispatch( 'nobackend' );\r
+ xaudio[ 'asyncDispatch' ]( X_EVENT_BACKEND_NONE );\r
};\r
};\r
\r
-function X_Audio_onEndDetection( e, proxy, sourceList, option, source, ext, sup ){\r
+function X_Audio_onEndedDetection( e, xaudio, sourceList, option, source, ext, sup ){\r
var i = X_Audio_BACKENDS.indexOf( this ), backend;\r
\r
- proxy.unlisten( [ 'support', 'nosupport' ], this, X_Audio_onEndDetection, sup );\r
- \r
- switch( e.type ){\r
- case 'support' :\r
- proxy._backend = i;\r
- proxy.asyncDispatch( {\r
- type : 'backendfound',\r
- option : option,\r
- source : source,\r
- backendName : this.backendName\r
- } );\r
- break;\r
- case 'nosupport' :\r
- console.log( 'No ' + source + ' ' + this.backendName );\r
- if( sup[ 3 ] = source = sourceList[ sourceList.indexOf( source ) + 1 ] ){\r
- sup[ 4 ] = ext = X_URL_cleanup( source ).split( '.' ).pop();\r
- proxy.listenOnce( [ 'support', 'nosupport' ], this, X_Audio_onEndDetection, sup );\r
- this.detect( proxy, source, ext );\r
- } else\r
- if( backend = X_Audio_BACKENDS[ i + 1 ] ){\r
- X_Audio_startDetectionBackend( backend, proxy, sourceList, option );\r
- } else {\r
- proxy.asyncDispatch( 'nobackend' );\r
- };\r
- break;\r
+ if( e.canPlay ){\r
+ xaudio[ 'asyncDispatch' ]( {\r
+ type : X_EVENT_BACKEND_READY,\r
+ 'option' : option,\r
+ 'source' : source,\r
+ 'backendName' : this[ 'backendName' ],\r
+ 'backendID' : i\r
+ } ); \r
+ } else {\r
+ console.log( 'No ' + source + ' ' + this[ 'backendName' ] );\r
+ if( sup[ 3 ] = source = sourceList[ sourceList.indexOf( source ) + 1 ] ){\r
+ sup[ 4 ] = ext = X_URL_getEXT( source );\r
+ xaudio[ 'listenOnce' ]( X_EVENT_COMPLETE, this, X_Audio_onEndedDetection, sup );\r
+ this.detect( xaudio, source, ext );\r
+ } else\r
+ if( backend = X_Audio_BACKENDS[ i + 1 ] ){\r
+ X_Audio_startDetectionBackend( backend, xaudio, sourceList, option );\r
+ } else {\r
+ xaudio[ 'asyncDispatch' ]( X_EVENT_BACKEND_NONE );\r
+ }; \r
};\r
};\r
\r
-function X_AudioProxy_getAudioWrapper( proxy ){\r
- var i = X_Audio_WRAPPER_LIST.length;\r
- for( ; i; ){\r
- if( X_Audio_WRAPPER_LIST[ --i ].proxy === proxy ) return X_Audio_WRAPPER_LIST[ i ];\r
- };\r
-};\r
\r
-var X_AudioProxy = X.EventDispatcher.inherits(\r
- 'X.AV.AudioProxy',\r
- X.Class.POOL_OBJECT,\r
+\r
+var X_AudioBase = X_EventDispatcher[ 'inherits' ](\r
+ 'X.AudioBase',\r
+ X_Class.ABSTRACT,\r
{\r
- source : '',\r
- backendName : '',\r
- _backend : -1,\r
+ disatcher : null,\r
\r
- Constructor : function( sourceList, option ){\r
- X_Audio_startDetectionBackend( X_Audio_BACKENDS[ 0 ], this, sourceList, option );\r
- this.listenOnce( [ 'backendfound', 'nobackend', X.Event.KILL_INSTANCE ], X_AudioProxy_handleEvent );\r
- },\r
+ startTime : 0, // state_startTime\r
+			endTime        : -1, // state_endTime\r
+ loopStartTime : -1,\r
+ loopEndTime : -1,\r
+ seekTime : -1,\r
+ duration : 0, //\r
+\r
+ playing : false,\r
+ error : 0, // \r
+ autoLoop : false,\r
+ looped : false,\r
+ autoplay : false,//\r
+ gain : 0.5,\r
\r
- close : function(){\r
- return this._backend !== -1 && X_AudioProxy_getAudioWrapper( this ).close();\r
- },\r
+ _playReserved : false,\r
\r
play : function( startTime, endTime, loop, loopStartTime, loopEndTime ){\r
- var state, duration;\r
if( 0 <= startTime ){\r
- this.state( {\r
- currentTime : startTime,\r
- startTime : startTime,\r
- endTime : endTime,\r
- loop : loop,\r
- loopStartTime : loopStartTime,\r
- loopEndTime : loopEndTime\r
+ this.setState( {\r
+ 'currentTime' : startTime,\r
+ 'startTime' : startTime,\r
+ 'endTime' : endTime,\r
+ 'loop' : loop,\r
+ 'looped' : false,\r
+ 'loopStartTime' : loopStartTime,\r
+ 'loopEndTime' : loopEndTime\r
} );\r
};\r
- this._backend !== -1 && X_AudioProxy_getAudioWrapper( this ).play();\r
- return this;\r
+ // canPlay() : autoplay = true\r
+ this.actualPlay();\r
},\r
\r
seek : function( seekTime ){\r
- var state = this.state(),\r
- end = X_AudioWrapper_getEndTime( X_AudioProxy_getAudioWrapper( this ) );\r
- if( seekTime < end ){\r
- this.state( { currentTime : seekTime } );\r
+ if( seekTime < X_Audio_getEndTime( this ) ){\r
+ this.setState( { 'currentTime' : seekTime } );\r
};\r
- return this;\r
},\r
\r
pause : function(){\r
- this.state().playing && X_AudioProxy_getAudioWrapper( this ).pause();\r
- return this;\r
- },\r
- \r
- state : function( obj ){\r
- var backend = this._backend !== -1 && X_AudioProxy_getAudioWrapper( this );\r
-\r
- if( obj === undefined ){\r
- return backend ?\r
- backend.state() :\r
- {\r
- startTime : -1,\r
- endTime : -1,\r
- loopStartTime : -1,\r
- loopEndTime : -1,\r
- currentTime : -1,\r
- loop : false,\r
- looded : false,\r
- error : false,\r
- playing : false,\r
- \r
- source : this.source || '',\r
- duration : 0\r
- };\r
- };\r
- backend && backend.state( obj );\r
- return this;\r
+ this.seekTime = this.getActualCurrentTime();\r
+ this.playing && this.actualPause();\r
+ // delete this.autoplay\r
+ // delete this.playing\r
}, \r
\r
loop : function( v ){\r
- var backend = this._backend !== -1 && X_AudioProxy_getAudioWrapper( this );\r
if( v === undefined ){\r
- return backend && backend.state().loop;\r
+ return this.autoLoop;\r
};\r
- backend && backend.state( { loop : v } );\r
- return this;\r
+ this.setState( { 'loop' : v } );\r
},\r
\r
volume : function( v ){\r
- var backend = this._backend !== -1 && X_AudioProxy_getAudioWrapper( this );\r
if( v === undefined ){\r
- return backend && backend.state().volume;\r
+ return this.gain;\r
};\r
- backend && backend.state( { volume : v } );\r
- return this;\r
+ this.setState( { 'volume' : v } );\r
},\r
\r
currentTime : function( v ){\r
- var backend = this._backend !== -1 && X_AudioProxy_getAudioWrapper( this );\r
if( v === undefined ){\r
- return backend && backend.state().currentTime;\r
+ return this.playing ? this.getActualCurrentTime() : this.seekTime;\r
};\r
- backend && backend.state( { currentTime : v } );\r
- return this;\r
+ this.setState( { 'currentTime' : v } );\r
},\r
-\r
- isPlaying : function(){\r
- return this._backend !== -1 && X_AudioProxy_getAudioWrapper( this ).state().playing;\r
- }\r
\r
- }\r
-);\r
-\r
-function X_AudioProxy_handleEvent( e ){\r
- switch( e.type ){\r
- case 'backendfound' :\r
- this.unlisten( 'nobackend', X_AudioProxy_handleEvent );\r
- this.source = e.source;\r
- this.backendName = X_Audio_BACKENDS[ this._backend ].backendName;\r
- X_Audio_WRAPPER_LIST.push( new X_Audio_BACKENDS[ this._backend ].klass( this, e.source, e.option ) );\r
- break;\r
- \r
- case 'nobackend' :\r
- this.kill();\r
- break;\r
+ getState : function(){\r
+ \r
+ return {\r
+ 'startTime' : this.startTime,\r
+ 'endTime' : this.endTime < 0 ? this.duration : this.endTime,\r
+ 'loopStartTime' : this.loopStartTime < 0 ? this.startTime : this.loopStartTime,\r
+ 'loopEndTime' : this.loopEndTime < 0 ? ( this.endTime || this.duration ) : this.loopEndTime,\r
+ 'loop' : this.autoLoop,\r
+ 'looped' : this.looped,\r
+ 'volume' : this.gain,\r
+ 'playing' : this.playing, \r
+ 'duration' : this.duration,\r
+ 'autoplay' : this.autoplay,\r
+ \r
+ 'currentTime' : this.playing ? this.getActualCurrentTime() : this.seekTime,\r
+ 'error' : this.getActualError ? this.getActualError() : this.error\r
+ };\r
+ },\r
\r
- case X.Event.KILL_INSTANCE :\r
- this.close();\r
- break;\r
- };\r
-};\r
-\r
-function X_AudioWrapper_updateStates( audioWrapper, obj ){\r
- var playing = audioWrapper.playing,\r
- k, v,\r
- end = 0, seek = 0, volume = 0;\r
- \r
- for( k in obj ){\r
- v = obj[ k ];\r
- switch( k ){\r
- case 'currentTime' :\r
- v = X_AudioWrapper_timeStringToNumber( v );\r
- if( X.Type.isNumber( v ) ){\r
- if( playing ){\r
- if( audioWrapper.state().currentTime !== v ){\r
- audioWrapper.seekTime = v;\r
- seek = 2;\r
+ setState : function( obj ){\r
+ var playing = this.playing,\r
+ k, v,\r
+ end = 0, seek = 0, volume = 0;\r
+ \r
+ for( k in obj ){\r
+ v = obj[ k ];\r
+ switch( k ){\r
+ case 'currentTime' :\r
+ v = X_Audio_timeStringToNumber( v );\r
+ if( X_Type_isNumber( v ) ){\r
+ if( playing ){\r
+ if( this.getActualCurrentTime() !== v ){\r
+ seek = 2;\r
+ this.seekTime = v;\r
+ }; \r
+ } else {\r
+ this.seekTime = v;\r
+ };\r
+ } else {\r
+ continue;\r
};\r
- } else {\r
- audioWrapper.seekTime = v;\r
- };\r
- } else {\r
- continue;\r
- };\r
- break;\r
+ break;\r
+ \r
+ case 'startTime' :\r
+ v = X_Audio_timeStringToNumber( v );\r
+ if( v || v === 0 ){\r
+ if( this.startTime !== v ){\r
+ this.startTime = v; \r
+ };\r
+ } else {\r
+ delete this.startTime;\r
+ };\r
+ break;\r
\r
- case 'startTime' :\r
- case 'endTime' :\r
- case 'loopStartTime' :\r
- case 'loopEndTime' :\r
- v = X_AudioWrapper_timeStringToNumber( v );\r
- console.log( k + ' ' + v );\r
- if( v || v === 0 ){\r
- if( audioWrapper[ k ] !== v ){\r
- audioWrapper[ k ] = v;\r
+ case 'endTime' :\r
+ v = X_Audio_timeStringToNumber( v );\r
+ if( v || v === 0 ){\r
+ if( this.endTime !== v ){\r
+ this.endTime = v;\r
+ if( playing ) end = 1; \r
+ };\r
+ } else {\r
+ delete this.endTime;\r
+ if( playing ) end = 1;\r
+ };\r
+ break;\r
\r
- // 再生中の endTime の変更\r
- if( playing && ( k === 'endTime' || k === 'loopEndTime' ) ) end = 1; \r
- };\r
- } else {\r
- delete audioWrapper[ k ];\r
- if( playing && ( k === 'endTime' || k === 'loopEndTime' ) ) end = 1;\r
- };\r
- break;\r
-\r
- case 'looped' :\r
- if( playing ) seek = 2;\r
- case 'loop' :\r
- case 'autoplay' :\r
- if( X.Type.isBoolean( v ) && audioWrapper[ k ] !== v ){\r
- audioWrapper[ k ] = v;\r
+ case 'loopStartTime' :\r
+ v = X_Audio_timeStringToNumber( v );\r
+ if( v || v === 0 ){\r
+ if( this.loopStartTime !== v ){\r
+ this.loopStartTime = v; \r
+ };\r
+ } else {\r
+ delete this.loopStartTime;\r
+ };\r
+ break;\r
+ \r
+ case 'loopEndTime' :\r
+ v = X_Audio_timeStringToNumber( v );\r
+ if( v || v === 0 ){\r
+ if( this.loopEndTime !== v ){\r
+ this.loopEndTime = v;\r
+ if( playing ) end = 1; \r
+ };\r
+ } else {\r
+ delete this.loopEndTime;\r
+ if( playing ) end = 1;\r
+ };\r
+ break;\r
+ \r
+ case 'looped' :\r
+ if( X_Type_isBoolean( v ) && this.looped !== v ){\r
+ this.looped = v;\r
+ if( playing ) seek = 2;\r
+ };\r
+ break;\r
+ \r
+ case 'loop' :\r
+ if( X_Type_isBoolean( v ) && this.autoLoop !== v ){\r
+ this.autoLoop = v;\r
+ };\r
+ break;\r
+ \r
+ case 'autoplay' :\r
+ if( X_Type_isBoolean( v ) && this.autoplay !== v ){\r
+ this.autoplay = v;\r
+ };\r
+ break;\r
+ \r
+ case 'volume' :\r
+ if( X_Type_isNumber( v ) ){\r
+ v = v < 0 ? 0 : 1 < v ? 1 : v;\r
+ if( this.gain !== v ){\r
+ this.gain = v;\r
+ // if playing -> update\r
+ if( playing ) volume = 4;\r
+ };\r
+ };\r
+ break;\r
+ case 'useVideo' :\r
+ break;\r
+ default :\r
+ alert( 'bad arg! ' + k );\r
};\r
- break;\r
+ };\r
+ \r
+ if( this.endTime < this.startTime ||\r
+ ( this.loopEndTime < 0 ? this.endTime : this.loopEndTime ) < ( this.loopStartTime < 0 ? this.startTime : this.loopStartTime ) ||\r
+ X_Audio_getEndTime( this ) < this.seekTime// ||\r
+ //this.duration < this.endTime\r
+ ){\r
+ console.log( 'setState 0:' + this.startTime + ' -> ' + this.endTime + ' looped:' + this.looped + ' 1:' + this.loopStartTime + ' -> ' + this.loopEndTime );\r
+ return;\r
+ };\r
+ \r
+ v = end + seek + volume;\r
+ return v && this.playing && this.afterUpdateState( v );\r
+ }\r
+ \r
+ }\r
+);\r
\r
- case 'volume' :\r
- if( X.Type.isNumber( v ) ){\r
- v = v < 0 ? 0 : 1 < v ? 1 : v;\r
- if( audioWrapper[ k ] !== v ){\r
- audioWrapper[ k ] = v;\r
- // if playing -> update\r
- if( playing ) volume = 4;\r
- };\r
- };\r
- break;\r
- };\r
- };\r
- \r
- if( audioWrapper.endTime < audioWrapper.startTime ||\r
- ( audioWrapper.loopEndTime < 0 ? audioWrapper.endTime : audioWrapper.loopEndTime ) < ( audioWrapper.loopStartTime < 0 ? audioWrapper.startTime : audioWrapper.loopStartTime ) ||\r
- X_AudioWrapper_getEndTime( audioWrapper ) < audioWrapper.seekTime// ||\r
- //audioWrapper.duration < audioWrapper.endTime\r
- ){\r
- console.log( 'error @updateStateObject() begin:' + audioWrapper.startTime + ' end:' + audioWrapper.endTime + ' d:' + audioWrapper.duration + ' ls:' + audioWrapper.loopStartTime );\r
- return 0;\r
- };\r
- \r
- return end + seek + volume;\r
-};\r
\r
-function X_AudioWrapper_timeStringToNumber( time ){\r
+function X_Audio_timeStringToNumber( time ){\r
var ary, ms, s = 0, m = 0, h = 0;\r
- if( X.Type.isNumber( time ) ) return time;\r
- if( !X.Type.isString( time ) || !time.length ) return;\r
+\r
+ if( X_Type_isNumber( time ) ) return time;\r
+ if( !X_Type_isString( time ) || !time.length ) return;\r
\r
ary = time.split( '.' );\r
- ms = parseInt( ( ary[ 1 ] + '000' ).substr( 0, 3 ) ) || 0;\r
+ ms = parseFloat( ( ary[ 1 ] + '000' ).substr( 0, 3 ) ) || 0;\r
\r
ary = ary[ 0 ].split( ':' );\r
if( 3 < ary.length ) return;\r
case 0 :\r
break;\r
case 1 :\r
- s = parseInt( ary[ 0 ] ) || 0;\r
+ s = parseFloat( ary[ 0 ] ) || 0;\r
break;\r
case 2 :\r
- m = parseInt( ary[ 0 ] ) || 0;\r
- s = parseInt( ary[ 1 ] ) || 0;\r
+ m = parseFloat( ary[ 0 ] ) || 0;\r
+ s = parseFloat( ary[ 1 ] ) || 0;\r
if( 60 <= s ) alert( 'invalid time string ' + time );\r
break;\r
case 3 :\r
- h = parseInt( ary[ 0 ] ) || 0;\r
- m = parseInt( ary[ 1 ] ) || 0;\r
- s = parseInt( ary[ 2 ] ) || 0;\r
+ h = parseFloat( ary[ 0 ] ) || 0;\r
+ m = parseFloat( ary[ 1 ] ) || 0;\r
+ s = parseFloat( ary[ 2 ] ) || 0;\r
if( 60 <= s ) alert( 'invalid time string ' + time );\r
if( 60 <= m ) alert( 'invalid time string ' + time );\r
break;\r
return ms < 0 ? 0 : ms;\r
};\r
\r
-function X_AudioWrapper_getStartTime( audioWrapper, endTime, delSeekTime ){\r
- var seek = audioWrapper.seekTime;\r
- if( delSeekTime ) delete audioWrapper.seekTime;\r
+function X_Audio_getStartTime( audioBase, endTime, delSeekTime ){\r
+ var seek = audioBase.seekTime;\r
+ \r
+ if( delSeekTime ) delete audioBase.seekTime;\r
\r
if( 0 <= seek ){\r
- if( audioWrapper.duration <= seek || endTime < seek ) return 0;\r
+ if( audioBase.duration <= seek || endTime < seek ) return 0;\r
return seek;\r
};\r
\r
- if( audioWrapper.looped && 0 <= audioWrapper.loopStartTime ){\r
- if( audioWrapper.duration <= audioWrapper.loopStartTime || endTime < audioWrapper.loopStartTime ) return 0;\r
- return audioWrapper.loopStartTime;\r
+ if( audioBase.looped && 0 <= audioBase.loopStartTime ){\r
+ if( audioBase.duration <= audioBase.loopStartTime || endTime < audioBase.loopStartTime ) return 0;\r
+ return audioBase.loopStartTime;\r
};\r
\r
- if( audioWrapper.startTime < 0 || audioWrapper.duration <= audioWrapper.startTime ) return 0;\r
- return audioWrapper.startTime;\r
+ if( audioBase.startTime < 0 || audioBase.duration <= audioBase.startTime ) return 0;\r
+ return audioBase.startTime;\r
};\r
\r
-function X_AudioWrapper_getEndTime( audioWrapper ){\r
- var duration = audioWrapper.duration;\r
+function X_Audio_getEndTime( audioBase ){\r
+ var duration = audioBase.duration;\r
\r
- if( audioWrapper.looped && 0 <= audioWrapper.loopEndTime ){\r
- if( duration <= audioWrapper.loopEndTime ) return duration;\r
- return audioWrapper.loopEndTime;\r
+ if( audioBase.looped && 0 <= audioBase.loopEndTime ){\r
+ if( duration <= audioBase.loopEndTime ) return duration;\r
+ return audioBase.loopEndTime;\r
};\r
\r
- if( audioWrapper.endTime < 0 || duration <= audioWrapper.endTime ) return duration;\r
- return audioWrapper.endTime;\r
+ if( audioBase.endTime < 0 || duration <= audioBase.endTime ) return duration;\r
+ return audioBase.endTime;\r
};\r
\r