/src/mozilla-central/dom/media/webspeech/synth/SpeechSynthesisUtterance.cpp
/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim:set ts=2 sw=2 sts=2 et cindent: */
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "nsCOMPtr.h"
#include "nsCycleCollectionParticipant.h"
#include "nsGkAtoms.h"

#include "mozilla/dom/SpeechSynthesisEvent.h"
#include "mozilla/dom/SpeechSynthesisUtteranceBinding.h"
#include "SpeechSynthesisUtterance.h"
#include "SpeechSynthesisVoice.h"

#include <stdlib.h>
#include <algorithm>  // std::min/std::max, used by the clamping setters below

namespace mozilla {
namespace dom {

NS_IMPL_CYCLE_COLLECTION_INHERITED(SpeechSynthesisUtterance,
                                   DOMEventTargetHelper, mVoice);

NS_INTERFACE_MAP_BEGIN_CYCLE_COLLECTION(SpeechSynthesisUtterance)
NS_INTERFACE_MAP_END_INHERITING(DOMEventTargetHelper)

NS_IMPL_ADDREF_INHERITED(SpeechSynthesisUtterance, DOMEventTargetHelper)
NS_IMPL_RELEASE_INHERITED(SpeechSynthesisUtterance, DOMEventTargetHelper)

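// C++ constructor. The initializer list establishes the defaults visible to
// content: volume, rate and pitch all start at 1, with no voice or language set.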
SpeechSynthesisUtterance::SpeechSynthesisUtterance(nsPIDOMWindowInner* aOwnerWindow,
                                                   const nsAString& aText)
  : DOMEventTargetHelper(aOwnerWindow)
  , mText(aText)
  , mVolume(1)
  , mRate(1)
  , mPitch(1)
  , mState(STATE_NONE)
  , mPaused(false)
{
}

SpeechSynthesisUtterance::~SpeechSynthesisUtterance() {}

JSObject*
SpeechSynthesisUtterance::WrapObject(JSContext* aCx, JS::Handle<JSObject*> aGivenProto)
{
  return SpeechSynthesisUtterance_Binding::Wrap(aCx, this, aGivenProto);
}

nsISupports*
SpeechSynthesisUtterance::GetParentObject() const
{
  return GetOwner();
}

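// WebIDL constructors, i.e. `new SpeechSynthesisUtterance([text])` from script.
// The global must resolve to an inner window; otherwise construction fails.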
already_AddRefed<SpeechSynthesisUtterance>
SpeechSynthesisUtterance::Constructor(GlobalObject& aGlobal,
                                      ErrorResult& aRv)
{
  return Constructor(aGlobal, EmptyString(), aRv);
}

already_AddRefed<SpeechSynthesisUtterance>
SpeechSynthesisUtterance::Constructor(GlobalObject& aGlobal,
                                      const nsAString& aText,
                                      ErrorResult& aRv)
{
  nsCOMPtr<nsPIDOMWindowInner> win = do_QueryInterface(aGlobal.GetAsSupports());

  if (!win) {
    aRv.Throw(NS_ERROR_FAILURE);
    return nullptr;
  }

  RefPtr<SpeechSynthesisUtterance> object =
    new SpeechSynthesisUtterance(win, aText);
  return object.forget();
}

void
SpeechSynthesisUtterance::GetText(nsString& aResult) const
{
  aResult = mText;
}

void
SpeechSynthesisUtterance::SetText(const nsAString& aText)
{
  mText = aText;
}

void
SpeechSynthesisUtterance::GetLang(nsString& aResult) const
{
  aResult = mLang;
}

void
SpeechSynthesisUtterance::SetLang(const nsAString& aLang)
{
  mLang = aLang;
}

SpeechSynthesisVoice*
SpeechSynthesisUtterance::GetVoice() const
{
  return mVoice;
}

void
SpeechSynthesisUtterance::SetVoice(SpeechSynthesisVoice* aVoice)
{
  mVoice = aVoice;
}

float
SpeechSynthesisUtterance::Volume() const
{
  return mVolume;
}

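// Out-of-range values are clamped to [0, 1] rather than rejected.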
void
SpeechSynthesisUtterance::SetVolume(float aVolume)
{
  mVolume = std::max<float>(std::min<float>(aVolume, 1), 0);
}

float
SpeechSynthesisUtterance::Rate() const
{
  return mRate;
}

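// Out-of-range values are clamped to [0.1, 10]; 1 is normal speaking rate.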
void
SpeechSynthesisUtterance::SetRate(float aRate)
{
  mRate = std::max<float>(std::min<float>(aRate, 10), 0.1f);
}

float
SpeechSynthesisUtterance::Pitch() const
{
  return mPitch;
}

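// Out-of-range values are clamped to [0, 2]; 1 is the default, unmodified pitch.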
void
SpeechSynthesisUtterance::SetPitch(float aPitch)
{
  mPitch = std::max<float>(std::min<float>(aPitch, 2), 0);
}

void
SpeechSynthesisUtterance::GetChosenVoiceURI(nsString& aResult) const
{
  aResult = mChosenVoiceURI;
}

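// Builds a SpeechSynthesisEvent of the given type, carrying the character
// position, elapsed time and mark name, and dispatches it at this utterance
// as a trusted event. These events neither bubble nor are cancelable.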
void
SpeechSynthesisUtterance::DispatchSpeechSynthesisEvent(const nsAString& aEventType,
                                                       uint32_t aCharIndex,
                                                       const Nullable<uint32_t>& aCharLength,
                                                       float aElapsedTime,
                                                       const nsAString& aName)
{
  SpeechSynthesisEventInit init;
  init.mBubbles = false;
  init.mCancelable = false;
  init.mUtterance = this;
  init.mCharIndex = aCharIndex;
  init.mCharLength = aCharLength;
  init.mElapsedTime = aElapsedTime;
  init.mName = aName;

  RefPtr<SpeechSynthesisEvent> event =
    SpeechSynthesisEvent::Constructor(this, aEventType, init);
  DispatchTrustedEvent(event);
}

} // namespace dom
} // namespace mozilla