-- libraries/base/GHC/IO/Encoding/CodePage/API.hs
{-# LANGUAGE Trustworthy #-}
{-# LANGUAGE CPP, NoImplicitPrelude, NondecreasingIndentation, RecordWildCards, ScopedTypeVariables #-}
{-# OPTIONS_GHC -fno-warn-name-shadowing #-}
module GHC.IO.Encoding.CodePage.API (
    mkCodePageEncoding
  ) where

import Foreign.C
import Foreign.Ptr
import Foreign.Marshal
import Foreign.Storable
import Data.Bits
import Data.Either
import Data.Word

import GHC.Base
import GHC.List
import GHC.IO.Buffer
import GHC.IO.Encoding.Failure
import GHC.IO.Encoding.Types
import GHC.IO.Encoding.UTF16
import GHC.Num
import GHC.Show
import GHC.Real
import GHC.Windows
import GHC.ForeignPtr (castForeignPtr)

import System.Posix.Internals


c_DEBUG_DUMP :: Bool
c_DEBUG_DUMP = False

debugIO :: String -> IO ()
debugIO s
  | c_DEBUG_DUMP = puts s
  | otherwise    = return ()

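-- The Win32 API uses the stdcall calling convention on 32-bit x86, whereas
-- 64-bit Windows has a single calling convention that foreign imports spell as
-- ccall, so the right convention is chosen at preprocessing time.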
#if defined(i386_HOST_ARCH)
# define WINDOWS_CCONV stdcall
#elif defined(x86_64_HOST_ARCH)
# define WINDOWS_CCONV ccall
#else
# error Unknown mingw32 arch
#endif


type LPCSTR = Ptr Word8

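-- The two constants below mirror MAX_DEFAULTCHAR and MAX_LEADBYTES from
-- winnls.h, which fix the sizes of the DefaultChar and LeadByte arrays in the
-- CPINFO structure.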
mAX_DEFAULTCHAR :: Int
mAX_DEFAULTCHAR = 2

mAX_LEADBYTES :: Int
mAX_LEADBYTES = 12

-- Don't really care about the contents of this, but we have to make sure the size is right
data CPINFO = CPINFO {
    maxCharSize :: UINT,
    defaultChar :: [BYTE], -- ^ Always of length mAX_DEFAULTCHAR
    leadByte    :: [BYTE]  -- ^ Always of length mAX_LEADBYTES
  }

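-- The Storable instance below matches the layout of CPINFO from windows.h:
-- a UINT MaxCharSize followed by BYTE DefaultChar[MAX_DEFAULTCHAR] and
-- BYTE LeadByte[MAX_LEADBYTES], so only the total size and the field order matter.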
instance Storable CPINFO where
    sizeOf    _ = sizeOf (undefined :: UINT) + (mAX_DEFAULTCHAR + mAX_LEADBYTES) * sizeOf (undefined :: BYTE)
    alignment _ = alignment (undefined :: CInt)
    peek ptr = do
      ptr <- return $ castPtr ptr
      a <- peek ptr
      ptr <- return $ castPtr $ advancePtr ptr 1
      b <- peekArray mAX_DEFAULTCHAR ptr
      c <- peekArray mAX_LEADBYTES (advancePtr ptr mAX_DEFAULTCHAR)
      return $ CPINFO a b c
    poke ptr val = do
      ptr <- return $ castPtr ptr
      poke ptr (maxCharSize val)
      ptr <- return $ castPtr $ advancePtr ptr 1
      pokeArray' "CPINFO.defaultChar" mAX_DEFAULTCHAR ptr (defaultChar val)
      pokeArray' "CPINFO.leadByte" mAX_LEADBYTES (advancePtr ptr mAX_DEFAULTCHAR) (leadByte val)

pokeArray' :: Storable a => String -> Int -> Ptr a -> [a] -> IO ()
pokeArray' msg sz ptr xs | length xs == sz = pokeArray ptr xs
                         | otherwise       = error $ msg ++ ": expected " ++ show sz ++ " elements in list but got " ++ show (length xs)


foreign import WINDOWS_CCONV unsafe "windows.h GetCPInfo"
    c_GetCPInfo :: UINT       -- ^ CodePage
                -> Ptr CPINFO -- ^ lpCPInfo
                -> IO BOOL

foreign import WINDOWS_CCONV unsafe "windows.h MultiByteToWideChar"
    c_MultiByteToWideChar :: UINT   -- ^ CodePage
                          -> DWORD  -- ^ dwFlags
                          -> LPCSTR -- ^ lpMultiByteStr
                          -> CInt   -- ^ cbMultiByte
                          -> LPWSTR -- ^ lpWideCharStr
                          -> CInt   -- ^ cchWideChar
                          -> IO CInt

foreign import WINDOWS_CCONV unsafe "windows.h WideCharToMultiByte"
    c_WideCharToMultiByte :: UINT   -- ^ CodePage
                          -> DWORD  -- ^ dwFlags
                          -> LPWSTR -- ^ lpWideCharStr
                          -> CInt   -- ^ cchWideChar
                          -> LPCSTR -- ^ lpMultiByteStr
                          -> CInt   -- ^ cbMultiByte
                          -> LPCSTR -- ^ lpDefaultChar
                          -> LPBOOL -- ^ lpUsedDefaultChar
                          -> IO CInt

foreign import WINDOWS_CCONV unsafe "windows.h IsDBCSLeadByteEx"
    c_IsDBCSLeadByteEx :: UINT -- ^ CodePage
                       -> BYTE -- ^ TestChar
                       -> IO BOOL


-- | Returns a slow but correct implementation of TextEncoding using the Win32 API.
--
-- This is useful for supporting DBCS text encodings on the console without having to statically link
-- huge code tables into all of our executables, and as a fallback mechanism if a new code page
-- is introduced that we don't yet know how to deal with ourselves.
mkCodePageEncoding :: CodingFailureMode -> Word32 -> TextEncoding
mkCodePageEncoding cfm cp
  = TextEncoding {
        textEncodingName = "CP" ++ show cp,
        mkTextDecoder = newCP (recoverDecode cfm) cpDecode cp,
        mkTextEncoder = newCP (recoverEncode cfm) cpEncode cp
      }
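-- A hedged usage sketch (illustrative only; 'cp932' is a hypothetical name,
-- and 932 is Windows' Shift-JIS DBCS code page):
--
-- > cp932 :: TextEncoding
-- > cp932 = mkCodePageEncoding ErrorOnCodingFailure 932
--
-- where 'ErrorOnCodingFailure' is one of the recovery policies defined in
-- GHC.IO.Encoding.Failure.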

newCP :: (Buffer from -> Buffer to -> IO (Buffer from, Buffer to))
      -> (Word32 -> Int -> CodeBuffer from to)
      -> Word32
      -> IO (BufferCodec from to ())
newCP rec fn cp = do
    -- Fail early if the code page doesn't exist, to match the behaviour of the IConv TextEncoding
    max_char_size <- alloca $ \cpinfo_ptr -> do
      success <- c_GetCPInfo cp cpinfo_ptr
      when (not success) $ throwGetLastError ("GetCPInfo " ++ show cp)
      fmap (fromIntegral . maxCharSize) $ peek cpinfo_ptr

    debugIO $ "GetCPInfo " ++ show cp ++ " = " ++ show max_char_size

    return $ BufferCodec {
      encode = fn cp max_char_size,
      recover = rec,
      close = return (),
      -- Windows doesn't supply a way to save/restore the state and doesn't need one
      -- since it's a dumb string->string API rather than a clever streaming one.
      getState = return (),
      setState = const $ return ()
      }

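-- Both directions are routed through UTF-16, since that is what the Win32
-- conversion functions (MultiByteToWideChar/WideCharToMultiByte) speak; the
-- definitions below pick the UTF-16 codec matching the host byte order.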
utf16_native_encode' :: EncodeBuffer
utf16_native_decode' :: DecodeBuffer
#ifdef WORDS_BIGENDIAN
utf16_native_encode' = utf16be_encode
utf16_native_decode' = utf16be_decode
#else
utf16_native_encode' = utf16le_encode
utf16_native_decode' = utf16le_decode
#endif

saner :: CodeBuffer from to
      -> Buffer from -> Buffer to
      -> IO (CodingProgress, Int, Buffer from, Buffer to)
saner code ibuf obuf = do
    (why, ibuf', obuf') <- code ibuf obuf
    -- Weird but true: the UTF-16 codecs have a special case (see their "done" functions)
    -- whereby if they entirely consume the input, then instead of returning an input buffer
    -- that is empty because bufL has reached bufR, they return a buffer that is empty
    -- because bufL = bufR = 0.
    --
    -- This is really very odd and confusing for our code, which expects the difference
    -- between the old and new input buffer bufLs to indicate the number of elements
    -- that were consumed!
    --
    -- We fix it by explicitly returning an integer giving the number of elements consumed, like so:
    if isEmptyBuffer ibuf'
      then return (InputUnderflow, bufferElems ibuf, ibuf', obuf')
      else return (why, bufL ibuf' - bufL ibuf, ibuf', obuf')

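-- The UTF-16 codecs from GHC.IO.Encoding.UTF16 operate on Word8 buffers, while
-- the Win32 calls above want CWchar (16 bits on Windows) buffers. The two views
-- below reinterpret one kind of buffer as the other without copying, simply by
-- rescaling the buffer offsets.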
byteView :: Buffer CWchar -> Buffer Word8
byteView (Buffer {..}) = Buffer { bufState = bufState, bufRaw = castForeignPtr bufRaw, bufSize = bufSize * 2, bufL = bufL * 2, bufR = bufR * 2 }

cwcharView :: Buffer Word8 -> Buffer CWchar
cwcharView (Buffer {..}) = Buffer { bufState = bufState, bufRaw = castForeignPtr bufRaw, bufSize = half bufSize, bufL = half bufL, bufR = half bufR }
  where half x = case x `divMod` 2 of
                   (y, 0) -> y
                   _      -> error "cwcharView: utf16_(encode|decode) (wrote out|consumed) non multiple-of-2 number of bytes"

utf16_native_encode :: CodeBuffer Char CWchar
utf16_native_encode ibuf obuf = do
    (why, ibuf, obuf) <- utf16_native_encode' ibuf (byteView obuf)
    return (why, ibuf, cwcharView obuf)

utf16_native_decode :: CodeBuffer CWchar Char
utf16_native_decode ibuf obuf = do
    (why, ibuf, obuf) <- utf16_native_decode' (byteView ibuf) obuf
    return (why, cwcharView ibuf, obuf)

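-- Decoding is a two-stage pipeline (in the non-CHARBUF_UTF16 case): first
-- MultiByteToWideChar translates the code-page bytes into UTF-16 in a temporary
-- CWchar buffer, and then utf16_native_decode translates that UTF-16 into the
-- Char output buffer.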
cpDecode :: Word32 -> Int -> DecodeBuffer
cpDecode cp max_char_size = \ibuf obuf -> do
#ifdef CHARBUF_UTF16
    let mbuf = obuf
#else
    -- FIXME: share the buffer between runs, even if the buffer is not the perfect size
    let sz =       (bufferElems ibuf * 2)     -- Worst case: the input CP text consists of 1-byte sequences that map entirely to things outside the BMP and so each require 2 UTF-16 code units
             `min` (bufferAvailable obuf * 2) -- Best case: each pair of UTF-16 code units becomes a single UTF-32 code point
    mbuf <- newBuffer (2 * sz) sz WriteBuffer :: IO (Buffer CWchar)
#endif
    debugIO $ "cpDecode " ++ summaryBuffer ibuf ++ " " ++ summaryBuffer mbuf
    (why1, ibuf', mbuf') <- cpRecode try' is_valid_prefix max_char_size 1 0 1 ibuf mbuf
    debugIO $ "cpRecode (cpDecode) = " ++ show why1 ++ " " ++ summaryBuffer ibuf' ++ " " ++ summaryBuffer mbuf'
#ifdef CHARBUF_UTF16
    return (why1, ibuf', mbuf')
#else
    -- Convert as much UTF-16 as possible to UTF-32. Note that it's impossible for this to fail
    -- due to illegal characters, since the output of Windows' encoding function should be correct UTF-16.
    -- However, it's perfectly possible to run out of either output or input buffer.
    debugIO $ "utf16_native_decode " ++ summaryBuffer mbuf' ++ " " ++ summaryBuffer obuf
    (why2, target_utf16_count, mbuf', obuf) <- saner utf16_native_decode (mbuf' { bufState = ReadBuffer }) obuf
    debugIO $ "utf16_native_decode = " ++ show why2 ++ " " ++ summaryBuffer mbuf' ++ " " ++ summaryBuffer obuf
    case why2 of
      -- If we successfully translate all of the UTF-16 buffer, we need to know why we couldn't get any more
      -- UTF-16 out of the Windows API
      InputUnderflow | isEmptyBuffer mbuf' -> return (why1, ibuf', obuf)
                     | otherwise           -> error "cpDecode: impossible underflown UTF-16 buffer"
      -- InvalidSequence should be impossible since mbuf' is output from Windows.
      InvalidSequence -> error "InvalidSequence on output of Windows API"
      -- If we run out of space in obuf, we need to ask for more output buffer space, while also returning
      -- the characters we have managed to consume so far.
      OutputUnderflow -> do
        -- We have an interesting problem here, similar to the cpEncode case: we have to figure out how much
        -- of the byte buffer was consumed to reach as far as the last UTF-16 character we actually decoded to UTF-32 OK.
        --
        -- The minimum number of bytes it could take is half the number of UTF-16 chars we got on the output, since
        -- one byte could theoretically generate two UTF-16 characters.
        -- The common case (ASCII text) is that every byte in the input maps to a single UTF-16 character.
        -- In the worst case, max_char_size bytes map to each UTF-16 character.
        byte_count <- bSearch "cpDecode" (cpRecode try' is_valid_prefix max_char_size 1 0 1) ibuf mbuf target_utf16_count (target_utf16_count `div` 2) target_utf16_count (target_utf16_count * max_char_size)
        return (OutputUnderflow, bufferRemove byte_count ibuf, obuf)
#endif
  where
    is_valid_prefix = c_IsDBCSLeadByteEx cp
    try' iptr icnt optr ocnt
        -- MultiByteToWideChar does surprising things if you have ocnt == 0
      | ocnt == 0 = return (Left True)
      | otherwise = do
          err <- c_MultiByteToWideChar (fromIntegral cp) 8 -- MB_ERR_INVALID_CHARS == 8: fail if an invalid input character is encountered
                                       iptr (fromIntegral icnt) optr (fromIntegral ocnt)
          debugIO $ "MultiByteToWideChar " ++ show cp ++ " 8 " ++ show iptr ++ " " ++ show icnt ++ " " ++ show optr ++ " " ++ show ocnt ++ "\n = " ++ show err
          case err of
            -- 0 indicates that we did not succeed
            0 -> do
              err <- getLastError
              case err of
                122  -> return (Left True)  -- ERROR_INSUFFICIENT_BUFFER
                1113 -> return (Left False) -- ERROR_NO_UNICODE_TRANSLATION
                _    -> failWith "MultiByteToWideChar" err
            wrote_chars -> return (Right (fromIntegral wrote_chars))

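-- Encoding runs the same pipeline in reverse: the Char buffer is first encoded
-- to UTF-16 (unless the char buffer is already UTF-16), and WideCharToMultiByte
-- then produces the code-page bytes.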
cpEncode :: Word32 -> Int -> EncodeBuffer
cpEncode cp _max_char_size = \ibuf obuf -> do
#ifdef CHARBUF_UTF16
    let mbuf' = ibuf
#else
    -- FIXME: share the buffer between runs, even though that means we can't size the buffer as we want.
    let sz =       (bufferElems ibuf * 2)     -- UTF-32 always uses 4 bytes. UTF-16 uses at most 4 bytes.
             `min` (bufferAvailable obuf * 2) -- In the best case, each pair of UTF-16 code units fits into only 1 byte
    mbuf <- newBuffer (2 * sz) sz WriteBuffer

    -- Convert as much UTF-32 as possible to UTF-16. NB: this can't fail due to output underflow
    -- since we sized the output buffer correctly. However, it could fail due to an illegal character
    -- in the input if it encounters a lone surrogate. In this case, our recovery will be applied as normal.
    (why1, ibuf', mbuf') <- utf16_native_encode ibuf mbuf
#endif
    debugIO $ "\ncpEncode " ++ summaryBuffer mbuf' ++ " " ++ summaryBuffer obuf
    (why2, target_utf16_count, mbuf', obuf) <- saner (cpRecode try' is_valid_prefix 2 1 1 0) (mbuf' { bufState = ReadBuffer }) obuf
    debugIO $ "cpRecode (cpEncode) = " ++ show why2 ++ " " ++ summaryBuffer mbuf' ++ " " ++ summaryBuffer obuf
#ifdef CHARBUF_UTF16
    return (why2, mbuf', obuf)
#else
    case why2 of
      -- If we successfully translate all of the UTF-16 buffer, we need to know why
      -- we weren't able to get any more UTF-16 out of the UTF-32 buffer
      InputUnderflow | isEmptyBuffer mbuf' -> return (why1, ibuf', obuf)
                     | otherwise           -> error "cpEncode: impossible underflown UTF-16 buffer"
      -- With OutputUnderflow/InvalidSequence we only care about the failings of the UTF-16->CP translation.
      -- Yes, InvalidSequence is possible even though mbuf' is guaranteed to be valid UTF-16, because
      -- the code page may not be able to represent the encoded Unicode codepoint.
      _ -> do
        -- Here is an interesting problem. If we have only managed to translate part of the mbuf'
        -- then we need to return an ibuf which has consumed exactly those characters required to obtain
        -- that part of the mbuf'. To reconstruct this information, we binary search for the number of
        -- UTF-32 characters required to yield the consumed count of UTF-16 code units:
        --
        -- When dealing with data from the BMP (the common case), consuming N UTF-16 code units is the same as consuming N
        -- UTF-32 characters. We start our search there so that most binary searches will terminate in a single iteration.
        -- Furthermore, the absolute minimum number of UTF-32 characters this can correspond to is half the UTF-16 code unit count
        -- (this is realised when the input data lies entirely outside the BMP).
        utf32_count <- bSearch "cpEncode" utf16_native_encode ibuf mbuf target_utf16_count (target_utf16_count `div` 2) target_utf16_count target_utf16_count
        return (why2, bufferRemove utf32_count ibuf, obuf)
#endif
  where
    -- Single characters should be mappable to bytes. If they aren't supported by the CP then we have an invalid input sequence.
    is_valid_prefix _ = return False

    try' iptr icnt optr ocnt
        -- WideCharToMultiByte does surprising things if you call it with ocnt == 0
      | ocnt == 0 = return (Left True)
      | otherwise = alloca $ \defaulted_ptr -> do
          poke defaulted_ptr False
          err <- c_WideCharToMultiByte (fromIntegral cp) 0 -- NB: the WC_ERR_INVALID_CHARS flag is useless: it only has an effect with the UTF-8 code page
                                       iptr (fromIntegral icnt) optr (fromIntegral ocnt)
                                       nullPtr defaulted_ptr
          defaulted <- peek defaulted_ptr
          debugIO $ "WideCharToMultiByte " ++ show cp ++ " 0 " ++ show iptr ++ " " ++ show icnt ++ " " ++ show optr ++ " " ++ show ocnt ++ " NULL " ++ show defaulted_ptr ++ "\n = " ++ show err ++ ", " ++ show defaulted
          case err of
            -- 0 indicates that we did not succeed
            0 -> do
              err <- getLastError
              case err of
                122  -> return (Left True)  -- ERROR_INSUFFICIENT_BUFFER
                1113 -> return (Left False) -- ERROR_NO_UNICODE_TRANSLATION
                _    -> failWith "WideCharToMultiByte" err
            wrote_bytes | defaulted -> return (Left False)
                        | otherwise -> return (Right (fromIntegral wrote_bytes))

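-- A worked example of the search below (illustrative numbers): suppose cpEncode
-- managed to emit bytes corresponding to 5 UTF-16 code units. bSearch then looks
-- for a number of "from" elements, here UTF-32 characters somewhere between
-- 2 (everything outside the BMP) and 5 (everything inside it), whose encoding is
-- exactly 5 UTF-16 code units long, re-running the codec on prefixes of ibuf
-- until the output size matches.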
bSearch :: String
        -> CodeBuffer from to
        -> Buffer from -> Buffer to -- From buffer (crucial data source) and to buffer (temporary storage only). To buffer must be empty (L=R).
        -> Int                      -- Target size of to buffer
        -> Int -> Int -> Int        -- Binary search min, mid, max
        -> IO Int                   -- Size of from buffer required to reach target size of to buffer
bSearch msg code ibuf mbuf target_to_elems = go
  where
    go mn md mx = do
      -- NB: this loop repeatedly re-encodes on top of mbuf using a varying fraction of ibuf. It doesn't
      -- matter if we blast the contents of mbuf since we already consumed all of the contents we are going to use.
      (_why, ibuf, mbuf) <- code (ibuf { bufR = bufL ibuf + md }) mbuf
      debugIO $ "code (bSearch " ++ msg ++ ") " ++ show md ++ " = " ++ show _why ++ ", " ++ summaryBuffer ibuf ++ summaryBuffer mbuf
      -- The normal case is to get InputUnderflow here, which indicates that coding basically
      -- terminated normally.
      --
      -- However, InvalidSequence is also possible if we are being called from cpDecode and we
      -- have just been unlucky enough to set md so that ibuf straddles a byte boundary.
      -- In this case we have to be really careful, because we don't want to report that
      -- "md" elements is the right number when in actual fact we could have had md-1 input
      -- elements and still produced the same number of bufferElems in mbuf.
      --
      -- In fact, we have to worry about this possibility even if we get InputUnderflow,
      -- since cpRecode reports InputUnderflow rather than InvalidSequence when the buffer
      -- ends in a valid lead byte. So the expedient thing to do is simply to check whether
      -- the input buffer was entirely consumed.
      --
      -- When called from cpDecode, OutputUnderflow is also possible.
      --
      -- Luckily, if we have InvalidSequence/OutputUnderflow and we do not appear to have reached
      -- the target, what we should do is the same as normal, because the fraction of ibuf that our
      -- first "code" coded successfully must be invalid-sequence-free, and ibuf will always
      -- have been decoded as far as the first invalid sequence in it.
      case bufferElems mbuf `compare` target_to_elems of
        -- Coding n "from" chars from the input yields exactly as many "to" chars
        -- as were consumed by the recode. All is peachy:
        EQ -> debugIO ("bSearch = " ++ show solution) >> return solution
          where solution = md - bufferElems ibuf
        -- If we encoded fewer "to" characters than the target number, try again with more "from" characters (and vice versa)
        LT -> go' (md + 1) mx
        GT -> go' mn (md - 1)
    go' mn mx | mn <= mx  = go mn (mn + ((mx - mn) `div` 2)) mx
              | otherwise = error $ "bSearch(" ++ msg ++ "): search crossed! " ++ show (summaryBuffer ibuf, summaryBuffer mbuf, target_to_elems, mn, mx)

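-- cpRecode drives one of the raw Win32 conversion calls (the try' argument)
-- over a buffer pair. Because those calls, as used here, either translate all of
-- the input they are given or are treated as having failed, cpRecode repeatedly
-- offers a shrinking slice of the input until a call succeeds, and then loops to
-- consume the rest of the buffer.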
cpRecode :: forall from to. (Show from, Storable from)
         => (Ptr from -> Int -> Ptr to -> Int -> IO (Either Bool Int))
         -> (from -> IO Bool)
         -> Int -- ^ Maximum length of a complete translatable sequence in the input (e.g. 2 if the input is UTF-16, 1 if the input is a SBCS, 2 if the input is a DBCS). Must be at least 1.
         -> Int -- ^ Minimum number of output elements per complete translatable sequence in the input (almost certainly 1)
         -> Int -> Int -- Log2 of the input and output element sizes in bytes, used to turn buffer offsets into the byte offsets fed to the raw conversion function
         -> CodeBuffer from to
cpRecode try' is_valid_prefix max_i_size min_o_size iscale oscale = go
  where
    go :: CodeBuffer from to
    go ibuf obuf | isEmptyBuffer ibuf                = return (InputUnderflow,  ibuf, obuf)
                 | bufferAvailable obuf < min_o_size = return (OutputUnderflow, ibuf, obuf)
                 | otherwise = try (bufferElems ibuf `min` ((max_i_size * bufferAvailable obuf) `div` min_o_size)) seek_smaller
      where
        done why = return (why, ibuf, obuf)

        seek_smaller n longer_was_valid
          -- In this case, we can't shrink any further via any method. Calling (try 0) wouldn't be right because that would always claim InputUnderflow...
          | n <= 1 = if longer_was_valid
                     -- (try m) (where m >= n) was valid, but we overflowed the output buffer with even a single input element
                     then done OutputUnderflow
                     -- There was no initial valid sequence in the input, but it might just be a truncated buffer - we need to check
                     else do byte <- withBuffer ibuf $ \ptr -> peekElemOff ptr (bufL ibuf)
                             valid_prefix <- is_valid_prefix byte
                             done (if valid_prefix && bufferElems ibuf < max_i_size then InputUnderflow else InvalidSequence)
          -- If we're already looking at very small buffers, try every n down to 1, to ensure we spot as long a sequence as exists while avoiding trying 0.
          -- Doing it this way ensures that we spot a single initial sequence of length <= max_i_size if any such exists.
          | n < 2 * max_i_size = try (n - 1) (\pred_n pred_n_was_valid -> seek_smaller pred_n (longer_was_valid || pred_n_was_valid))
          -- Otherwise, use a binary chop to either find the prefix before the invalid input, or shrink the output down so that it fits
          -- in the output buffer. After the chop, try to consume extra input elements so as to recover as much of the sequence as possible if we
          -- end up chopping a multi-element input sequence into two parts.
          --
          -- Note that since max_i_size >= 1:
          --  * (n `div` 2) >= 1, so we don't try 0
          --  * ((n `div` 2) + (max_i_size - 1)) < n, so we don't get into a loop where (seek_smaller n) calls (post_divide (n `div` 2)), which in turn calls (seek_smaller n) again
          | let n' = n `div` 2 = try n' (post_divide n' longer_was_valid)

        post_divide _  _                n True  = seek_smaller n True
        post_divide n' longer_was_valid n False | n < n' + max_i_size - 1 = try (n + 1) (post_divide n' longer_was_valid) -- There's still a chance..
                                                | otherwise               = seek_smaller n' longer_was_valid              -- No amount of recovery could save us :(

        try n k_fail = withBuffer ibuf $ \iptr -> withBuffer obuf $ \optr -> do
          ei_err_wrote <- try' (iptr `plusPtr` (bufL ibuf `shiftL` iscale)) n
                               (optr `plusPtr` (bufR obuf `shiftL` oscale)) (bufferAvailable obuf)
          debugIO $ "try " ++ show n ++ " = " ++ show ei_err_wrote
          case ei_err_wrote of
            -- ERROR_INSUFFICIENT_BUFFER: a supplied buffer size was not large enough, or it was incorrectly set to NULL.
            Left True  -> k_fail n True
            -- ERROR_NO_UNICODE_TRANSLATION: invalid Unicode was found in a string.
            Left False -> k_fail n False
            -- Must have interpreted all of the given input successfully.
            -- We need to iterate until we have consumed the complete contents of the buffer.
            Right wrote_elts -> go (bufferRemove n ibuf) (obuf { bufR = bufR obuf + wrote_elts })