#if !defined(__AVR_TINY__)
#ifndef __DOXYGEN__
#include "macros.inc"
#include "ntz.h"
#include "xtoa_fast.h"
#define v_lo r22
#define v_hi r23
#define v_hlo r24
#define v_hhi r25
#define str_lo r20
#define str_hi r21
#define base r18
#define flags r19
#define v_fifth r26
#define t_lo r18
#define t_hi r19
#define t_hlo r20
#define t_hhi r21
#define symb r20
#define cnt r27
#define rzero r1
ASSEMBLY_CLIB_SECTION
.global __ultoa_invert
.type __ultoa_invert, "function"
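; char *__ultoa_invert (unsigned long val, char *str, int base)
;
; Writes the digits of val to str in inverted order (least significant
; digit first) and returns a pointer just past the last digit; the
; caller prints the buffer backwards.  Per the avr-gcc calling
; convention, val arrives in r25:r22, str in r21:r20 and base in
; r19:r18; the upper-case flag rides in the high byte of base.
; Illustrative C sketch of the contract (digit() is a stand-in only):
;   do { *str++ = digit (val % base); val /= base; } while (val);
;   return str;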
__ultoa_invert:
X_movw ZL, str_lo
clr v_fifth ; fifth byte: must be 0 for every base (.L_lsr shifts through it)
cpi base, 8
breq .L_oct
cpi base, 16
breq .L_hex
; decimal format
clt ; clear T: it will flag quotient == 0 (last digit)
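; Divide by 10 with shifts and adds only: accumulate val * 1.6 via
; val += val/2, val/16, val/256, val/65536, val>>32, since
; (1 + 1/2)(1 + 1/16)(1 + 1/256)(1 + 1/65536)(1 + 2^-32) ~= 8/5,
; then shift right by 4, because 1.6/16 == 1/10.  The "val &= ~1"
; and "val += 2" pre-rounding below compensates for the truncation
; in each step so the final quotient comes out exact.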
.L_dec_loop:
push v_lo ; save low byte to compute the remainder
; val &= ~1
andi v_lo, ~1
; val += 2
subi v_lo, lo8(-2)
sbci v_hi, hi8(-2)
sbci v_hlo, hlo8(-2)
sbci v_hhi, hhi8(-2)
sbci v_fifth, hhi8(-2)
; val += val/2
ldi cnt, 1
rcall .L_div_add
; val += val/16
ldi cnt, 4
rcall .L_div_add
; val += val/256
add v_lo, v_hi
adc v_hi, v_hlo
adc v_hlo, v_hhi
adc v_hhi, v_fifth
adc v_fifth, rzero
; val += val/65536
add v_lo, v_hlo
adc v_hi, v_hhi
adc v_hlo, v_fifth
adc v_hhi, rzero
adc v_fifth, rzero
; val += val >> 32
add v_lo, v_fifth
adc v_hi, rzero
adc v_hlo, rzero
adc v_hhi, rzero
adc v_fifth, rzero
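; val now holds ~1.6 * val_original (up to 36 bits, hence v_fifth)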
; division result: val /= 16
rcall .L_lsr_4 ; v_fifth := 0
brne 1f
set ; T := Z flag
1:
; remainder = val_original - 10*quotient (low bytes suffice)
pop t_hi
#if defined(__AVR_ENHANCED__) && __AVR_ENHANCED__
ldi t_lo, 10
mul t_lo, v_lo ; r1:r0 = 10 * quotient (low byte in r0)
clr r1 ; restore the zero register
#else
mov r0, v_lo
lsl r0 ; r0 = 2 * quotient
sub t_hi, r0 ; subtract 2*q here, 8*q below
lsl r0
lsl r0 ; r0 = 8 * quotient
#endif
sub t_hi, r0
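; t_hi is now the low byte of val_original - 10*quotient, i.e. the
; decimal remainder 0..9 (both paths above subtract 10*q in total)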
; output digit
subi t_hi, lo8(-'0')
st Z+, t_hi
; quotient == 0 ?
brtc .L_dec_loop
; end of string
.L_eos:
X_movw r24, ZL ; return pointer past the last digit
ret
; octal format
.L_oct:
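; emit the low 3 bits as a digit, shift right by 3, repeat until val == 0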
mov symb, v_lo
andi symb, 7
subi symb, lo8(-'0')
st Z+, symb
ldi cnt, 3
rcall .L_lsr
brne .L_oct
rjmp .L_eos
; hex format
.L_hex:
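; emit the low 4 bits; 10..15 become 'a'..'f', or 'A'..'F' when the
; XTOA_UPPER bit of flags is set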
mov symb, v_lo
andi symb, 0x0f
subi symb, lo8(-'0')
cpi symb, '9' + 1
brlo 3f
subi symb, lo8('9' + 1 - 'a')
sbrc flags, ntz(XTOA_UPPER) - 8
subi symb, lo8('a' - 'A')
3: st Z+, symb
rcall .L_lsr_4
brne .L_hex
rjmp .L_eos
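; .L_lsr_4: shift the 40-bit value v_fifth:v_hhi..v_lo right by 4;
; .L_lsr: shift right by cnt bits; both return with Z set iff the
; low 32 bits of val are zero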
.L_lsr_4:
ldi cnt, 4
.L_lsr:
lsr v_fifth
ror v_hhi
ror v_hlo
ror v_hi
ror v_lo
dec cnt
brne .L_lsr
; test: set Z iff the 32-bit value v_hhi..v_lo is zero
sbiw v_hlo, 0 ; only Z flag is needed
cpc v_lo, rzero
cpc v_hi, rzero
ret
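; .L_div_add: val += val >> cnt (40-bit shifted add); clobbers
; t_lo..t_hhi and r0.  The callers guarantee v_fifth >> cnt == 0,
; so r0 is 0 again by the final add.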
.L_div_add:
; copy to temporary
X_movw t_lo, v_lo
X_movw t_hlo, v_hlo
mov r0, v_fifth
; lsr temporary
7: lsr r0
ror t_hhi
ror t_hlo
ror t_hi
ror t_lo
dec cnt
brne 7b
; add
add v_lo, t_lo
adc v_hi, t_hi
adc v_hlo, t_hlo
adc v_hhi, t_hhi
adc v_fifth, r0 ; here r0 == 0
ret
.size __ultoa_invert, . - __ultoa_invert
.end
#endif
#endif