compile-time f32, f64 operations are now correctly lossy

Previously we used the bigfloat abstraction for all
compile-time float math. But runtime code and comptime code
are supposed to produce the same result, so now adding an
f32 to an f32 at compile time is done with f32 math instead
of bigfloat. Float literals still get bigfloat math.

closes #424
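
To make the new behavior concrete, here is a small standalone C++ sketch (not code from this commit; all names are illustrative) contrasting step-by-step f32 arithmetic, which runtime code uses and comptime code now matches, with evaluating the whole expression in extra precision and rounding only once at the end, which is roughly what the old bigfloat path did:

    #include <cstdio>

    int main(void) {
        const float one = 1.0f;
        const float tiny = 1.0f / 33554432.0f; // exactly 2^-25, a quarter ulp of 1.0f

        // f32 math at every step, as runtime code (and now comptime code) does it:
        // 1.0f + 2^-25 rounds back to 1.0f, so the final result is 0.0f.
        float stepwise = (one + tiny) - one;

        // Extra-precision evaluation rounded only at the end (double is exact here):
        // 1 + 2^-25 - 1 == 2^-25, which survives the final rounding to f32.
        float rounded_at_end = (float)(((double)one + (double)tiny) - (double)one);

        printf("f32 at every step: %g\n", stepwise);       // prints 0
        printf("rounded at end:    %g\n", rounded_at_end); // prints 2.98023e-08
        return 0;
    }

Float literals have no concrete float type yet, which is why they keep the full-precision bigfloat representation, as the renderer hunks below still show.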
Author: Andrew Kelley
Date:   2017-08-20 00:33:05 -04:00
Parent: c73a0c92d0
Commit: 09bd4a9a86

14 changed files with 808 additions and 110 deletions

@@ -540,7 +540,7 @@ static void render_node_extra(AstRender *ar, AstNode *node, bool grouped) {
             {
                 Buf rendered_buf = BUF_INIT;
                 buf_resize(&rendered_buf, 0);
-                bigfloat_write_buf(&rendered_buf, node->data.float_literal.bigfloat);
+                bigfloat_append_buf(&rendered_buf, node->data.float_literal.bigfloat);
                 fprintf(ar->f, "%s", buf_ptr(&rendered_buf));
             }
             break;
@@ -548,7 +548,7 @@ static void render_node_extra(AstRender *ar, AstNode *node, bool grouped) {
             {
                 Buf rendered_buf = BUF_INIT;
                 buf_resize(&rendered_buf, 0);
-                bigint_write_buf(&rendered_buf, node->data.int_literal.bigint, 10);
+                bigint_append_buf(&rendered_buf, node->data.int_literal.bigint, 10);
                 fprintf(ar->f, "%s", buf_ptr(&rendered_buf));
             }
             break;