For use with nn_sequential.
Shape
Input: (*, S_start, ..., S_i, ..., S_end, *), where S_i is the size at dimension i and * means any number of dimensions including none.

Output: (*, S_start * ... * S_i * ... * S_end, *).
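The leading and trailing * in the shape rule pass through unchanged; only the S_start, ..., S_end range is collapsed into a single dimension. A minimal sketch of that behaviour, assuming nn_flatten() accepts start_dim and end_dim arguments (not shown in this excerpt) that select the range of dimensions to collapse:

if (torch_is_installed()) {
  library(torch)
  x <- torch_randn(4, 2, 3, 5)
  # assumed start_dim/end_dim arguments: collapse dimensions 2 and 3 only,
  # leaving the leading size 4 and trailing size 5 untouched
  m <- nn_flatten(start_dim = 2, end_dim = 3)
  m(x)$shape   # expected: 4 6 5
}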
Examples
if (torch_is_installed()) {
  # a batch of 32 single-channel 5 x 5 inputs
  input <- torch_randn(32, 1, 5, 5)
  m <- nn_flatten()
  # all dimensions after the batch dimension are collapsed: 1 * 5 * 5 = 25
  m(input)
}
#> torch_tensor
#> Columns 1 to 10
#> -0.2061 0.5926 0.6800 -0.3597 0.2644 -0.5257 0.1042 -0.9326 -0.8547 -1.2562
#> 0.2783 0.5612 0.1489 -0.4697 0.6776 0.5832 -1.0390 -1.4859 0.2804 1.1474
#> -1.2395 -0.0919 -0.1514 -0.6147 -1.7027 0.5231 -0.1818 -1.8060 -0.2522 -0.2575
#> 1.3485 -0.7461 1.3595 1.9313 -1.7233 -1.0740 -1.1430 1.1714 -0.9156 -0.8039
#> -2.0064 -1.4307 0.0072 1.2391 0.2070 1.7725 -0.9827 -0.5497 -0.5584 0.8348
#> 0.7009 -1.7190 -1.9887 -0.1877 -0.1691 -0.7999 -1.1704 0.8539 0.8351 0.0943
#> 0.7007 0.3993 0.4109 1.4820 1.1792 2.1885 -0.0446 -0.8584 2.4315 -0.0359
#> -0.9200 -0.3903 -0.4532 1.0427 0.1550 -0.7777 -0.4709 0.9434 0.2150 -0.4901
#> 0.6205 0.3783 -0.6096 -0.9591 -0.5497 0.0217 -0.9843 -0.5777 1.5201 -0.8664
#> 0.5181 -0.7818 -0.6255 1.2758 -0.4357 -0.0431 -0.5118 -0.2483 0.6868 -0.4833
#> -0.3160 0.1242 0.0536 -0.7888 -0.6800 0.2286 -0.2428 -0.4903 -1.1189 1.0464
#> 0.7026 -1.1695 1.2333 1.1553 1.3874 -0.0843 0.5393 -0.6196 0.9076 1.0923
#> -2.2958 0.3708 -1.5479 0.0046 0.7976 -0.0206 0.4327 0.7761 0.1882 -1.2645
#> -0.0827 -1.1767 -0.6671 0.9020 -1.4176 -0.3433 -0.1471 0.0091 1.8391 1.3508
#> -0.8594 -0.8377 -0.6192 0.9613 0.5887 -0.2027 -0.4690 -0.1376 0.0543 1.5517
#> 0.6484 -0.8588 0.9828 -0.9731 0.5196 0.0569 0.4614 -1.6472 -0.7188 0.2071
#> 0.4826 -1.6939 0.6183 0.0819 1.0094 -0.0200 0.9866 -1.5408 0.7429 -0.0823
#> -0.7354 -0.6771 -0.8575 -1.0925 0.8054 1.0574 -0.5689 1.8546 -1.1928 -0.1752
#> -0.1378 0.9201 0.8152 0.0818 0.0531 -2.3447 0.8267 -1.2707 -0.0069 -1.0040
#> -0.3522 -0.9843 -0.8313 0.6468 -0.3817 0.0399 0.2351 -0.5011 0.5512 0.2125
#> 0.7718 0.3691 0.0227 0.3448 0.1790 0.7582 1.6185 0.1086 2.1357 -0.4894
#> -1.0115 -0.4153 1.7637 1.0019 -0.7599 -0.2473 -0.6647 -1.0672 1.2728 -0.4579
#> 0.4429 0.4245 0.2358 1.2267 -0.5137 -0.7538 1.1298 0.8629 0.6204 -1.0294
#> 1.2114 1.3885 -0.0376 -1.4533 -0.8043 1.3162 -0.8944 0.7620 0.4583 1.7064
#> 1.3063 0.5083 -1.6361 0.2766 2.1287 0.5842 -1.2342 -0.7924 -3.3385 0.4230
#> -0.5648 1.0770 0.9727 -0.6774 -0.3351 -0.8140 0.9766 0.6885 2.0893 0.1109
#> -1.1653 0.5656 -0.3703 -2.1824 0.4589 -2.5347 -1.1149 0.0803 0.5300 0.3586
#> 2.0128 0.1394 -0.7910 -1.1782 -0.5336 1.5971 0.4216 -0.2752 0.2078 -0.1693
#> -0.7862 0.2946 -0.2903 0.9678 0.6501 -2.2288 0.5504 0.1948 0.6873 -0.4591
#> -0.2279 0.6900 0.0843 -0.7947 -0.2358 -0.3146 -1.4641 0.5708 -0.0272 1.0409
#> ... [the output was truncated (use n=-1 to disable)]
#> [ CPUFloatType{32,25} ]
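As the description notes, nn_flatten() is meant to be dropped into nn_sequential(), typically between a convolutional feature extractor and a linear head. A minimal sketch of that pattern, assuming the usual nn_conv2d(), nn_relu(), and nn_linear() constructors and illustrative layer sizes:

if (torch_is_installed()) {
  library(torch)
  net <- nn_sequential(
    nn_conv2d(in_channels = 1, out_channels = 8, kernel_size = 3),  # (N, 8, 26, 26)
    nn_relu(),
    nn_flatten(),                                                   # (N, 8 * 26 * 26)
    nn_linear(8 * 26 * 26, 10)
  )
  x <- torch_randn(16, 1, 28, 28)
  net(x)$shape   # expected: 16 10
}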